From c76ac49d266e27aa3fea84ef2df1f813d24c91c7 Mon Sep 17 00:00:00 2001 From: Simon Mo Date: Fri, 15 Nov 2024 12:47:40 -0800 Subject: [PATCH 001/397] [Docs] Add Nebius as sponsors (#10371) Signed-off-by: simon-mo --- README.md | 1 + docs/source/community/sponsors.md | 1 + 2 files changed, 2 insertions(+) diff --git a/README.md b/README.md index 6530886ed7de2..0ef073210d070 100644 --- a/README.md +++ b/README.md @@ -100,6 +100,7 @@ vLLM is a community project. Our compute resources for development and testing a - Dropbox - Google Cloud - Lambda Lab +- Nebius - NVIDIA - Replicate - Roblox diff --git a/docs/source/community/sponsors.md b/docs/source/community/sponsors.md index 52fbf9a577c7e..c6f83b3a92ca0 100644 --- a/docs/source/community/sponsors.md +++ b/docs/source/community/sponsors.md @@ -15,6 +15,7 @@ vLLM is a community project. Our compute resources for development and testing a - Dropbox - Google Cloud - Lambda Lab +- Nebius - NVIDIA - Replicate - Roblox From a067f85e08f6604b328a16efe3ead4629e0ead5b Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 15 Nov 2024 16:13:53 -0500 Subject: [PATCH 002/397] [Frontend] Add --version flag to CLI (#10369) Signed-off-by: Russell Bryant --- vllm/scripts.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vllm/scripts.py b/vllm/scripts.py index 4e4c071784287..a51c21cfa29e7 100644 --- a/vllm/scripts.py +++ b/vllm/scripts.py @@ -9,6 +9,7 @@ from openai import OpenAI from openai.types.chat import ChatCompletionMessageParam +import vllm.version from vllm.engine.arg_utils import EngineArgs from vllm.entrypoints.openai.api_server import run_server from vllm.entrypoints.openai.cli_args import (make_arg_parser, @@ -143,6 +144,11 @@ def main(): env_setup() parser = FlexibleArgumentParser(description="vLLM CLI") + parser.add_argument('-v', + '--version', + action='version', + version=vllm.version.__version__) + subparsers = parser.add_subparsers(required=True, dest="subparser") serve_parser = subparsers.add_parser( From 3e8d14d8a1e3e54655f79d7bb3481cde02943281 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 15 Nov 2024 16:20:20 -0500 Subject: [PATCH 003/397] [Doc] Move PR template content to docs (#10159) Signed-off-by: Russell Bryant --- .github/PULL_REQUEST_TEMPLATE.md | 71 +--------------- .github/scripts/cleanup_pr_body.sh | 25 +++++- docs/source/contributing/overview.rst | 114 +++++++++++++++++++++++--- 3 files changed, 126 insertions(+), 84 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index be0afc6305044..51a73c857ccb2 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,73 +2,4 @@ FILL IN THE PR DESCRIPTION HERE FIX #xxxx (*link existing issues this PR will resolve*) -**BEFORE SUBMITTING, PLEASE READ THE CHECKLIST BELOW AND FILL IN THE DESCRIPTION ABOVE** - ---- - -
-<details>
-<summary><b> PR Checklist (Click to Expand) </b></summary>
-
-<p>Thank you for your contribution to vLLM! Before submitting the pull request, please ensure the PR meets the following criteria. This helps vLLM maintain the code quality and improve the efficiency of the review process.</p>
-
-<h3>PR Title and Classification</h3>
-<p>Only specific types of PRs will be reviewed. The PR title is prefixed appropriately to indicate the type of change. Please use one of the following:</p>
-<ul>
-    <li>[Bugfix] for bug fixes.</li>
-    <li>[CI/Build] for build or continuous integration improvements.</li>
-    <li>[Doc] for documentation fixes and improvements.</li>
-    <li>[Model] for adding a new model or improving an existing model. Model name should appear in the title.</li>
-    <li>[Frontend] For changes on the vLLM frontend (e.g., OpenAI API server, LLM class, etc.)</li>
-    <li>[Kernel] for changes affecting CUDA kernels or other compute kernels.</li>
-    <li>[Core] for changes in the core vLLM logic (e.g., LLMEngine, AsyncLLMEngine, Scheduler, etc.)</li>
-    <li>[Hardware][Vendor] for hardware-specific changes. Vendor name should appear in the prefix (e.g., [Hardware][AMD]).</li>
-    <li>[Misc] for PRs that do not fit the above categories. Please use this sparingly.</li>
-</ul>
-<p>Note: If the PR spans more than one category, please include all relevant prefixes.</p>
-
-<h3>Code Quality</h3>
-
-<p>The PR need to meet the following code quality standards:</p>
-
-<ul>
-    <li>We adhere to Google Python style guide and Google C++ style guide.</li>
-    <li>Pass all linter checks. Please use format.sh to format your code.</li>
-    <li>The code need to be well-documented to ensure future contributors can easily understand the code.</li>
-    <li>Include sufficient tests to ensure the project to stay correct and robust. This includes both unit tests and integration tests.</li>
-    <li>Please add documentation to docs/source/ if the PR modifies the user-facing behaviors of vLLM. It helps vLLM user understand and utilize the new features or changes.</li>
-</ul>
-
-<h3>Adding or changing kernels</h3>
-
-<p>Each custom kernel needs a schema and one or more implementations to be registered with PyTorch.</p>
-
-<ul>
-    <li>Make sure custom ops are registered following PyTorch guidelines: Custom C++ and CUDA Operators and The Custom Operators Manual.</li>
-    <li>Custom operations that return Tensors require meta-functions. Meta-functions should be implemented and registered in python so that dynamic dims can be handled automatically. See above documents for a description of meta-functions.</li>
-    <li>Use torch.libary.opcheck() to test the function registration and meta-function for any registered ops. See tests/kernels for examples.</li>
-    <li>When changing the C++ signature of an existing op, the schema must be updated to reflect the changes.</li>
-    <li>If a new custom type is needed, see the following document: Custom Class Support in PT2.</li>
-</ul>
-
-<h3>Notes for Large Changes</h3>
-
-<p>Please keep the changes as concise as possible. For major architectural changes (>500 LOC excluding kernel/data/config/test), we would expect a GitHub issue (RFC) discussing the technical design and justification. Otherwise, we will tag it with rfc-required and might not go through the PR.</p>
-
-<h3>What to Expect for the Reviews</h3>
-
-<p>The goal of the vLLM team is to be a transparent reviewing machine. We would like to make the review process transparent and efficient and make sure no contributor feel confused or frustrated. However, the vLLM team is small, so we need to prioritize some PRs over others. Here is what you can expect from the review process:</p>
-
-<ul>
-    <li>After the PR is submitted, the PR will be assigned to a reviewer. Every reviewer will pick up the PRs based on their expertise and availability.</li>
-    <li>After the PR is assigned, the reviewer will provide status update every 2-3 days. If the PR is not reviewed within 7 days, please feel free to ping the reviewer or the vLLM team.</li>
-    <li>After the review, the reviewer will put an action-required label on the PR if there are changes required. The contributor should address the comments and ping the reviewer to re-review the PR.</li>
-    <li>Please respond to all comments within a reasonable time frame. If a comment isn't clear or you disagree with a suggestion, feel free to ask for clarification or discuss the suggestion.</li>
-</ul>
-
-<h3>Thank You</h3>
-
-<p>Finally, thank you for taking the time to read these guidelines and for your interest in contributing to vLLM. Your contributions make vLLM a great tool for everyone!</p>
-
-</details>
- - +**BEFORE SUBMITTING, PLEASE READ https://docs.vllm.ai/en/latest/contributing/overview.html ** diff --git a/.github/scripts/cleanup_pr_body.sh b/.github/scripts/cleanup_pr_body.sh index 3b2da7b9f8966..3246c6f9bc4b7 100755 --- a/.github/scripts/cleanup_pr_body.sh +++ b/.github/scripts/cleanup_pr_body.sh @@ -15,19 +15,36 @@ NEW=/tmp/new_pr_body.txt gh pr view --json body --template "{{.body}}" "${PR_NUMBER}" > "${OLD}" cp "${OLD}" "${NEW}" -# Remove all lines after and including "**BEFORE SUBMITTING, PLEASE READ THE CHECKLIST BELOW AND FILL IN THE DESCRIPTION ABOVE**" -sed -i '/\*\*BEFORE SUBMITTING, PLEASE READ THE CHECKLIST BELOW AND FILL IN THE DESCRIPTION ABOVE\*\*/,$d' "${NEW}" - # Remove "FIX #xxxx (*link existing issues this PR will resolve*)" sed -i '/FIX #xxxx.*$/d' "${NEW}" # Remove "FILL IN THE PR DESCRIPTION HERE" sed -i '/FILL IN THE PR DESCRIPTION HERE/d' "${NEW}" +# Remove all lines after and including "**BEFORE SUBMITTING, PLEASE READ THE CHECKLIST BELOW AND FILL IN THE DESCRIPTION ABOVE**" +sed -i '/\*\*BEFORE SUBMITTING, PLEASE READ.*\*\*/,$d' "${NEW}" + +# Remove HTML
<details> section that includes text of "PR Checklist (Click to Expand)"
+python3 - <<EOF
+import re
+
+with open("${NEW}", "r") as file:
+    content = file.read()
+
+pattern = re.compile(r'<details>.*?<summary>.*?PR Checklist \(Click to Expand\).*?</summary>.*?</details>
', re.DOTALL) +content = re.sub(pattern, '', content) + +with open("${NEW}", "w") as file: + file.write(content) +EOF + # Run this only if ${NEW} is different than ${OLD} if ! cmp -s "${OLD}" "${NEW}"; then - echo "Updating PR body" gh pr edit --body-file "${NEW}" "${PR_NUMBER}" + echo + echo "Updated PR body:" + echo + cat "${NEW}" else echo "No changes needed" fi diff --git a/docs/source/contributing/overview.rst b/docs/source/contributing/overview.rst index ac2d2b2fe4103..4cea0afdaea74 100644 --- a/docs/source/contributing/overview.rst +++ b/docs/source/contributing/overview.rst @@ -41,15 +41,6 @@ Testing Contribution Guidelines ======================= -DCO and Signed-off-by ----------------------- - -When contributing changes to this project, you must agree to the `DCO `_. -Commits must include a ``Signed-off-by:`` header which certifies agreement with -the terms of the `DCO `_. - -Using ``-s`` with ``git commit`` will automatically add this header. - Issues ------ @@ -61,7 +52,110 @@ If you encounter a bug or have a feature request, please `search existing issues Pull Requests & Code Reviews ---------------------------- -Please check the PR checklist in the `PR template `_ for a detailed guide for contribution. +Thank you for your contribution to vLLM! Before submitting the pull request, +please ensure the PR meets the following criteria. This helps vLLM maintain the +code quality and improve the efficiency of the review process. + +DCO and Signed-off-by +^^^^^^^^^^^^^^^^^^^^^ + +When contributing changes to this project, you must agree to the `DCO `_. +Commits must include a ``Signed-off-by:`` header which certifies agreement with +the terms of the `DCO `_. + +Using ``-s`` with ``git commit`` will automatically add this header. + +PR Title and Classification +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Only specific types of PRs will be reviewed. The PR title is prefixed +appropriately to indicate the type of change. Please use one of the following: + +- ``[Bugfix]`` for bug fixes. +- ``[CI/Build]`` for build or continuous integration improvements. +- ``[Doc]`` for documentation fixes and improvements. +- ``[Model]`` for adding a new model or improving an existing model. Model name + should appear in the title. +- ``[Frontend]`` For changes on the vLLM frontend (e.g., OpenAI API server, + ``LLM`` class, etc.) +- ``[Kernel]`` for changes affecting CUDA kernels or other compute kernels. +- ``[Core]`` for changes in the core vLLM logic (e.g., ``LLMEngine``, + ``AsyncLLMEngine``, ``Scheduler``, etc.) +- ``[Hardware][Vendor]`` for hardware-specific changes. Vendor name should + appear in the prefix (e.g., ``[Hardware][AMD]``). +- ``[Misc]`` for PRs that do not fit the above categories. Please use this + sparingly. + +.. note:: + If the PR spans more than one category, please include all relevant prefixes. + +Code Quality +^^^^^^^^^^^^ + +The PR needs to meet the following code quality standards: + +- We adhere to `Google Python style guide + `_ and `Google C++ style guide + `_. +- Pass all linter checks. Please use `format.sh + `_ to format your + code. +- The code needs to be well-documented to ensure future contributors can easily + understand the code. +- Include sufficient tests to ensure the project stays correct and robust. This + includes both unit tests and integration tests. +- Please add documentation to ``docs/source/`` if the PR modifies the + user-facing behaviors of vLLM. It helps vLLM users understand and utilize the + new features or changes. 
+ +Adding or Changing Kernels +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Each custom kernel needs a schema and one or more implementations to be registered with PyTorch. + +- Make sure custom ops are registered following PyTorch guidelines: + `Custom C++ and CUDA Operators `_ + and `The Custom Operators Manual `_. +- Custom operations that return ``Tensors`` require meta-functions. + Meta-functions should be implemented and registered in Python so that dynamic + dims can be handled automatically. See above documents for a description of + meta-functions. +- Use `torch.library.opcheck() `_ + to test the function registration and meta-function for any registered ops. + See ``tests/kernels`` for examples. +- When changing the C++ signature of an existing op, the schema must be updated + to reflect the changes. +- If a new custom type is needed, see the following document: + `Custom Class Support in PT2 `_. + +Notes for Large Changes +^^^^^^^^^^^^^^^^^^^^^^^ + +Please keep the changes as concise as possible. For major architectural changes +(>500 LOC excluding kernel/data/config/test), we would expect a GitHub issue +(RFC) discussing the technical design and justification. Otherwise, we will tag +it with ``rfc-required`` and might not go through the PR. + +What to Expect for the Reviews +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The goal of the vLLM team is to be a *transparent reviewing machine*. We would +like to make the review process transparent and efficient and make sure no +contributor feels confused or frustrated. However, the vLLM team is small, so we +need to prioritize some PRs over others. Here is what you can expect from the +review process: + +- After the PR is submitted, the PR will be assigned to a reviewer. Every + reviewer will pick up the PRs based on their expertise and availability. +- After the PR is assigned, the reviewer will provide status updates every 2-3 + days. If the PR is not reviewed within 7 days, please feel free to ping the + reviewer or the vLLM team. +- After the review, the reviewer will put an ``action-required`` label on the PR + if there are changes required. The contributor should address the comments and + ping the reviewer to re-review the PR. +- Please respond to all comments within a reasonable time frame. If a comment + isn't clear or you disagree with a suggestion, feel free to ask for + clarification or discuss the suggestion. Thank You --------- From 4f168f69a3e856bda3f30e02fcee7db2a01ff32b Mon Sep 17 00:00:00 2001 From: Michael Green <59619482+mikegre-google@users.noreply.github.com> Date: Fri, 15 Nov 2024 21:26:17 +0000 Subject: [PATCH 004/397] [Docs] Misc updates to TPU installation instructions (#10165) --- .../getting_started/tpu-installation.rst | 54 ++++++++++++------- 1 file changed, 35 insertions(+), 19 deletions(-) diff --git a/docs/source/getting_started/tpu-installation.rst b/docs/source/getting_started/tpu-installation.rst index 75ab2b6ba02dc..22cc684a1c778 100644 --- a/docs/source/getting_started/tpu-installation.rst +++ b/docs/source/getting_started/tpu-installation.rst @@ -44,15 +44,18 @@ Requirements Provision Cloud TPUs ==================== -You can provision Cloud TPUs using the `Cloud TPU API `_` -or the `queued resources `_` -API. This section shows how to create TPUs using the queued resource API. -For more information about using the Cloud TPU API, see `Create a Cloud TPU using the Create Node API `_. -`Queued resources `_ -enable you to request Cloud TPU resources in a queued manner. 
When you request -queued resources, the request is added to a queue maintained by the Cloud TPU -service. When the requested resource becomes available, it's assigned to your -Google Cloud project for your immediate exclusive use. +You can provision Cloud TPUs using the `Cloud TPU API `_ +or the `queued resources `_ +API. This section shows how to create TPUs using the queued resource API. For +more information about using the Cloud TPU API, see `Create a Cloud TPU using the Create Node API `_. +Queued resources enable you to request Cloud TPU resources in a queued manner. +When you request queued resources, the request is added to a queue maintained by +the Cloud TPU service. When the requested resource becomes available, it's +assigned to your Google Cloud project for your immediate exclusive use. + +.. note:: + In all of the following commands, replace the ALL CAPS parameter names with + appropriate values. See the parameter descriptions table for more information. Provision a Cloud TPU with the queued resource API -------------------------------------------------- @@ -68,6 +71,7 @@ Create a TPU v5e with 4 TPU chips: --runtime-version RUNTIME_VERSION \ --service-account SERVICE_ACCOUNT + .. list-table:: Parameter descriptions :header-rows: 1 @@ -81,12 +85,13 @@ Create a TPU v5e with 4 TPU chips: * - PROJECT_ID - Your Google Cloud project * - ZONE - - The `zone `_ where you - want to create your Cloud TPU. + - The GCP zone where you want to create your Cloud TPU. The value you use + depends on the version of TPUs you are using. For more information, see + `TPU regions and zones `_ * - ACCELERATOR_TYPE - - The TPU version you want to use. Specify the TPU version, followed by a - '-' and the number of TPU cores. For example `v5e-4` specifies a v5e TPU - with 4 cores. For more information, see `TPU versions `_. + - The TPU version you want to use. Specify the TPU version, for example + `v5litepod-4` specifies a v5e TPU with 4 cores. For more information, + see `TPU versions `_. * - RUNTIME_VERSION - The TPU VM runtime version to use. For more information see `TPU VM images `_. * - SERVICE_ACCOUNT @@ -98,7 +103,15 @@ Connect to your TPU using SSH: .. code-block:: bash - gcloud compute tpus tpu-vm ssh TPU_NAME + gcloud compute tpus tpu-vm ssh TPU_NAME --zone ZONE + +Install Miniconda + +.. code-block:: bash + + wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh + bash Miniconda3-latest-Linux-x86_64.sh + source ~/.bashrc Create and activate a Conda environment for vLLM: @@ -162,9 +175,11 @@ Run the Docker image with the following command: .. note:: - Since TPU relies on XLA which requires static shapes, vLLM bucketizes the possible input shapes and compiles an XLA graph for each different shape. - The compilation time may take 20~30 minutes in the first run. - However, the compilation time reduces to ~5 minutes afterwards because the XLA graphs are cached in the disk (in :code:`VLLM_XLA_CACHE_PATH` or :code:`~/.cache/vllm/xla_cache` by default). + Since TPU relies on XLA which requires static shapes, vLLM bucketizes the + possible input shapes and compiles an XLA graph for each shape. The + compilation time may take 20~30 minutes in the first run. However, the + compilation time reduces to ~5 minutes afterwards because the XLA graphs are + cached in the disk (in :code:`VLLM_XLA_CACHE_PATH` or :code:`~/.cache/vllm/xla_cache` by default). .. tip:: @@ -173,7 +188,8 @@ Run the Docker image with the following command: .. 
code-block:: console from torch._C import * # noqa: F403 - ImportError: libopenblas.so.0: cannot open shared object file: No such file or directory + ImportError: libopenblas.so.0: cannot open shared object file: No such + file or directory Install OpenBLAS with the following command: From 32e46e000f77499f4dd7c0bed194e33856f2df24 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sat, 16 Nov 2024 13:35:40 +0800 Subject: [PATCH 005/397] [Frontend] Automatic detection of chat content format from AST (#9919) Signed-off-by: DarkLight1337 --- .../serving/openai_compatible_server.md | 18 +- tests/entrypoints/openai/test_serving_chat.py | 3 +- tests/entrypoints/test_chat_utils.py | 619 +++++++++++------- vllm/config.py | 2 - vllm/engine/arg_utils.py | 10 - vllm/engine/llm_engine.py | 4 +- vllm/entrypoints/chat_utils.py | 246 ++++++- vllm/entrypoints/llm.py | 44 +- vllm/entrypoints/openai/api_server.py | 13 +- vllm/entrypoints/openai/cli_args.py | 17 +- vllm/entrypoints/openai/protocol.py | 71 +- vllm/entrypoints/openai/run_batch.py | 2 + vllm/entrypoints/openai/serving_chat.py | 40 +- vllm/entrypoints/openai/serving_embedding.py | 12 +- vllm/entrypoints/openai/serving_engine.py | 17 +- .../openai/serving_tokenization.py | 20 +- 16 files changed, 788 insertions(+), 350 deletions(-) diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md index 78965813b1213..79d032bf8b211 100644 --- a/docs/source/serving/openai_compatible_server.md +++ b/docs/source/serving/openai_compatible_server.md @@ -172,12 +172,20 @@ completion = client.chat.completions.create( ] ) ``` -Most chat templates for LLMs expect the `content` to be a `string` but there are some newer models like -`meta-llama/Llama-Guard-3-1B` that expect the content to be parsed with the new OpenAI spec. In order to choose which -format the content needs to be parsed in by vLLM, please use the `--chat-template-text-format` argument to specify -between `string` or `openai`. The default value is `string` and vLLM internally converts both spec formats to match -this, unless explicitly specified. +Most chat templates for LLMs expect the `content` field to be a string, but there are some newer models like +`meta-llama/Llama-Guard-3-1B` that expect the content to be formatted according to the OpenAI schema in the +request. vLLM provides best-effort support to detect this automatically, which is logged as a string like +*"Detected the chat template content format to be..."*, and internally converts incoming requests to match +the detected format, which can be one of: + +- `"string"`: A string. + - Example: `"Hello world"` +- `"openai"`: A list of dictionaries, similar to OpenAI schema. + - Example: `[{"type": "text", "text": "Hello world!"}]` + +If the result is not what you expect, you can set the `--chat-template-content-format` CLI argument +to override which format to use. 
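For example, the same user message can be sent in either form. The snippet below is a minimal sketch (it assumes a locally running vLLM OpenAI-compatible server and uses a placeholder model name); once the content format is resolved, both requests should be handled equivalently:

```python
from openai import OpenAI

# Assumes a local vLLM server; adjust the base URL, API key, and model
# name for your deployment.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

# "string" content format: the message content is a plain string.
client.chat.completions.create(
    model="meta-llama/Llama-Guard-3-1B",
    messages=[{"role": "user", "content": "Hello world!"}],
)

# "openai" content format: the same content as a list of typed parts.
client.chat.completions.create(
    model="meta-llama/Llama-Guard-3-1B",
    messages=[{
        "role": "user",
        "content": [{"type": "text", "text": "Hello world!"}],
    }],
)
```

If the detected format is not right for your model, start the server with `--chat-template-content-format string` (or `openai`) to force the desired behavior.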
## Command line arguments for the server diff --git a/tests/entrypoints/openai/test_serving_chat.py b/tests/entrypoints/openai/test_serving_chat.py index e969d33775d86..93660e6118ca8 100644 --- a/tests/entrypoints/openai/test_serving_chat.py +++ b/tests/entrypoints/openai/test_serving_chat.py @@ -26,7 +26,6 @@ class MockModelConfig: tokenizer = MODEL_NAME trust_remote_code = False tokenizer_mode = "auto" - chat_template_text_format = "string" max_model_len = 100 tokenizer_revision = None multimodal_config = MultiModalConfig() @@ -49,6 +48,7 @@ async def _async_serving_chat_init(): BASE_MODEL_PATHS, response_role="assistant", chat_template=CHAT_TEMPLATE, + chat_template_content_format="auto", lora_modules=None, prompt_adapters=None, request_logger=None) @@ -70,6 +70,7 @@ def test_serving_chat_should_set_correct_max_tokens(): BASE_MODEL_PATHS, response_role="assistant", chat_template=CHAT_TEMPLATE, + chat_template_content_format="auto", lora_modules=None, prompt_adapters=None, request_logger=None) diff --git a/tests/entrypoints/test_chat_utils.py b/tests/entrypoints/test_chat_utils.py index 5fa466f8f041f..72477e048eafa 100644 --- a/tests/entrypoints/test_chat_utils.py +++ b/tests/entrypoints/test_chat_utils.py @@ -6,15 +6,24 @@ from vllm.assets.image import ImageAsset from vllm.config import ModelConfig -from vllm.entrypoints.chat_utils import (parse_chat_messages, - parse_chat_messages_futures) +from vllm.entrypoints.chat_utils import (_try_extract_ast, load_chat_template, + parse_chat_messages, + parse_chat_messages_futures, + resolve_chat_template_content_format) from vllm.entrypoints.llm import apply_hf_chat_template from vllm.multimodal import MultiModalDataDict from vllm.multimodal.utils import encode_image_base64 from vllm.transformers_utils.tokenizer_group import TokenizerGroup +from ..utils import VLLM_PATH + +EXAMPLES_DIR = VLLM_PATH / "examples" + PHI3V_MODEL_ID = "microsoft/Phi-3.5-vision-instruct" +ULTRAVOX_MODEL_ID = "fixie-ai/ultravox-v0_3" +QWEN2VL_MODEL_ID = "Qwen/Qwen2-VL-2B-Instruct" MLLAMA_MODEL_ID = "meta-llama/Llama-3.2-11B-Vision-Instruct" +LLAMA_GUARD_MODEL_ID = "meta-llama/Llama-Guard-3-1B" @pytest.fixture(scope="function") @@ -26,7 +35,6 @@ def phi3v_model_config(): trust_remote_code=True, dtype="bfloat16", seed=0, - chat_template_text_format="string", limit_mm_per_prompt={ "image": 2, }) @@ -94,19 +102,24 @@ def test_parse_chat_messages_single_image( phi3v_tokenizer, image_url, ): - conversation, mm_data = parse_chat_messages([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "text", - "text": "What's in the image?" - }] - }], phi3v_model_config, phi3v_tokenizer) + conversation, mm_data = parse_chat_messages( + [{ + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What's in the image?" + }] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) assert conversation == [{ "role": "user", @@ -121,19 +134,24 @@ async def test_parse_chat_messages_single_image_async( phi3v_tokenizer, image_url, ): - conversation, mm_future = parse_chat_messages_futures([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "text", - "text": "What's in the image?" 
- }] - }], phi3v_model_config, phi3v_tokenizer) + conversation, mm_future = parse_chat_messages_futures( + [{ + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What's in the image?" + }] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) assert conversation == [{ "role": "user", @@ -147,24 +165,29 @@ def test_parse_chat_messages_multiple_images( phi3v_tokenizer, image_url, ): - conversation, mm_data = parse_chat_messages([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "text", - "text": "What's in these images?" - }] - }], phi3v_model_config, phi3v_tokenizer) + conversation, mm_data = parse_chat_messages( + [{ + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What's in these images?" + }] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) assert conversation == [{ "role": @@ -181,24 +204,29 @@ async def test_parse_chat_messages_multiple_images_async( phi3v_tokenizer, image_url, ): - conversation, mm_future = parse_chat_messages_futures([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "text", - "text": "What's in these images?" - }] - }], phi3v_model_config, phi3v_tokenizer) + conversation, mm_future = parse_chat_messages_futures( + [{ + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What's in these images?" + }] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) assert conversation == [{ "role": @@ -214,27 +242,31 @@ def test_parse_chat_messages_placeholder_already_in_prompt( phi3v_tokenizer, image_url, ): - conversation, mm_data = parse_chat_messages([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": - "text", - "text": - "What's in <|image_1|> and how does it compare to <|image_2|>?" - }] - }], phi3v_model_config, phi3v_tokenizer) - + conversation, mm_data = parse_chat_messages( + [{ + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": + "text", + "text": + "What's in <|image_1|> and how does it compare to <|image_2|>?" + }] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) assert conversation == [{ "role": "user", @@ -249,26 +281,35 @@ def test_parse_chat_messages_placeholder_one_already_in_prompt( phi3v_tokenizer, image_url, ): - conversation, mm_data = parse_chat_messages([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": - "text", - "text": - "What's in <|image_1|> and how does it compare to the other one?" 
- }] - }], phi3v_model_config, phi3v_tokenizer) + conversation, mm_data = parse_chat_messages( + [{ + "role": + "user", + "content": [ + { + "type": "image_url", + "image_url": { + "url": image_url + } + }, + { + "type": "image_url", + "image_url": { + "url": image_url + } + }, + { + "type": + "text", + "text": + "What's in <|image_1|> and how does it compare to the other one?" # noqa: E501 + } + ] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) assert conversation == [{ "role": @@ -285,34 +326,39 @@ def test_parse_chat_messages_multiple_images_across_messages( phi3v_tokenizer, image_url, ): - conversation, mm_data = parse_chat_messages([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } + conversation, mm_data = parse_chat_messages( + [{ + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What's in this image?" + }] }, { - "type": "text", - "text": "What's in this image?" - }] - }, { - "role": "assistant", - "content": "Some stuff." - }, { - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } + "role": "assistant", + "content": "Some stuff." }, { - "type": "text", - "text": "What about this one?" - }] - }], phi3v_model_config, phi3v_tokenizer) + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What about this one?" + }] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) assert conversation == [ { @@ -335,7 +381,6 @@ def test_parse_chat_messages_context_text_format( phi3v_model_config, phi3v_tokenizer, ): - phi3v_model_config.chat_template_text_format = "openai" conversation, mm_data = parse_chat_messages( [{ "role": "user", @@ -349,7 +394,11 @@ def test_parse_chat_messages_context_text_format( }, { "role": "user", "content": "What about this one?" - }], phi3v_model_config, phi3v_tokenizer) + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="openai", + ) assert conversation == [ { @@ -389,29 +438,34 @@ def test_parse_chat_messages_rejects_too_many_images_in_one_message( ValueError, match="At most 2 image\\(s\\) may be provided in one request\\." ): - parse_chat_messages([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "text", - "text": "What's in these images?" - }] - }], phi3v_model_config, phi3v_tokenizer) + parse_chat_messages( + [{ + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What's in these images?" + }] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) def test_parse_chat_messages_rejects_too_many_images_across_messages( @@ -427,39 +481,44 @@ def test_parse_chat_messages_rejects_too_many_images_across_messages( ValueError, match="At most 2 image\\(s\\) may be provided in one request\\." 
): - parse_chat_messages([{ - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } + parse_chat_messages( + [{ + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What's in this image?" + }] }, { - "type": "text", - "text": "What's in this image?" - }] - }, { - "role": "assistant", - "content": "Some stuff." - }, { - "role": - "user", - "content": [{ - "type": "image_url", - "image_url": { - "url": image_url - } + "role": "assistant", + "content": "Some stuff." }, { - "type": "image_url", - "image_url": { - "url": image_url - } - }, { - "type": "text", - "text": "What about these two?" - }] - }], phi3v_model_config, phi3v_tokenizer) + "role": + "user", + "content": [{ + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "image_url", + "image_url": { + "url": image_url + } + }, { + "type": "text", + "text": "What about these two?" + }] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) def test_parse_chat_messages_multiple_images_uncommon_input( @@ -467,17 +526,22 @@ def test_parse_chat_messages_multiple_images_uncommon_input( phi3v_tokenizer, image_url, ): - conversation, mm_data = parse_chat_messages([{ - "role": - "user", - "content": [ - "What's in these images?", { - "image_url": image_url - }, { - "image_url": image_url - } - ] - }], phi3v_model_config, phi3v_tokenizer) + conversation, mm_data = parse_chat_messages( + [{ + "role": + "user", + "content": [ + "What's in these images?", { + "image_url": image_url + }, { + "image_url": image_url + } + ] + }], + phi3v_model_config, + phi3v_tokenizer, + content_format="string", + ) assert conversation == [{ "role": @@ -495,16 +559,21 @@ def test_mllama_single_image( image_url, ): """Ensures that a single image is parsed correctly mllama.""" - conversation, mm_data = parse_chat_messages([{ - "role": - "user", - "content": [{ - 'type': 'text', - 'text': 'The content of this image is:' - }, { - "image_url": image_url - }] - }], mllama_model_config, mllama_tokenizer) + conversation, mm_data = parse_chat_messages( + [{ + "role": + "user", + "content": [{ + 'type': 'text', + 'text': 'The content of this image is:' + }, { + "image_url": image_url + }] + }], + mllama_model_config, + mllama_tokenizer, + content_format="openai", + ) _assert_mm_data_is_image_input(mm_data, 1) assert conversation == [{ 'role': @@ -524,26 +593,31 @@ def test_mllama_interleaved_images( image_url, ): """Ensures that multiple image are parsed as interleaved dicts.""" - conversation, mm_data = parse_chat_messages([{ - "role": - "user", - "content": [ - { - 'type': 'text', - 'text': 'The content of the first image is:' - }, - { - "image_url": image_url - }, - { - 'type': 'text', - 'text': 'The content of the second image is:' - }, - { - "image_url": image_url - }, - ] - }], mllama_model_config, mllama_tokenizer) + conversation, mm_data = parse_chat_messages( + [{ + "role": + "user", + "content": [ + { + 'type': 'text', + 'text': 'The content of the first image is:' + }, + { + "image_url": image_url + }, + { + 'type': 'text', + 'text': 'The content of the second image is:' + }, + { + "image_url": image_url + }, + ] + }], + mllama_model_config, + mllama_tokenizer, + content_format="openai", + ) _assert_mm_data_is_image_input(mm_data, 2) assert conversation == [{ 'role': @@ -626,6 +700,7 @@ def get_conversation(is_hf: bool): vllm_conversation, model_config, tokenizer_group, + content_format="openai", 
) vllm_result = apply_hf_chat_template( @@ -636,3 +711,89 @@ def get_conversation(is_hf: bool): ) assert hf_result == vllm_result + + +# yapf: disable +@pytest.mark.parametrize( + ("model", "expected_format"), + [(PHI3V_MODEL_ID, "string"), + (QWEN2VL_MODEL_ID, "openai"), + (ULTRAVOX_MODEL_ID, "string"), + (MLLAMA_MODEL_ID, "openai"), + (LLAMA_GUARD_MODEL_ID, "openai")], +) +# yapf: enable +def test_resolve_content_format_hf_defined(model, expected_format): + tokenizer_group = TokenizerGroup( + model, + enable_lora=False, + max_num_seqs=5, + max_input_length=None, + ) + tokenizer = tokenizer_group.tokenizer + + chat_template = tokenizer.chat_template + assert isinstance(chat_template, str) + + print("[TEXT]") + print(chat_template) + print("[AST]") + print(_try_extract_ast(chat_template)) + + resolved_format = resolve_chat_template_content_format( + None, # Test detecting the tokenizer's chat_template + "auto", + tokenizer, + ) + + assert resolved_format == expected_format + + +# yapf: disable +@pytest.mark.parametrize( + ("template_path", "expected_format"), + [("template_alpaca.jinja", "string"), + ("template_baichuan.jinja", "string"), + ("template_blip2.jinja", "string"), + ("template_chatglm.jinja", "string"), + ("template_chatglm2.jinja", "string"), + ("template_chatml.jinja", "string"), + ("template_falcon_180b.jinja", "string"), + ("template_falcon.jinja", "string"), + ("template_inkbot.jinja", "string"), + ("template_llava.jinja", "string"), + ("template_vlm2vec.jinja", "openai"), + ("tool_chat_template_granite_20b_fc.jinja", "string"), + ("tool_chat_template_hermes.jinja", "string"), + ("tool_chat_template_internlm2_tool.jinja", "string"), + ("tool_chat_template_llama3.1_json.jinja", "string"), + ("tool_chat_template_llama3.2_json.jinja", "string"), + ("tool_chat_template_mistral_parallel.jinja", "string"), + ("tool_chat_template_mistral.jinja", "string")], +) +# yapf: enable +def test_resolve_content_format_examples(template_path, expected_format): + tokenizer_group = TokenizerGroup( + PHI3V_MODEL_ID, + enable_lora=False, + max_num_seqs=5, + max_input_length=None, + ) + dummy_tokenizer = tokenizer_group.tokenizer + dummy_tokenizer.chat_template = None + + chat_template = load_chat_template(EXAMPLES_DIR / template_path) + assert isinstance(chat_template, str) + + print("[TEXT]") + print(chat_template) + print("[AST]") + print(_try_extract_ast(chat_template)) + + resolved_format = resolve_chat_template_content_format( + chat_template, + "auto", + dummy_tokenizer, + ) + + assert resolved_format == expected_format diff --git a/vllm/config.py b/vllm/config.py index 1c190da1d327e..64b2f75e092de 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -155,7 +155,6 @@ def __init__( limit_mm_per_prompt: Optional[Mapping[str, int]] = None, use_async_output_proc: bool = True, config_format: ConfigFormat = ConfigFormat.AUTO, - chat_template_text_format: str = "string", hf_overrides: Optional[HfOverrides] = None, mm_processor_kwargs: Optional[Dict[str, Any]] = None, override_neuron_config: Optional[Dict[str, Any]] = None, @@ -216,7 +215,6 @@ def __init__( self.model, revision) self.dtype = _get_and_verify_dtype(self.hf_text_config, dtype) self.use_async_output_proc = use_async_output_proc - self.chat_template_text_format = chat_template_text_format self.mm_processor_kwargs = mm_processor_kwargs # Set enforce_eager to False if the value is unset. 
diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index d73f95f59c71f..92fa87c7fa45b 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -90,7 +90,6 @@ class EngineArgs: task: TaskOption = "auto" skip_tokenizer_init: bool = False tokenizer_mode: str = 'auto' - chat_template_text_format: str = 'string' trust_remote_code: bool = False allowed_local_media_path: str = "" download_dir: Optional[str] = None @@ -258,14 +257,6 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'fast tokenizer if available.\n* "slow" will ' 'always use the slow tokenizer. \n* ' '"mistral" will always use the `mistral_common` tokenizer.') - parser.add_argument( - '--chat-template-text-format', - type=str, - default=EngineArgs.chat_template_text_format, - choices=['string', 'openai'], - help='The format to render text content within a chat template. ' - '"string" will keep the content field as a string whereas ' - '"openai" will parse content in the current OpenAI format.') parser.add_argument('--trust-remote-code', action='store_true', help='Trust remote code from huggingface.') @@ -894,7 +885,6 @@ def create_model_config(self) -> ModelConfig: # We know this is not None because we set it in __post_init__ tokenizer=cast(str, self.tokenizer), tokenizer_mode=self.tokenizer_mode, - chat_template_text_format=self.chat_template_text_format, trust_remote_code=self.trust_remote_code, allowed_local_media_path=self.allowed_local_media_path, dtype=self.dtype, diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index aa9c7893c4cfe..9a2d73a020c8f 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -262,8 +262,7 @@ def __init__( "num_scheduler_steps=%d, chunked_prefill_enabled=%s " "multi_step_stream_outputs=%s, enable_prefix_caching=%s, " "use_async_output_proc=%s, use_cached_outputs=%s, " - "chat_template_text_format=%s, mm_processor_kwargs=%s, " - "pooler_config=%r)", + "mm_processor_kwargs=%s, pooler_config=%r)", VLLM_VERSION, model_config.model, speculative_config, @@ -296,7 +295,6 @@ def __init__( cache_config.enable_prefix_caching, model_config.use_async_output_proc, use_cached_outputs, - model_config.chat_template_text_format, model_config.mm_processor_kwargs, model_config.pooler_config, ) diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index 3ca460c47c3bd..abee5ac46391c 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -2,12 +2,14 @@ import codecs import json from abc import ABC, abstractmethod -from collections import defaultdict +from collections import defaultdict, deque from functools import lru_cache, partial from pathlib import Path from typing import (Any, Awaitable, Callable, Dict, Generic, Iterable, List, Literal, Mapping, Optional, Tuple, TypeVar, Union, cast) +import jinja2.nodes +import transformers.utils.chat_template_utils as hf_chat_utils # yapf conflicts with isort for this block # yapf: disable from openai.types.chat import (ChatCompletionAssistantMessageParam, @@ -153,6 +155,199 @@ class ConversationMessage(TypedDict, total=False): """The tool calls generated by the model, such as function calls.""" +# Passed in by user +ChatTemplateContentFormatOption = Literal["auto", "string", "openai"] + +# Used internally +_ChatTemplateContentFormat = Literal["string", "openai"] + + +def _is_var_access(node: jinja2.nodes.Node, varname: str) -> bool: + if isinstance(node, jinja2.nodes.Name): + return node.ctx == "load" and node.name == varname + + return 
False + + +def _is_attr_access(node: jinja2.nodes.Node, varname: str, key: str) -> bool: + if isinstance(node, jinja2.nodes.Getitem): + return (_is_var_access(node.node, varname) + and isinstance(node.arg, jinja2.nodes.Const) + and node.arg.value == key) + + if isinstance(node, jinja2.nodes.Getattr): + return _is_var_access(node.node, varname) and node.attr == key + + return False + + +def _is_var_or_elems_access( + node: jinja2.nodes.Node, + varname: str, + key: Optional[str] = None, +) -> bool: + if isinstance(node, jinja2.nodes.Filter): + return (node.node is not None + and _is_var_or_elems_access(node.node, varname, key)) + if isinstance(node, jinja2.nodes.Test): + return _is_var_or_elems_access(node.node, varname, key) + + if (isinstance(node, jinja2.nodes.Getitem) + and isinstance(node.arg, jinja2.nodes.Slice)): + return _is_var_or_elems_access(node.node, varname, key) + + # yapf: disable + return ( + _is_attr_access(node, varname, key) if key + else _is_var_access(node, varname) + ) # yapf: enable + + +def _iter_nodes_assign_var_or_elems(root: jinja2.nodes.Node, varname: str): + # Global variable that is implicitly defined at the root + yield root, varname + + # Iterative BFS + related_varnames = deque([varname]) + while related_varnames: + related_varname = related_varnames.popleft() + + for assign_ast in root.find_all(jinja2.nodes.Assign): + lhs = assign_ast.target + rhs = assign_ast.node + + if _is_var_or_elems_access(rhs, related_varname): + assert isinstance(lhs, jinja2.nodes.Name) + yield assign_ast, lhs.name + + # Avoid infinite looping for self-assignment + if lhs.name != related_varname: + related_varnames.append(lhs.name) + + +# NOTE: The proper way to handle this is to build a CFG so that we can handle +# the scope in which each variable is defined, but that is too complicated +def _iter_nodes_assign_messages_item(root: jinja2.nodes.Node): + messages_varnames = [ + varname + for _, varname in _iter_nodes_assign_var_or_elems(root, "messages") + ] + + # Search for {%- for message in messages -%} loops + for loop_ast in root.find_all(jinja2.nodes.For): + loop_iter = loop_ast.iter + loop_target = loop_ast.target + + for varname in messages_varnames: + if _is_var_or_elems_access(loop_iter, varname): + assert isinstance(loop_target, jinja2.nodes.Name) + yield loop_ast, loop_target.name + break + + +def _iter_nodes_assign_content_item(root: jinja2.nodes.Node): + message_varnames = [ + varname for _, varname in _iter_nodes_assign_messages_item(root) + ] + + # Search for {%- for content in message['content'] -%} loops + for loop_ast in root.find_all(jinja2.nodes.For): + loop_iter = loop_ast.iter + loop_target = loop_ast.target + + for varname in message_varnames: + if _is_var_or_elems_access(loop_iter, varname, "content"): + assert isinstance(loop_target, jinja2.nodes.Name) + yield loop_ast, loop_target.name + break + + +def _try_extract_ast(chat_template: str) -> Optional[jinja2.nodes.Template]: + try: + jinja_compiled = hf_chat_utils._compile_jinja_template(chat_template) + return jinja_compiled.environment.parse(chat_template) + except Exception: + logger.exception("Error when compiling Jinja template") + return None + + +def _detect_content_format( + chat_template: str, + *, + default: _ChatTemplateContentFormat, +) -> _ChatTemplateContentFormat: + jinja_ast = _try_extract_ast(chat_template) + if jinja_ast is None: + return default + + try: + next(_iter_nodes_assign_content_item(jinja_ast)) + except StopIteration: + return "string" + except Exception: + logger.exception("Error 
when parsing AST of Jinja template") + return default + else: + return "openai" + + +def _resolve_chat_template_content_format( + chat_template: Optional[str], + given_format: ChatTemplateContentFormatOption, + tokenizer: AnyTokenizer, +) -> _ChatTemplateContentFormat: + if isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)): + tokenizer_chat_template = tokenizer.chat_template + else: + tokenizer_chat_template = None + + jinja_text: Optional[str] + if isinstance(tokenizer_chat_template, str) and chat_template is None: + jinja_text = tokenizer_chat_template + elif (isinstance(tokenizer_chat_template, dict) + and chat_template in tokenizer_chat_template): + jinja_text = tokenizer_chat_template[chat_template] + else: + jinja_text = load_chat_template(chat_template, is_literal=True) + + detected_format = ("string" if jinja_text is None else + _detect_content_format(jinja_text, default="string")) + + return detected_format if given_format == "auto" else given_format + + +@lru_cache +def resolve_chat_template_content_format( + chat_template: Optional[str], + given_format: ChatTemplateContentFormatOption, + tokenizer: AnyTokenizer, +) -> _ChatTemplateContentFormat: + detected_format = _resolve_chat_template_content_format( + chat_template, + given_format, + tokenizer, + ) + + logger.info( + "Detected the chat template content format to be '%s'. " + "You can set `--chat-template-content-format` to override this.", + detected_format, + ) + + if given_format != "auto" and given_format != detected_format: + logger.warning( + "You specified `--chat-template-content-format %s` " + "which is different from the detected format '%s'. " + "If our automatic detection is incorrect, please consider " + "opening a GitHub issue so that we can improve it: " + "https://github.com/vllm-project/vllm/issues/new/choose", + given_format, + detected_format, + ) + + return detected_format + + ModalityStr = Literal["image", "audio", "video"] _T = TypeVar("_T") @@ -407,12 +602,23 @@ def validate_chat_template(chat_template: Optional[Union[Path, str]]): def load_chat_template( - chat_template: Optional[Union[Path, str]]) -> Optional[str]: + chat_template: Optional[Union[Path, str]], + *, + is_literal: bool = False, +) -> Optional[str]: if chat_template is None: return None + + if is_literal: + if isinstance(chat_template, Path): + raise TypeError("chat_template is expected to be read directly " + "from its value") + + return codecs.decode(chat_template, "unicode_escape") + try: with open(chat_template) as f: - resolved_chat_template = f.read() + return f.read() except OSError as e: if isinstance(chat_template, Path): raise @@ -426,10 +632,7 @@ def load_chat_template( # If opening a file fails, set chat template to be args to # ensure we decode so our escape are interpreted correctly - resolved_chat_template = codecs.decode(chat_template, "unicode_escape") - - logger.info("Using supplied chat template:\n%s", resolved_chat_template) - return resolved_chat_template + return load_chat_template(chat_template, is_literal=True) # TODO: Let user specify how to insert multimodal tokens into prompt @@ -464,7 +667,6 @@ def _get_full_multimodal_text_prompt(placeholder_counts: Dict[str, int], _AudioParser = partial(cast, ChatCompletionContentPartAudioParam) _RefusalParser = partial(cast, ChatCompletionContentPartRefusalParam) _VideoParser = partial(cast, ChatCompletionContentPartVideoParam) -MODEL_KEEP_MULTI_MODAL_CONTENT = {'mllama'} # Define a mapping from part types to their corresponding parsing functions. 
MM_PARSER_MAP: Dict[str, Callable[[ChatCompletionContentPartParam], str]] = { @@ -542,18 +744,12 @@ def _parse_chat_message_content_parts( role: str, parts: Iterable[ChatCompletionContentPartParam], mm_tracker: BaseMultiModalItemTracker, - chat_template_text_format: str, + *, + wrap_dicts: bool, ) -> List[ConversationMessage]: content: List[Union[str, Dict[str, str]]] = [] mm_parser = mm_tracker.create_parser() - model_config = mm_tracker.model_config - - wrap_dicts = (chat_template_text_format == "openai" - or (model_config.task == "embedding" - and model_config.is_multimodal_model) - or (model_config.hf_config.model_type - in MODEL_KEEP_MULTI_MODAL_CONTENT)) for part in parts: parse_res = _parse_chat_message_content_part( @@ -578,9 +774,11 @@ def _parse_chat_message_content_parts( def _parse_chat_message_content_part( - part: ChatCompletionContentPartParam, - mm_parser: BaseMultiModalContentParser, - wrap_dicts: bool) -> Optional[Union[str, Dict[str, str]]]: + part: ChatCompletionContentPartParam, + mm_parser: BaseMultiModalContentParser, + *, + wrap_dicts: bool, +) -> Optional[Union[str, Dict[str, str]]]: """Parses a single part of a conversation. If wrap_dicts is True, structured dictionary pieces for texts and images will be wrapped in dictionaries, i.e., {"type": "text", "text", ...} and @@ -629,7 +827,7 @@ def _parse_chat_message_content_part( def _parse_chat_message_content( message: ChatCompletionMessageParam, mm_tracker: BaseMultiModalItemTracker, - chat_template_text_format: str, + content_format: _ChatTemplateContentFormat, ) -> List[ConversationMessage]: role = message["role"] content = message.get("content") @@ -645,7 +843,7 @@ def _parse_chat_message_content( role, content, # type: ignore mm_tracker, - chat_template_text_format, + wrap_dicts=(content_format == "openai"), ) for result_msg in result: @@ -684,6 +882,7 @@ def parse_chat_messages( messages: List[ChatCompletionMessageParam], model_config: ModelConfig, tokenizer: AnyTokenizer, + content_format: _ChatTemplateContentFormat, ) -> Tuple[List[ConversationMessage], Optional[MultiModalDataDict]]: conversation: List[ConversationMessage] = [] mm_tracker = MultiModalItemTracker(model_config, tokenizer) @@ -692,7 +891,7 @@ def parse_chat_messages( sub_messages = _parse_chat_message_content( msg, mm_tracker, - model_config.chat_template_text_format, + content_format, ) conversation.extend(sub_messages) @@ -706,6 +905,7 @@ def parse_chat_messages_futures( messages: List[ChatCompletionMessageParam], model_config: ModelConfig, tokenizer: AnyTokenizer, + content_format: _ChatTemplateContentFormat, ) -> Tuple[List[ConversationMessage], Awaitable[Optional[MultiModalDataDict]]]: conversation: List[ConversationMessage] = [] mm_tracker = AsyncMultiModalItemTracker(model_config, tokenizer) @@ -714,7 +914,7 @@ def parse_chat_messages_futures( sub_messages = _parse_chat_message_content( msg, mm_tracker, - model_config.chat_template_text_format, + content_format, ) conversation.extend(sub_messages) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 4b33fc1458ee3..86b0b6893f1d9 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -13,9 +13,11 @@ TaskOption) from vllm.engine.llm_engine import LLMEngine from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam, + ChatTemplateContentFormatOption, apply_hf_chat_template, apply_mistral_chat_template, - parse_chat_messages) + parse_chat_messages, + resolve_chat_template_content_format) from vllm.inputs import PromptType, TextPrompt, TokensPrompt from 
vllm.inputs.parse import parse_and_batch_prompt from vllm.logger import init_logger @@ -523,6 +525,7 @@ def chat( use_tqdm: bool = True, lora_request: Optional[LoRARequest] = None, chat_template: Optional[str] = None, + chat_template_content_format: ChatTemplateContentFormatOption = "auto", add_generation_prompt: bool = True, continue_final_message: bool = False, tools: Optional[List[Dict[str, Any]]] = None, @@ -539,9 +542,11 @@ def chat( to the OpenAI API. Args: - messages: A list of conversations or a single conversation. - - Each conversation is represented as a list of messages. - - Each message is a dictionary with 'role' and 'content' keys. + messages: A list of conversations or a single conversation. + + - Each conversation is represented as a list of messages. + - Each message is a dictionary with 'role' and 'content' keys. + sampling_params: The sampling parameters for text generation. If None, we use the default sampling parameters. When it is a single value, it is applied to every prompt. When it @@ -551,11 +556,19 @@ def chat( lora_request: LoRA request to use for generation, if any. chat_template: The template to use for structuring the chat. If not provided, the model's default chat template will be used. + chat_template_content_format: The format to render message content. + + - "string" will render the content as a string. + Example: ``"Who are you?"`` + - "openai" will render the content as a list of dictionaries, + similar to OpenAI schema. + Example: ``[{"type": "text", "text": "Who are you?"}]`` + add_generation_prompt: If True, adds a generation template to each message. continue_final_message: If True, continues the final message in - the conversation instead of starting a new one. Cannot be `True` - if `add_generation_prompt` is also `True`. + the conversation instead of starting a new one. Cannot be + ``True`` if ``add_generation_prompt`` is also ``True``. mm_processor_kwargs: Multimodal processor kwarg overrides for this chat request. Only used for offline requests. @@ -576,17 +589,26 @@ def chat( cast(List[ChatCompletionMessageParam], messages) ] + tokenizer = self.get_tokenizer() + model_config = self.llm_engine.get_model_config() + resolved_content_format = resolve_chat_template_content_format( + chat_template, + chat_template_content_format, + tokenizer, + ) + prompts: List[Union[TokensPrompt, TextPrompt]] = [] for msgs in list_of_messages: - tokenizer = self.get_tokenizer() - model_config = self.llm_engine.get_model_config() - # NOTE: _parse_chat_message_content_parts() currently doesn't # handle mm_processor_kwargs, since there is no implementation in # the chat message parsing for it. conversation, mm_data = parse_chat_messages( - msgs, model_config, tokenizer) + msgs, + model_config, + tokenizer, + content_format=resolved_content_format, + ) prompt_data: Union[str, List[int]] if isinstance(tokenizer, MistralTokenizer): @@ -737,7 +759,7 @@ def encode( generation, if any. Returns: - A list of `EmbeddingRequestOutput` objects containing the + A list of ``EmbeddingRequestOutput`` objects containing the generated embeddings in the same order as the input prompts. 
Note: diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index b13f6a228b4c6..b0fe061f5db4a 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -29,6 +29,7 @@ from vllm.engine.multiprocessing.client import MQLLMEngineClient from vllm.engine.multiprocessing.engine import run_mp_engine from vllm.engine.protocol import EngineClient +from vllm.entrypoints.chat_utils import load_chat_template from vllm.entrypoints.launcher import serve_http from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.cli_args import (make_arg_parser, @@ -529,6 +530,9 @@ def init_app_state( state.engine_client = engine_client state.log_stats = not args.disable_log_stats + resolved_chat_template = load_chat_template(args.chat_template) + logger.info("Using supplied chat template:\n%s", resolved_chat_template) + state.openai_serving_chat = OpenAIServingChat( engine_client, model_config, @@ -537,7 +541,8 @@ def init_app_state( lora_modules=args.lora_modules, prompt_adapters=args.prompt_adapters, request_logger=request_logger, - chat_template=args.chat_template, + chat_template=resolved_chat_template, + chat_template_content_format=args.chat_template_content_format, return_tokens_as_token_ids=args.return_tokens_as_token_ids, enable_auto_tools=args.enable_auto_tool_choice, tool_parser=args.tool_call_parser, @@ -557,7 +562,8 @@ def init_app_state( model_config, base_model_paths, request_logger=request_logger, - chat_template=args.chat_template, + chat_template=resolved_chat_template, + chat_template_content_format=args.chat_template_content_format, ) if model_config.task == "embedding" else None state.openai_serving_tokenization = OpenAIServingTokenization( engine_client, @@ -565,7 +571,8 @@ def init_app_state( base_model_paths, lora_modules=args.lora_modules, request_logger=request_logger, - chat_template=args.chat_template, + chat_template=resolved_chat_template, + chat_template_content_format=args.chat_template_content_format, ) diff --git a/vllm/entrypoints/openai/cli_args.py b/vllm/entrypoints/openai/cli_args.py index eb08a89293370..24c206a1261f2 100644 --- a/vllm/entrypoints/openai/cli_args.py +++ b/vllm/entrypoints/openai/cli_args.py @@ -7,10 +7,11 @@ import argparse import json import ssl -from typing import List, Optional, Sequence, Union +from typing import List, Optional, Sequence, Union, get_args from vllm.engine.arg_utils import AsyncEngineArgs, nullable_str -from vllm.entrypoints.chat_utils import validate_chat_template +from vllm.entrypoints.chat_utils import (ChatTemplateContentFormatOption, + validate_chat_template) from vllm.entrypoints.openai.serving_engine import (LoRAModulePath, PromptAdapterPath) from vllm.entrypoints.openai.tool_parsers import ToolParserManager @@ -132,6 +133,18 @@ def make_arg_parser(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: help="The file path to the chat template, " "or the template in single-line form " "for the specified model") + parser.add_argument( + '--chat-template-content-format', + type=str, + default="auto", + choices=get_args(ChatTemplateContentFormatOption), + help='The format to render message content within a chat template.' + '\n\n' + '* "string" will render the content as a string. ' + 'Example: "Hello World"\n' + '* "openai" will render the content as a list of dictionaries, ' + 'similar to OpenAI schema. 
' + 'Example: [{"type": "text", "text": "Hello world!"}]') parser.add_argument("--response-role", type=nullable_str, default="assistant", diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index 820aefd8800d9..b7b064ae01f05 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -5,9 +5,8 @@ from typing import Any, Dict, List, Literal, Optional, Union import torch -from openai.types.chat import ChatCompletionContentPartParam from pydantic import BaseModel, ConfigDict, Field, model_validator -from typing_extensions import Annotated, Required, TypedDict +from typing_extensions import Annotated from vllm.entrypoints.chat_utils import ChatCompletionMessageParam from vllm.pooling_params import PoolingParams @@ -35,26 +34,6 @@ assert _LONG_INFO.max == _MOCK_LONG_INFO.max -class CustomChatCompletionMessageParam(TypedDict, total=False): - """Enables custom roles in the Chat Completion API.""" - role: Required[str] - """The role of the message's author.""" - - content: Union[str, List[ChatCompletionContentPartParam]] - """The contents of the message.""" - - name: str - """An optional name for the participant. - - Provides the model information to differentiate between participants of the - same role. - """ - - tool_call_id: Optional[str] - - tool_calls: Optional[List[dict]] - - class OpenAIBaseModel(BaseModel): # OpenAI API does not allow extra fields model_config = ConfigDict(extra="forbid") @@ -1054,16 +1033,56 @@ class TokenizeCompletionRequest(OpenAIBaseModel): model: str prompt: str - add_special_tokens: bool = Field(default=True) + add_special_tokens: bool = Field( + default=True, + description=( + "If true (the default), special tokens (e.g. BOS) will be added to " + "the prompt."), + ) class TokenizeChatRequest(OpenAIBaseModel): model: str messages: List[ChatCompletionMessageParam] - add_generation_prompt: bool = Field(default=True) - continue_final_message: bool = Field(default=False) - add_special_tokens: bool = Field(default=False) + add_generation_prompt: bool = Field( + default=True, + description= + ("If true, the generation prompt will be added to the chat template. " + "This is a parameter used by chat template in tokenizer config of the " + "model."), + ) + continue_final_message: bool = Field( + default=False, + description= + ("If this is set, the chat will be formatted so that the final " + "message in the chat is open-ended, without any EOS tokens. The " + "model will continue this message rather than starting a new one. " + "This allows you to \"prefill\" part of the model's response for it. " + "Cannot be used at the same time as `add_generation_prompt`."), + ) + add_special_tokens: bool = Field( + default=False, + description=( + "If true, special tokens (e.g. BOS) will be added to the prompt " + "on top of what is added by the chat template. " + "For most models, the chat template takes care of adding the " + "special tokens so this should be set to false (as is the " + "default)."), + ) + chat_template: Optional[str] = Field( + default=None, + description=( + "A Jinja template to use for this conversion. " + "As of transformers v4.44, default chat template is no longer " + "allowed, so you must provide a chat template if the tokenizer " + "does not define one."), + ) + chat_template_kwargs: Optional[Dict[str, Any]] = Field( + default=None, + description=("Additional kwargs to pass to the template renderer. 
" + "Will be accessible by the chat template."), + ) @model_validator(mode="before") @classmethod diff --git a/vllm/entrypoints/openai/run_batch.py b/vllm/entrypoints/openai/run_batch.py index 1b422a93263b2..00cdb3b6839f5 100644 --- a/vllm/entrypoints/openai/run_batch.py +++ b/vllm/entrypoints/openai/run_batch.py @@ -222,6 +222,7 @@ async def main(args): prompt_adapters=None, request_logger=request_logger, chat_template=None, + chat_template_content_format="auto", enable_prompt_tokens_details=args.enable_prompt_tokens_details, ) if model_config.task == "generate" else None openai_serving_embedding = OpenAIServingEmbedding( @@ -230,6 +231,7 @@ async def main(args): base_model_paths, request_logger=request_logger, chat_template=None, + chat_template_content_format="auto", ) if model_config.task == "embedding" else None tracker = BatchProgressTracker() diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 77cae00ae827f..2eef909eb9319 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -10,7 +10,8 @@ from vllm.config import ModelConfig from vllm.engine.protocol import EngineClient -from vllm.entrypoints.chat_utils import ConversationMessage, load_chat_template +from vllm.entrypoints.chat_utils import (ChatTemplateContentFormatOption, + ConversationMessage) from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import ( ChatCompletionLogProb, ChatCompletionLogProbs, @@ -38,20 +39,23 @@ class OpenAIServingChat(OpenAIServing): - def __init__(self, - engine_client: EngineClient, - model_config: ModelConfig, - base_model_paths: List[BaseModelPath], - response_role: str, - *, - lora_modules: Optional[List[LoRAModulePath]], - prompt_adapters: Optional[List[PromptAdapterPath]], - request_logger: Optional[RequestLogger], - chat_template: Optional[str], - return_tokens_as_token_ids: bool = False, - enable_auto_tools: bool = False, - tool_parser: Optional[str] = None, - enable_prompt_tokens_details: bool = False): + def __init__( + self, + engine_client: EngineClient, + model_config: ModelConfig, + base_model_paths: List[BaseModelPath], + response_role: str, + *, + lora_modules: Optional[List[LoRAModulePath]], + prompt_adapters: Optional[List[PromptAdapterPath]], + request_logger: Optional[RequestLogger], + chat_template: Optional[str], + chat_template_content_format: ChatTemplateContentFormatOption, + return_tokens_as_token_ids: bool = False, + enable_auto_tools: bool = False, + tool_parser: Optional[str] = None, + enable_prompt_tokens_details: bool = False, + ) -> None: super().__init__(engine_client=engine_client, model_config=model_config, base_model_paths=base_model_paths, @@ -61,8 +65,8 @@ def __init__(self, return_tokens_as_token_ids=return_tokens_as_token_ids) self.response_role = response_role - self.use_tool_use_model_template = False - self.chat_template = load_chat_template(chat_template) + self.chat_template = chat_template + self.chat_template_content_format: Final = chat_template_content_format # set up tool use self.enable_auto_tools: bool = enable_auto_tools @@ -120,6 +124,7 @@ async def create_chat_completion( ) = self._maybe_get_adapters(request) tokenizer = await self.engine_client.get_tokenizer(lora_request) + tool_parser = self.tool_parser # validation for OpenAI tools @@ -157,6 +162,7 @@ async def create_chat_completion( tokenizer, request.messages, chat_template=request.chat_template or self.chat_template, + 
chat_template_content_format=self.chat_template_content_format, add_generation_prompt=request.add_generation_prompt, continue_final_message=request.continue_final_message, tool_dicts=tool_dicts, diff --git a/vllm/entrypoints/openai/serving_embedding.py b/vllm/entrypoints/openai/serving_embedding.py index bbe7db8f13231..74ad7389784fc 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -1,7 +1,7 @@ import asyncio import base64 import time -from typing import AsyncGenerator, List, Literal, Optional, Union, cast +from typing import AsyncGenerator, Final, List, Literal, Optional, Union, cast import numpy as np from fastapi import Request @@ -9,7 +9,7 @@ from vllm.config import ModelConfig from vllm.engine.protocol import EngineClient -from vllm.entrypoints.chat_utils import load_chat_template +from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import (EmbeddingChatRequest, EmbeddingRequest, @@ -77,7 +77,8 @@ def __init__( *, request_logger: Optional[RequestLogger], chat_template: Optional[str], - ): + chat_template_content_format: ChatTemplateContentFormatOption, + ) -> None: super().__init__(engine_client=engine_client, model_config=model_config, base_model_paths=base_model_paths, @@ -85,7 +86,8 @@ def __init__( prompt_adapters=None, request_logger=request_logger) - self.chat_template = load_chat_template(chat_template) + self.chat_template = chat_template + self.chat_template_content_format: Final = chat_template_content_format async def create_embedding( self, @@ -144,6 +146,8 @@ async def create_embedding( tokenizer, request.messages, chat_template=request.chat_template or self.chat_template, + chat_template_content_format=self. 
+ chat_template_content_format, add_generation_prompt=request.add_generation_prompt, continue_final_message=request.continue_final_message, truncate_prompt_tokens=truncate_prompt_tokens, diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index fa315fa516632..cae2877ea7e99 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -11,14 +11,16 @@ from vllm.config import ModelConfig from vllm.engine.protocol import EngineClient +# yapf conflicts with isort for this block +# yapf: disable from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam, + ChatTemplateContentFormatOption, ConversationMessage, apply_hf_chat_template, apply_mistral_chat_template, - parse_chat_messages_futures) + parse_chat_messages_futures, + resolve_chat_template_content_format) from vllm.entrypoints.logger import RequestLogger -# yapf conflicts with isort for this block -# yapf: disable from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, CompletionRequest, DetokenizeRequest, @@ -426,7 +428,8 @@ async def _preprocess_chat( request: ChatLikeRequest, tokenizer: AnyTokenizer, messages: List[ChatCompletionMessageParam], - chat_template: Optional[str] = None, + chat_template: Optional[str], + chat_template_content_format: ChatTemplateContentFormatOption, add_generation_prompt: bool = True, continue_final_message: bool = False, tool_dicts: Optional[List[Dict[str, Any]]] = None, @@ -437,10 +440,16 @@ async def _preprocess_chat( add_special_tokens: bool = False, ) -> Tuple[List[ConversationMessage], Sequence[RequestPrompt], List[TokensPrompt]]: + resolved_content_format = resolve_chat_template_content_format( + chat_template, + chat_template_content_format, + tokenizer, + ) conversation, mm_data_future = parse_chat_messages_futures( messages, self.model_config, tokenizer, + content_format=resolved_content_format, ) _chat_template_kwargs: Dict[str, Any] = dict( diff --git a/vllm/entrypoints/openai/serving_tokenization.py b/vllm/entrypoints/openai/serving_tokenization.py index 1fd82304f7a4d..59b3b1311f881 100644 --- a/vllm/entrypoints/openai/serving_tokenization.py +++ b/vllm/entrypoints/openai/serving_tokenization.py @@ -1,8 +1,8 @@ -from typing import List, Optional, Union +from typing import Final, List, Optional, Union from vllm.config import ModelConfig from vllm.engine.protocol import EngineClient -from vllm.entrypoints.chat_utils import load_chat_template +from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption from vllm.entrypoints.logger import RequestLogger # yapf conflicts with isort for this block # yapf: disable @@ -33,7 +33,8 @@ def __init__( lora_modules: Optional[List[LoRAModulePath]], request_logger: Optional[RequestLogger], chat_template: Optional[str], - ): + chat_template_content_format: ChatTemplateContentFormatOption, + ) -> None: super().__init__(engine_client=engine_client, model_config=model_config, base_model_paths=base_model_paths, @@ -41,12 +42,8 @@ def __init__( prompt_adapters=None, request_logger=request_logger) - # If this is None we use the tokenizer's default chat template - # the list of commonly-used chat template names for HF named templates - hf_chat_templates: List[str] = ['default', 'tool_use'] - self.chat_template = chat_template \ - if chat_template in hf_chat_templates \ - else load_chat_template(chat_template) + self.chat_template = chat_template + self.chat_template_content_format: Final = chat_template_content_format async def create_tokenize( 
self, @@ -75,9 +72,12 @@ async def create_tokenize( request, tokenizer, request.messages, - chat_template=self.chat_template, + chat_template=request.chat_template or self.chat_template, + chat_template_content_format=self. + chat_template_content_format, add_generation_prompt=request.add_generation_prompt, continue_final_message=request.continue_final_message, + chat_template_kwargs=request.chat_template_kwargs, add_special_tokens=request.add_special_tokens, ) else: From 755b85359be910fabe39a75299439fc11beb57d4 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 15 Nov 2024 21:46:27 -0800 Subject: [PATCH 006/397] [doc] add doc for the plugin system (#10372) Signed-off-by: youkaichao --- docs/source/design/class_hierarchy.rst | 2 + docs/source/design/plugin_system.rst | 62 ++++++++++++++++++++++++++ docs/source/index.rst | 1 + docs/source/models/adding_model.rst | 25 +++-------- vllm/plugins/__init__.py | 16 +++++-- 5 files changed, 84 insertions(+), 22 deletions(-) create mode 100644 docs/source/design/plugin_system.rst diff --git a/docs/source/design/class_hierarchy.rst b/docs/source/design/class_hierarchy.rst index 15f0c8ccf77ee..58a888b17ba53 100644 --- a/docs/source/design/class_hierarchy.rst +++ b/docs/source/design/class_hierarchy.rst @@ -1,3 +1,5 @@ +.. _class_hierarchy: + vLLM's Class Hierarchy ======================= diff --git a/docs/source/design/plugin_system.rst b/docs/source/design/plugin_system.rst new file mode 100644 index 0000000000000..bfca702b9267a --- /dev/null +++ b/docs/source/design/plugin_system.rst @@ -0,0 +1,62 @@ +.. _plugin_system: + +vLLM's Plugin System +==================== + +The community frequently requests the ability to extend vLLM with custom features. To facilitate this, vLLM includes a plugin system that allows users to add custom features without modifying the vLLM codebase. This document explains how plugins work in vLLM and how to create a plugin for vLLM. + +How Plugins Work in vLLM +------------------------ + +Plugins are user-registered code that vLLM executes. Given vLLM's architecture (see :ref:`class_hierarchy`), multiple processes may be involved, especially when using distributed inference with various parallelism techniques. To enable plugins successfully, every process created by vLLM needs to load the plugin. This is done by the `load_general_plugins `__ function in the ``vllm.plugins`` module. This function is called for every process created by vLLM before it starts any work. + +How vLLM Discovers Plugins +-------------------------- + +vLLM's plugin system uses the standard Python ``entry_points`` mechanism. This mechanism allows developers to register functions in their Python packages for use by other packages. An example of a plugin: + +.. code-block:: python + + # inside `setup.py` file + from setuptools import setup + + setup(name='vllm_add_dummy_model', + version='0.1', + packages=['vllm_add_dummy_model'], + entry_points={ + 'vllm.general_plugins': + ["register_dummy_model = vllm_add_dummy_model:register"] + }) + + # inside `vllm_add_dummy_model.py` file + def register(): + from vllm import ModelRegistry + + if "MyLlava" not in ModelRegistry.get_supported_archs(): + ModelRegistry.register_model("MyLlava", + "vllm_add_dummy_model.my_llava:MyLlava") + +For more information on adding entry points to your package, please check the `official documentation `__. + +Every plugin has three parts: + +1. **Plugin group**: The name of the entry point group. 
vLLM uses the entry point group ``vllm.general_plugins`` to register general plugins. This is the key of ``entry_points`` in the ``setup.py`` file. Always use ``vllm.general_plugins`` for vLLM's general plugins. + +2. **Plugin name**: The name of the plugin. This is the value in the dictionary of the ``entry_points`` dictionary. In the example above, the plugin name is ``register_dummy_model``. Plugins can be filtered by their names using the ``VLLM_PLUGINS`` environment variable. To load only a specific plugin, set ``VLLM_PLUGINS`` to the plugin name. + +3. **Plugin value**: The fully qualified name of the function to register in the plugin system. In the example above, the plugin value is ``vllm_add_dummy_model:register``, which refers to a function named ``register`` in the ``vllm_add_dummy_model`` module. + +What Can Plugins Do? +-------------------- + +Currently, the primary use case for plugins is to register custom, out-of-the-tree models into vLLM. This is done by calling ``ModelRegistry.register_model`` to register the model. In the future, the plugin system may be extended to support more features, such as swapping in custom implementations for certain classes in vLLM. + +Guidelines for Writing Plugins +------------------------------ + +- **Being re-entrant**: The function specified in the entry point should be re-entrant, meaning it can be called multiple times without causing issues. This is necessary because the function might be called multiple times in some processes. + +Compatibility Guarantee +----------------------- + +vLLM guarantees the interface of documented plugins, such as ``ModelRegistry.register_model``, will always be available for plugins to register models. However, it is the responsibility of plugin developers to ensure their plugins are compatible with the version of vLLM they are targeting. For example, ``"vllm_add_dummy_model.my_llava:MyLlava"`` should be compatible with the version of vLLM that the plugin targets. The interface for the model may change during vLLM's development. \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index a2abd2995b1cc..3b2698a8845ed 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -158,6 +158,7 @@ Documentation design/class_hierarchy design/huggingface_integration + design/plugin_system design/input_processing/model_inputs_index design/kernel/paged_attention design/multimodal/multimodal_index diff --git a/docs/source/models/adding_model.rst b/docs/source/models/adding_model.rst index c6d88cc38e99b..a70ebf99c746f 100644 --- a/docs/source/models/adding_model.rst +++ b/docs/source/models/adding_model.rst @@ -102,11 +102,11 @@ This method should load the weights from the HuggingFace's checkpoint file and a Finally, register your :code:`*ForCausalLM` class to the :code:`_VLLM_MODELS` in `vllm/model_executor/models/registry.py `_. 6. Out-of-Tree Model Integration --------------------------------------------- +-------------------------------- -We also provide a way to integrate a model without modifying the vLLM codebase. Step 2, 3, 4 are still required, but you can skip step 1 and 5. +You can integrate a model without modifying the vLLM codebase. Steps 2, 3, and 4 are still required, but you can skip steps 1 and 5. Instead, write a plugin to register your model. For general introduction of the plugin system, see :ref:`plugin_system`. -Just add the following lines in your code: +To register the model, use the following code: .. 
code-block:: python @@ -114,7 +114,7 @@ Just add the following lines in your code: from your_code import YourModelForCausalLM ModelRegistry.register_model("YourModelForCausalLM", YourModelForCausalLM) -If your model imports modules that initialize CUDA, consider instead lazy-importing it to avoid an error like :code:`RuntimeError: Cannot re-initialize CUDA in forked subprocess`: +If your model imports modules that initialize CUDA, consider lazy-importing it to avoid errors like :code:`RuntimeError: Cannot re-initialize CUDA in forked subprocess`: .. code-block:: python @@ -123,19 +123,8 @@ If your model imports modules that initialize CUDA, consider instead lazy-import ModelRegistry.register_model("YourModelForCausalLM", "your_code:YourModelForCausalLM") .. important:: - If your model is a multimodal model, make sure the model class implements the :class:`~vllm.model_executor.models.interfaces.SupportsMultiModal` interface. + If your model is a multimodal model, ensure the model class implements the :class:`~vllm.model_executor.models.interfaces.SupportsMultiModal` interface. Read more about that :ref:`here `. -If you are running api server with :code:`vllm serve `, you can wrap the entrypoint with the following code: - -.. code-block:: python - - from vllm import ModelRegistry - from your_code import YourModelForCausalLM - ModelRegistry.register_model("YourModelForCausalLM", YourModelForCausalLM) - - if __name__ == '__main__': - import runpy - runpy.run_module('vllm.entrypoints.openai.api_server', run_name='__main__') - -Save the above code in a file and run it with :code:`python your_file.py `. +.. note:: + Although you can directly put these code snippets in your script using ``vllm.LLM``, the recommended way is to place these snippets in a vLLM plugin. This ensures compatibility with various vLLM features like distributed inference and the API server. 
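The plugin documentation and the registration snippets above can be combined into a single out-of-tree package. The following sketch is an illustration only: the package name ``vllm_your_model`` and the module path ``vllm_your_model.your_model`` are hypothetical, while the ``vllm.general_plugins`` entry-point group and the ``ModelRegistry`` calls are taken from the docs added in this patch.

.. code-block:: python

    # inside `setup.py` of a hypothetical package `vllm_your_model`
    from setuptools import setup

    setup(
        name="vllm_your_model",
        version="0.1",
        packages=["vllm_your_model"],
        entry_points={
            # the group must be `vllm.general_plugins` for vLLM to discover it
            "vllm.general_plugins":
            ["register_your_model = vllm_your_model:register"],
        },
    )

    # inside `vllm_your_model/__init__.py`
    def register():
        from vllm import ModelRegistry

        # registering by qualified name keeps CUDA-initializing modules
        # unimported until the model is actually constructed
        if "YourModelForCausalLM" not in ModelRegistry.get_supported_archs():
            ModelRegistry.register_model(
                "YourModelForCausalLM",
                "vllm_your_model.your_model:YourModelForCausalLM")

Because the registered function is executed in every process vLLM creates, a package laid out like this stays compatible with distributed inference and the API server without any extra wrapping.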
diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 8373e11cfff9f..9fca724599012 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -27,16 +27,24 @@ def load_general_plugins(): allowed_plugins = envs.VLLM_PLUGINS discovered_plugins = entry_points(group='vllm.general_plugins') + logger.info("Available plugins:") + for plugin in discovered_plugins: + logger.info("name=%s, value=%s, group=%s", plugin.name, plugin.value, + plugin.group) + if allowed_plugins is None: + logger.info("all available plugins will be loaded.") + logger.info("set environment variable VLLM_PLUGINS to control" + " which plugins to load.") + else: + logger.info("plugins to load: %s", allowed_plugins) for plugin in discovered_plugins: - logger.info("Found general plugin: %s", plugin.name) if allowed_plugins is None or plugin.name in allowed_plugins: try: func = plugin.load() func() - logger.info("Loaded general plugin: %s", plugin.name) + logger.info("plugin %s loaded.", plugin.name) except Exception: - logger.exception("Failed to load general plugin: %s", - plugin.name) + logger.exception("Failed to load plugin %s", plugin.name) _torch_compile_backend: Optional[Union[Callable, str]] = None From 2f427c2d163b5c6d5923a8808e9d786e170944ce Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 16 Nov 2024 01:23:20 -0800 Subject: [PATCH 007/397] [misc][plugin] improve log messages (#10386) Signed-off-by: youkaichao --- vllm/plugins/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 9fca724599012..7b1bbb14c5302 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -27,6 +27,9 @@ def load_general_plugins(): allowed_plugins = envs.VLLM_PLUGINS discovered_plugins = entry_points(group='vllm.general_plugins') + if len(discovered_plugins) == 0: + logger.info("No plugins found.") + return logger.info("Available plugins:") for plugin in discovered_plugins: logger.info("name=%s, value=%s, group=%s", plugin.name, plugin.value, From 1d754726265d52773653e53e1a18f6eb63122480 Mon Sep 17 00:00:00 2001 From: rasmith Date: Sat, 16 Nov 2024 03:55:05 -0600 Subject: [PATCH 008/397] [BugFix] [Kernel] Fix GPU SEGV occuring in fused_moe kernel (#10385) Signed-off-by: Randall Smith --- vllm/model_executor/layers/fused_moe/fused_moe.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/layers/fused_moe/fused_moe.py b/vllm/model_executor/layers/fused_moe/fused_moe.py index 340da32263c1c..e6f9f01ef0f74 100644 --- a/vllm/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/fused_moe.py @@ -105,16 +105,18 @@ def fused_moe_kernel( num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded: return - offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M).to( + tl.int64) offs_token = tl.load(sorted_token_ids_ptr + offs_token_id) token_mask = offs_token < num_valid_tokens - offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N + offs_bn = (pid_n * BLOCK_SIZE_N + + tl.arange(0, BLOCK_SIZE_N).to(tl.int64)) % N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[None, :] * stride_ak) - off_experts = tl.load(expert_ids_ptr + pid_m) + off_experts = tl.load(expert_ids_ptr + pid_m).to(tl.int64) b_ptrs = b_ptr + off_experts * stride_be + (offs_k[:, None] * stride_bk + 
offs_bn[None, :] * stride_bn) if use_int8_w8a16: From 8b6725b0cf4ee5f363218f4bc341970c80297ccf Mon Sep 17 00:00:00 2001 From: Jaehyun An Date: Sat, 16 Nov 2024 19:15:40 +0900 Subject: [PATCH 009/397] [Misc] Update benchmark to support image_url file or http (#10287) Signed-off-by: rbbang --- benchmarks/benchmark_serving.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/benchmarks/benchmark_serving.py b/benchmarks/benchmark_serving.py index bdb8ea8e2a5dc..e9fc037a46965 100644 --- a/benchmarks/benchmark_serving.py +++ b/benchmarks/benchmark_serving.py @@ -251,6 +251,19 @@ def sample_hf_requests( "url": f"data:image/jpeg;base64,{image_base64}" }, } + elif "image" in data and isinstance(data["image"], str): + if (data["image"].startswith("http://") or \ + data["image"].startswith("file://")): + image_url = data["image"] + else: + image_url = f"file://{data['image']}" + + mm_content = { + "type": "image_url", + "image_url": { + "url": image_url + }, + } else: mm_content = None From b98d89efd4b1a09c11c4d0cf30c9af0e93514764 Mon Sep 17 00:00:00 2001 From: Sky Lee <46676799+skylee-01@users.noreply.github.com> Date: Sun, 17 Nov 2024 00:33:01 +0800 Subject: [PATCH 010/397] [Misc] Medusa supports custom bias (#10361) --- vllm/model_executor/models/medusa.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/models/medusa.py b/vllm/model_executor/models/medusa.py index de5b2d89c0962..b05360b55466b 100644 --- a/vllm/model_executor/models/medusa.py +++ b/vllm/model_executor/models/medusa.py @@ -14,11 +14,14 @@ class ResidualBlock(nn.Module): - def __init__(self, hidden_size: int, num_layers: int) -> None: + def __init__(self, config: VllmConfig, hidden_size: int, + num_layers: int) -> None: super().__init__() self.layers = nn.ModuleList([ - nn.Linear(hidden_size, hidden_size, bias=False) + nn.Linear(hidden_size, + hidden_size, + bias=getattr(config, "medusa_fc_bias", False)) for _ in range(num_layers) ]) self.act = nn.SiLU() @@ -49,7 +52,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() self.config = config self.blocks = nn.ModuleList([ - ResidualBlock(hidden_size=self.config.hidden_size, + ResidualBlock(config=config, + hidden_size=self.config.hidden_size, num_layers=self.config.num_hidden_layers) for _ in range(self.config.num_heads) ]) From 361c29e1740e0b2186f8cca3ed96ad235a8a960a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=94=B5=E8=84=91=E6=98=9F=E4=BA=BA?= Date: Sun, 17 Nov 2024 02:10:00 +0800 Subject: [PATCH 011/397] [Bugfix] Fix M-RoPE position calculation when chunked prefill is enabled (#10388) Signed-off-by: imkero --- .../vision_language/test_qwen2_vl.py | 136 +++++++++++++++++- .../model_executor/layers/rotary_embedding.py | 3 +- vllm/worker/model_runner.py | 1 + 3 files changed, 135 insertions(+), 5 deletions(-) diff --git a/tests/models/decoder_only/vision_language/test_qwen2_vl.py b/tests/models/decoder_only/vision_language/test_qwen2_vl.py index 718c675b86fb4..71b6ba4dca435 100644 --- a/tests/models/decoder_only/vision_language/test_qwen2_vl.py +++ b/tests/models/decoder_only/vision_language/test_qwen2_vl.py @@ -18,6 +18,7 @@ IMAGE_PLACEHOLDER = "<|vision_start|><|image_pad|><|vision_end|>" VIDEO_PLACEHOLDER = "<|vision_start|><|video_pad|><|vision_end|>" +MODEL_HIDDEN_SIZE = 1536 def qwen2_vl_chat_template(*query): @@ -230,7 +231,7 @@ def batch_make_video_embeddings( return result -def run_test( +def run_embedding_input_test( vllm_runner: Type[VllmRunner], inputs: 
List[Tuple[List[str], PromptImageInput, PromptVideoInput]], model: str, @@ -326,7 +327,7 @@ def test_qwen2_vl_image_embeddings_input(vllm_runner, image_assets, model, [], ) for image, prompt in zip(images, IMAGE_PROMPTS)] - run_test( + run_embedding_input_test( vllm_runner, inputs_per_case, model, @@ -371,7 +372,7 @@ def test_qwen2_vl_multiple_image_embeddings_input(vllm_runner, image_assets, [], )] - run_test( + run_embedding_input_test( vllm_runner, inputs_per_case, model, @@ -416,7 +417,134 @@ def test_qwen2_vl_video_embeddings_input(vllm_runner, video_assets, model, [rescale_video_size(video, factor) for factor in size_factors], ) for video, prompt in zip(sampled_vids, VIDEO_PROMPTS)] - run_test( + run_embedding_input_test( + vllm_runner, + inputs_per_case, + model, + dtype=dtype, + max_tokens=max_tokens, + num_logprobs=num_logprobs, + mm_limit=1, + tensor_parallel_size=1, + ) + + +def run_chunked_prefill_test( + vllm_runner: Type[VllmRunner], + inputs: List[Tuple[List[str], PromptImageInput, PromptVideoInput]], + model: str, + *, + dtype: str, + max_tokens: int, + num_logprobs: int, + mm_limit: int, + tensor_parallel_size: int, + distributed_executor_backend: Optional[str] = None, +): + """Compare inference result between + chunked prefill disabled and chunked prefill enabled + """ + + # NOTE: + # max_model_len should be greater than image_feature_size + with vllm_runner(model, + task="generate", + max_model_len=4000, + max_num_seqs=4, + dtype=dtype, + limit_mm_per_prompt={ + "image": mm_limit, + "video": mm_limit + }, + tensor_parallel_size=tensor_parallel_size, + distributed_executor_backend=distributed_executor_backend + ) as vllm_model: + + outputs_per_case = [ + vllm_model.generate_greedy_logprobs(prompts, + max_tokens, + num_logprobs=num_logprobs, + images=images or None, + videos=videos or None) + for prompts, images, videos in inputs + ] + + with vllm_runner( + model, + task="generate", + max_model_len=4000, + max_num_seqs=4, + dtype=dtype, + limit_mm_per_prompt={ + "image": mm_limit, + "video": mm_limit + }, + tensor_parallel_size=tensor_parallel_size, + distributed_executor_backend=distributed_executor_backend, + enable_chunked_prefill=True, + # should be small enough to ensure prefilling is chunked + max_num_batched_tokens=32, + mm_processor_kwargs={ + "max_pixels": 16 * 28 * 28, + }) as vllm_model_chunked: + outputs_per_case_chunked = [ + vllm_model_chunked.generate_greedy_logprobs( + prompts, + max_tokens, + num_logprobs=num_logprobs, + images=images or None, + videos=videos or None) for prompts, images, videos in inputs + ] + + for outputs, \ + outputs_chunked \ + in zip(outputs_per_case, + outputs_per_case_chunked): + check_logprobs_close( + outputs_0_lst=outputs, + outputs_1_lst=outputs_chunked, + name_0="non_chunked", + name_1="chunked", + ) + + +@pytest.mark.core_model +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("dtype", [target_dtype]) +@pytest.mark.parametrize("max_tokens", [1]) +@pytest.mark.parametrize("num_logprobs", [10]) +def test_qwen2_vl_mrope_chunked_prefill(vllm_runner, example_prompts, + model: str, dtype: str, + max_tokens: int, + num_logprobs: int) -> None: + """ + Test Qwen2-VL's chunked prefill with M-RoPE + """ + prompts = [ + qwen2_vl_chat_template(IMAGE_PLACEHOLDER, prompt) + for prompt in example_prompts[:1] + ] + + # 1. Qwen2-VL's M-RoPE works only when there are some multi-modal inputs, + # so an image is included in the inputs + # 2. 
however, Qwen2-VL currently won't work properly + # when chunked prefill is enabled and there are some multi-modal inputs, + # here use a hacky way: provide a **zero-length** image to make it happy + # + # and finally we achieved: + # (1) chunked_prefill enabled; (2) M-RoPE works; to continue our tests + zero_len_image = { + "image_embeds": torch.empty((0, MODEL_HIDDEN_SIZE)), + "image_grid_thw": torch.tensor([[0, 0, 0]]) + } + images = [zero_len_image] * len(prompts) + + inputs_per_case: List[Tuple[List[str], PromptImageInput, + PromptVideoInput]] = [ + (prompts, images, []), + ] + + run_chunked_prefill_test( vllm_runner, inputs_per_case, model, diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py index 63ceec63e8317..b01e4c61fe101 100644 --- a/vllm/model_executor/layers/rotary_embedding.py +++ b/vllm/model_executor/layers/rotary_embedding.py @@ -847,6 +847,7 @@ def get_input_positions( vision_end_token_id: int, spatial_merge_size: int, context_len: int = 0, + seq_len: Optional[int] = None, ) -> Tuple[List[List[int]], int]: """Get mrope input positions and delta value.""" @@ -921,7 +922,7 @@ def get_input_positions( torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) - llm_positions = llm_positions[:, context_len:] + llm_positions = llm_positions[:, context_len:seq_len] mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item() diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 042f9f07eace6..22ee3f9f863e4 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -700,6 +700,7 @@ def _compute_multi_modal_input(self, inter_data: InterDataForSeqGroup, spatial_merge_size=hf_config.vision_config. 
spatial_merge_size, context_len=inter_data.context_lens[seq_idx], + seq_len=inter_data.seq_lens[seq_idx], ) seq_data.mrope_position_delta = mrope_position_delta From 661a34fd4fdd700a29b2db758e23e4e243e7ff18 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Sat, 16 Nov 2024 10:45:26 -0800 Subject: [PATCH 012/397] [V1] Add code owners for V1 (#10397) Signed-off-by: Woosuk Kwon --- .github/CODEOWNERS | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index cd721971d01d6..3cb91fc0f8232 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -3,13 +3,16 @@ # This lists cover the "core" components of vLLM that require careful review /vllm/attention/backends/abstract.py @WoosukKwon @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill -/vllm/core @WoosukKwon @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill -/vllm/engine/llm_engine.py @WoosukKwon @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill -/vllm/executor/executor_base.py @WoosukKwon @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill -/vllm/worker/worker_base.py @WoosukKwon @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill -/vllm/worker/worker.py @WoosukKwon @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill -/vllm/model_executor/layers/sampler.py @WoosukKwon @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill -CMakeLists.txt @tlrmchlsmth @WoosukKwon +/vllm/core @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill +/vllm/engine/llm_engine.py @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill +/vllm/executor/executor_base.py @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill +/vllm/worker/worker_base.py @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill +/vllm/worker/worker.py @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill +/vllm/model_executor/layers/sampler.py @zhuohan123 @youkaichao @alexm-neuralmagic @comaniac @njhill +CMakeLists.txt @tlrmchlsmth + +# vLLM V1 +/vllm/v1 @WoosukKwon @robertgshaw2-neuralmagic @njhill @ywang96 @comaniac @alexm-neuralmagic # Test ownership /tests/async_engine @njhill @robertgshaw2-neuralmagic @simon-mo From 4fd937502827a7e06c54ded1f9d9b70ff640e222 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 16 Nov 2024 18:02:14 -0800 Subject: [PATCH 013/397] [2/N][torch.compile] make compilation cfg part of vllm cfg (#10383) Signed-off-by: youkaichao --- tests/compile/piecewise/test_simple.py | 8 +- tests/compile/piecewise/test_toy_llama.py | 22 +- tests/compile/test_basic_correctness.py | 2 +- tests/compile/test_full_graph.py | 2 +- tests/compile/test_fusion.py | 2 +- tests/compile/test_wrapper.py | 4 +- tests/compile/utils.py | 2 +- .../model_executor/test_enabled_custom_ops.py | 52 ++--- tests/tpu/test_compilation.py | 2 +- tests/tpu/test_custom_dispatcher.py | 2 +- vllm/compilation/backends.py | 20 +- vllm/compilation/config.py | 159 --------------- vllm/compilation/decorators.py | 10 +- vllm/compilation/fusion.py | 2 +- vllm/compilation/inductor_pass.py | 2 +- vllm/compilation/levels.py | 8 - vllm/compilation/wrapper.py | 11 +- vllm/config.py | 189 ++++++++++++++++++ vllm/envs.py | 13 -- vllm/model_executor/custom_op.py | 27 +-- vllm/model_executor/model_loader/loader.py | 7 +- vllm/platforms/interface.py | 20 +- vllm/platforms/tpu.py | 21 +- vllm/plugins/__init__.py | 30 ++- vllm/v1/worker/gpu_model_runner.py | 10 +- vllm/worker/model_runner.py | 7 +- vllm/worker/tpu_model_runner.py | 8 +- 27 files changed, 359 
insertions(+), 283 deletions(-) delete mode 100644 vllm/compilation/config.py delete mode 100644 vllm/compilation/levels.py diff --git a/tests/compile/piecewise/test_simple.py b/tests/compile/piecewise/test_simple.py index c631850ecdedb..45f56cbbd4b16 100644 --- a/tests/compile/piecewise/test_simple.py +++ b/tests/compile/piecewise/test_simple.py @@ -11,8 +11,8 @@ from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile -from vllm.compilation.levels import CompilationLevel -from vllm.config import VllmConfig +from vllm.config import CompilationLevel, VllmConfig +from vllm.plugins import set_current_vllm_config from vllm.utils import direct_register_custom_op global_counter = 0 @@ -82,7 +82,9 @@ def test_simple_piecewise_compile(): os.environ["VLLM_TORCH_COMPILE_CONFIG"] = config os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(CompilationLevel.PIECEWISE) - model = SillyModel(vllm_config=VllmConfig(), prefix='') + vllm_config = VllmConfig() + with set_current_vllm_config(vllm_config): + model = SillyModel(vllm_config=vllm_config, prefix='') inputs = torch.randn(100).cuda() diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index c363a587a818e..8032304e95806 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -15,12 +15,10 @@ from torch.library import Library from vllm.compilation.compile_context import set_compile_context -from vllm.compilation.config import CompilationConfig from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile -from vllm.compilation.levels import CompilationLevel -from vllm.config import VllmConfig -from vllm.plugins import set_compilation_config +from vllm.config import CompilationConfig, CompilationLevel, VllmConfig +from vllm.plugins import set_compilation_config, set_current_vllm_config from vllm.utils import direct_register_custom_op # create a library to hold the custom op @@ -272,9 +270,11 @@ def run_model(llama_config, CompilationLevel.NO_COMPILATION) set_compilation_config(None) - model = LlamaModel(config=llama_config, - vllm_config=VllmConfig(), - prefix="").eval().cuda() + vllm_config = VllmConfig() + with set_current_vllm_config(vllm_config): + model = LlamaModel(config=llama_config, + vllm_config=vllm_config, + prefix="").eval().cuda() B = 16 # max batch size input_ids = torch.randint(0, llama_config.vocab_size, (B, )).cuda() @@ -395,9 +395,11 @@ def benchmark(): else: set_compilation_config(None) - model = LlamaModel(config=llama_config, - vllm_config=VllmConfig(), - prefix="").eval().cuda().to(torch.bfloat16) + vllm_config = VllmConfig() + with set_current_vllm_config(vllm_config): + model = LlamaModel(config=llama_config, + vllm_config=vllm_config, + prefix="").eval().cuda().to(torch.bfloat16) B = 256 # max batch size input_ids = torch.randint(0, llama_config.vocab_size, (B, )).cuda() diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index 833589ba5dc9f..08747ebc58b75 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -3,7 +3,7 @@ import pytest -from vllm.compilation.levels import CompilationLevel +from vllm.config import CompilationLevel from vllm.utils import cuda_device_count_stateless from ..utils import compare_all_settings diff --git a/tests/compile/test_full_graph.py 
b/tests/compile/test_full_graph.py index f00334934cb46..4dfdfe21a67df 100644 --- a/tests/compile/test_full_graph.py +++ b/tests/compile/test_full_graph.py @@ -1,6 +1,6 @@ import pytest -from vllm.compilation.levels import CompilationLevel +from vllm.config import CompilationLevel from ..utils import fork_new_process_for_each_test from .utils import TEST_MODELS, check_full_graph_support diff --git a/tests/compile/test_fusion.py b/tests/compile/test_fusion.py index e4d3defafb951..4db79b070fd8d 100644 --- a/tests/compile/test_fusion.py +++ b/tests/compile/test_fusion.py @@ -3,10 +3,10 @@ from compressed_tensors.quantization import FP8_DTYPE import vllm.envs as envs -from vllm.compilation.config import CompilationConfig from vllm.compilation.fusion import (FusionPass, find_auto_fn, find_auto_fn_maybe) from vllm.compilation.reshapes import RedundantReshapesPass +from vllm.config import CompilationConfig from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( apply_fp8_linear) diff --git a/tests/compile/test_wrapper.py b/tests/compile/test_wrapper.py index 3668c1fab6b89..74f66baaa5ea1 100644 --- a/tests/compile/test_wrapper.py +++ b/tests/compile/test_wrapper.py @@ -3,6 +3,7 @@ import torch from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher +from vllm.config import CompilationLevel class MyMod(torch.nn.Module): @@ -18,7 +19,8 @@ class MyWrapper(TorchCompileWrapperWithCustomDispatcher): def __init__(self, model): self.model = model compiled_callable = torch.compile(self.forward, backend="eager") - super().__init__(compiled_callable) + super().__init__(compiled_callable, + compilation_level=CompilationLevel.DYNAMO_ONCE) def forward(self, x: torch.Tensor, cache: Optional[torch.Tensor] = None): # this is the function to be compiled diff --git a/tests/compile/utils.py b/tests/compile/utils.py index 222c63a342a4b..729f10676888b 100644 --- a/tests/compile/utils.py +++ b/tests/compile/utils.py @@ -4,7 +4,7 @@ from tests.quantization.utils import is_quant_method_supported from vllm import LLM, SamplingParams -from vllm.compilation.levels import CompilationLevel +from vllm.config import CompilationLevel from vllm.platforms import current_platform TEST_MODELS = [ diff --git a/tests/model_executor/test_enabled_custom_ops.py b/tests/model_executor/test_enabled_custom_ops.py index af267f804ffa7..c3219bc50646b 100644 --- a/tests/model_executor/test_enabled_custom_ops.py +++ b/tests/model_executor/test_enabled_custom_ops.py @@ -3,11 +3,13 @@ import pytest +from vllm.config import CompilationConfig, VllmConfig from vllm.model_executor.custom_op import CustomOp from vllm.model_executor.layers.activation import (GeluAndMul, ReLUSquaredActivation, SiluAndMul) from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.plugins import set_current_vllm_config # Registered subclass for test @@ -51,42 +53,40 @@ class Relu3(ReLUSquaredActivation): ]) def test_enabled_ops(env: str, torch_level: int, ops_enabled: List[int], default_on: bool): - os.environ["VLLM_CUSTOM_OPS"] = env os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(torch_level) + vllm_config = VllmConfig(compilation_config=CompilationConfig( + custom_ops=env.split(","))) + with set_current_vllm_config(vllm_config): + assert CustomOp.default_on() == default_on - # Reset default_on (computed once): - CustomOp.default_on.cache_clear() + ops_enabled = [bool(x) for x in ops_enabled] - assert CustomOp.default_on() == default_on + assert RMSNorm(1024).enabled() == 
ops_enabled[0] + assert CustomOp.op_registry["rms_norm"].enabled() == ops_enabled[0] - ops_enabled = [bool(x) for x in ops_enabled] + assert SiluAndMul().enabled() == ops_enabled[1] + assert CustomOp.op_registry["silu_and_mul"].enabled() == ops_enabled[1] - assert RMSNorm(1024).enabled() == ops_enabled[0] - assert CustomOp.op_registry["rms_norm"].enabled() == ops_enabled[0] + assert GeluAndMul().enabled() == ops_enabled[2] + assert CustomOp.op_registry["gelu_and_mul"].enabled() == ops_enabled[2] - assert SiluAndMul().enabled() == ops_enabled[1] - assert CustomOp.op_registry["silu_and_mul"].enabled() == ops_enabled[1] + # If registered, subclasses should follow their own name + assert Relu3().enabled() == ops_enabled[3] + assert CustomOp.op_registry["relu3"].enabled() == ops_enabled[3] - assert GeluAndMul().enabled() == ops_enabled[2] - assert CustomOp.op_registry["gelu_and_mul"].enabled() == ops_enabled[2] + # Unregistered subclass + class SiluAndMul2(SiluAndMul): + pass - # If registered, subclasses should follow their own name - assert Relu3().enabled() == ops_enabled[3] - assert CustomOp.op_registry["relu3"].enabled() == ops_enabled[3] - - # Unregistered subclass - class SiluAndMul2(SiluAndMul): - pass - - # Subclasses should not require registration - assert SiluAndMul2().enabled() == SiluAndMul().enabled() + # Subclasses should not require registration + assert SiluAndMul2().enabled() == SiluAndMul().enabled() @pytest.mark.parametrize( "env", ["all,none", "all,+rms_norm,all", "+rms_norm,-rms_norm"]) def test_enabled_ops_invalid(env: str): - os.environ["VLLM_CUSTOM_OPS"] = env - CustomOp.default_on.cache_clear() - - with pytest.raises(AssertionError): - RMSNorm(1024).enabled() + with pytest.raises(Exception): # noqa + vllm_config = VllmConfig(compilation_config=CompilationConfig( + custom_ops=env.split(","))) + with set_current_vllm_config(vllm_config): + RMSNorm(1024).enabled() diff --git a/tests/tpu/test_compilation.py b/tests/tpu/test_compilation.py index 86d9af88e49ea..941abe17a3378 100644 --- a/tests/tpu/test_compilation.py +++ b/tests/tpu/test_compilation.py @@ -5,7 +5,7 @@ import depyf -from vllm.compilation.levels import CompilationLevel +from vllm.config import CompilationLevel # disable custom dispatcher, let Dynamo takes over # all the control diff --git a/tests/tpu/test_custom_dispatcher.py b/tests/tpu/test_custom_dispatcher.py index 923d0f1680802..53b10c06135a1 100644 --- a/tests/tpu/test_custom_dispatcher.py +++ b/tests/tpu/test_custom_dispatcher.py @@ -1,6 +1,6 @@ import os -from vllm.compilation.levels import CompilationLevel +from vllm.config import CompilationLevel from ..utils import compare_two_settings diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 5682faa158069..22c613931f082 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -10,13 +10,12 @@ import torch.fx as fx import vllm.envs as envs +from vllm.config import CompilationConfig, CompilationLevel from vllm.logger import init_logger from vllm.utils import combine_fx_passes, weak_ref_tensors -from .config import CompilationConfig from .counter import compilation_counter from .fusion import FusionPass -from .levels import CompilationLevel from .reshapes import RedundantReshapesPass logger = init_logger(__name__) @@ -392,7 +391,10 @@ class VllmBackend: sym_tensor_indices: List[int] input_buffers: List[torch.Tensor] - def __init__(self, post_grad_passes: Sequence[Callable] = ()): + def __init__( + self, + compilation_configs: CompilationConfig, + ): global 
global_graph_pool if global_graph_pool is None: global_graph_pool = torch.cuda.graph_pool_handle() @@ -401,11 +403,13 @@ def __init__(self, post_grad_passes: Sequence[Callable] = ()): # streams, it might not be safe to share a global pool. # only investigate this when we use multiple streams self.graph_pool = global_graph_pool - self.post_grad_passes = post_grad_passes + self.post_grad_passes = [] self.sym_tensor_indices = [] self.input_buffers = [] + self.compilation_configs = compilation_configs + # `torch.compile` is JIT compiled, so we don't need to # do anything here @@ -437,10 +441,10 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: assert not self._called, "VllmBackend can only be called once" self.graph = graph - # config is read now, because only here can + # config is updated now, because only here can # we get the sizes to capture for cudagraph # from compilation context - self.compilation_configs = CompilationConfig.select_and_init_config() + self.compilation_configs.init_during_runtime() self.add_passes_to_config() self.split_gm, self.piecewise_graphs = split_graph( @@ -688,4 +692,6 @@ def select_default_backend(level: int) -> Union[str, Callable]: return backend_str assert level == CompilationLevel.PIECEWISE - return VllmBackend() + from vllm.plugins import get_current_vllm_config + compilation_config = get_current_vllm_config().compilation_config + return VllmBackend(compilation_config) diff --git a/vllm/compilation/config.py b/vllm/compilation/config.py deleted file mode 100644 index 3e663505c627d..0000000000000 --- a/vllm/compilation/config.py +++ /dev/null @@ -1,159 +0,0 @@ -import copy -from pathlib import Path -from typing import Any, Dict, List, Optional - -from pydantic import BaseModel, Field, PrivateAttr - -import vllm.envs as envs -from vllm.logger import init_logger - -from .compile_context import get_compile_context - -logger = init_logger(__name__) - - -class CompilationConfig(BaseModel): - """ - Configuration for compilation. - It has two parts: - - CudaGraph capture: - - use_cudagraph: whether to use cudagraph inside compilation. - - False: cudagraph inside compilation is not used. - - True: cudagraph inside compilation is used. It requires - that all input buffers have fixed addresses. - Note that this is orthogonal to the cudagraph capture out - side of compilation. - TODO: move outside cudagraph logic into compilation. - torch.compile will handle cudagraph capture logic in the future. - - cudagraph_capture_sizes: sizes to capture cudagraph. - - None: capture sizes are inferred from compilation context. - - List[int]: capture sizes are specified. - - cudagraph_num_of_warmups: number of warmup runs for cudagraph. - It means the first several runs will be treated as warmup runs. - Only after that, the execution will be recorded, and the recorded - cudagraph will be used for subsequent runs. - - cudagraph_copy_inputs: whether to copy input tensors for - cudagraph. If the caller can guarantee that the same input buffers - are always used, it can set this to False. Otherwise, it should - set this to True, and the compiler will copy the input to an - internally managed buffer. Default is False. - - Inductor compilation: - - use_inductor: whether to use inductor compilation. - - False: inductor compilation is not used. graph runs in eager. - - True: inductor compilation is used. one graph for symbolic shape - is compiled. 
In addition, compile for different sizes specified - in inductor_compile_sizes, using configurations - in inductor_compile_config. - - inductor_compile_sizes: sizes to compile for inductor. - - inductor_specialize_for_cudagraph_no_more_than: an optional integer - to specialize inductor for cudagraph sizes no more than the - specified size. It is useful when we want to specialize inductor - with a subset of cudagraph sizes. - - inductor_compile_config: additional configurations for inductor. - - None: use default configurations. - - inductor_passes: additional passes for inductor. It is a dictionary - from pass name to pass function qualified name. We use function - name because the config uses json format. If we pass the config - from Python, functions can also be passed directly via Python object - constructor, e.g. `CompilationConfig(inductor_passes={"a": func})` - - Custom inductor passes: - - dump_graph_stages: list of stages for which we want to dump the graph. - Each pass defines its own stages (before, after, maybe in-between). - - dump_graph_dir: directory to dump the graph. Default is . - - enable_fusion: whether to enable the custom fusion pass. - TODO better pass enabling system. - - Why we have different sizes for cudagraph and inductor: - - cudagraph: a cudagraph captured for a specific size can only be used - for the same size. We need to capture all the sizes we want to use. - - inductor: a graph compiled by inductor for a general shape can be used - for different sizes. Inductor can also compile for specific sizes, - where it can have more information to optimize the graph with fully - static shapes. However, we find the general shape compilation is - sufficient for most cases. It might be beneficial to compile for - certain small batchsizes, where inductor is good at optimizing. - """ - use_inductor: bool = True - inductor_specialize_for_cudagraph_no_more_than: Optional[int] = None - inductor_compile_sizes: Optional[List[int]] = Field(default_factory=dict) - inductor_compile_config: Dict = Field(default_factory=dict) - inductor_passes: Dict[str, str] = Field(default_factory=dict) - - use_cudagraph: bool = False - non_cudagraph_ops: List[str] = Field(default_factory=list) - cudagraph_num_of_warmups: int = 0 - cudagraph_capture_sizes: Optional[List[int]] = None - cudagraph_copy_inputs: bool = False - - dump_graph_stages: List[str] = Field(default_factory=list) - dump_graph_dir: Path = Field(default=Path(".")) - enable_fusion: bool = True - - # not configurable, computed after init - compile_sizes: List[int] = PrivateAttr - capture_sizes: List[int] = PrivateAttr - - def model_post_init(self, __context: Any) -> None: - for k, v in self.inductor_passes.items(): - if not isinstance(v, str): - assert callable(v), ( - f"pass {k} should be a function or a qualified name") - self.inductor_compile_config[k] = v - continue - - # resolve function from qualified name - names = v.split(".") - module = ".".join(names[:-1]) - func_name = names[-1] - func = __import__(module).__dict__[func_name] - self.inductor_compile_config[k] = func - - def init_during_runtime(self): - """To complete the initialization of config, - we need to know the compile context, which is only available - during the first run of the model. 
- """ - context = get_compile_context() - context = copy.deepcopy(context) if context is not None else [] - sizes_to_specialize: List[int] = context - if self.cudagraph_capture_sizes is None: - self.capture_sizes = sizes_to_specialize - else: - self.capture_sizes = self.cudagraph_capture_sizes - logger.info(("cudagraph sizes specified by model runner" - " %s is overridden by config %s"), - sizes_to_specialize, self.cudagraph_capture_sizes) - if self.inductor_specialize_for_cudagraph_no_more_than is not None: - assert self.inductor_compile_sizes is None, ( - "inductor_compile_sizes should be None when " - "inductor_specialize_for_cudagraph_no_more_than is not None") - self.compile_sizes = [ - x for x in self.capture_sizes - if x <= self.inductor_specialize_for_cudagraph_no_more_than - ] - else: - assert self.inductor_compile_sizes is not None, ( - "inductor_compile_sizes should not be None when " - "inductor_specialize_for_cudagraph_no_more_than is None") - self.compile_sizes = self.inductor_compile_sizes - - @staticmethod - def select_and_init_config() -> "CompilationConfig": - """The order of selecting config is: - 1. Use the config specified in environment variable. - 2. Use the config specified in plugins. - 3. Use the default config. - """ - config_path = envs.VLLM_TORCH_COMPILE_CONFIG - if config_path is not None: - with open(config_path) as json_file: - config = CompilationConfig.model_validate_json( - json_file.read()) - else: - from vllm.plugins import get_compilation_config - predefined_config = get_compilation_config() - config = predefined_config if predefined_config is not None else ( - CompilationConfig()) - - config.init_during_runtime() - return config diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index ca1e96a33c014..4b78491bc5a48 100644 --- a/vllm/compilation/decorators.py +++ b/vllm/compilation/decorators.py @@ -3,10 +3,8 @@ import torch -import vllm.envs as envs -from vllm.compilation.levels import CompilationLevel from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher -from vllm.config import VllmConfig +from vllm.config import CompilationLevel, VllmConfig from vllm.logger import init_logger from vllm.sequence import IntermediateTensors from vllm.utils import supports_dynamo @@ -126,12 +124,14 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): old_init(self, vllm_config=vllm_config, prefix=prefix, **kwargs) # for CompilationLevel.DYNAMO_AS_IS , the upper level model runner # will handle the compilation, so we don't need to do anything here. 
- self.do_not_compile = envs.VLLM_TORCH_COMPILE_LEVEL in [ + self.do_not_compile = \ + vllm_config.compilation_config.level in [ CompilationLevel.NO_COMPILATION, CompilationLevel.DYNAMO_AS_IS ] or not supports_dynamo() if self.do_not_compile: return - TorchCompileWrapperWithCustomDispatcher.__init__(self) + TorchCompileWrapperWithCustomDispatcher.__init__( + self, compilation_level=vllm_config.compilation_config.level) cls.__init__ = __init__ # type: ignore diff --git a/vllm/compilation/fusion.py b/vllm/compilation/fusion.py index eb43604b1399b..e6a3afef85e1b 100644 --- a/vllm/compilation/fusion.py +++ b/vllm/compilation/fusion.py @@ -6,8 +6,8 @@ from torch._inductor.pattern_matcher import (Match, PatternMatcherPass, fwd_only, register_replacement) -from vllm.compilation.config import CompilationConfig from vllm.compilation.inductor_pass import InductorPass +from vllm.config import CompilationConfig from vllm.logger import init_logger logger = init_logger(__name__) diff --git a/vllm/compilation/inductor_pass.py b/vllm/compilation/inductor_pass.py index b23351fa19759..8082a08b40019 100644 --- a/vllm/compilation/inductor_pass.py +++ b/vllm/compilation/inductor_pass.py @@ -2,7 +2,7 @@ import torch -from vllm.compilation.config import CompilationConfig +from vllm.config import CompilationConfig # yapf: disable from vllm.distributed import get_tensor_model_parallel_rank as get_tp_rank from vllm.distributed import ( diff --git a/vllm/compilation/levels.py b/vllm/compilation/levels.py deleted file mode 100644 index 19a3a2b526870..0000000000000 --- a/vllm/compilation/levels.py +++ /dev/null @@ -1,8 +0,0 @@ -# constants for the levels of the compilation process - - -class CompilationLevel: - NO_COMPILATION = 0 - DYNAMO_AS_IS = 1 - DYNAMO_ONCE = 2 - PIECEWISE = 3 diff --git a/vllm/compilation/wrapper.py b/vllm/compilation/wrapper.py index 7366ed4d16b0b..2a1aecc11ce26 100644 --- a/vllm/compilation/wrapper.py +++ b/vllm/compilation/wrapper.py @@ -8,8 +8,7 @@ import torch import vllm.envs as envs - -from .levels import CompilationLevel +from vllm.config import CompilationLevel class TorchCompileWrapperWithCustomDispatcher: @@ -25,7 +24,9 @@ class TorchCompileWrapperWithCustomDispatcher: `torch.compile` over the forward method. """ - def __init__(self, compiled_callable: Optional[Callable] = None): + def __init__(self, + compiled_callable: Optional[Callable] = None, + compilation_level: int = 0): if compiled_callable is None: # default compilation settings @@ -38,7 +39,7 @@ def __init__(self, compiled_callable: Optional[Callable] = None): backend = get_torch_compile_backend() if backend is None: from vllm.compilation.backends import select_default_backend - backend = select_default_backend(envs.VLLM_TORCH_COMPILE_LEVEL) + backend = select_default_backend(compilation_level) compiled_callable = torch.compile( self.forward, @@ -54,7 +55,7 @@ def __init__(self, compiled_callable: Optional[Callable] = None): # subclasses can use this to switch between the custom dispatcher # and the default Dynamo guard mechanism. self.use_custom_dispatcher: bool = \ - envs.VLLM_TORCH_COMPILE_LEVEL >= CompilationLevel.DYNAMO_ONCE + compilation_level >= CompilationLevel.DYNAMO_ONCE def __call__(self, *args, **kwargs): """Implement the dispatch logic here, beyond the torch.compile level. 
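
The decorator and wrapper hunks above replace the VLLM_TORCH_COMPILE_LEVEL environment variable with vllm_config.compilation_config.level, so wrapper subclasses now pass the level explicitly. Below is a minimal sketch of the resulting calling convention; it mirrors the TPU ModelWrapper updated later in this patch, but the CompiledModelWrapper class name and the plain nn.Module forwarding are illustrative, not code from the patch.

import torch.nn as nn

from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher
from vllm.config import VllmConfig


class CompiledModelWrapper(TorchCompileWrapperWithCustomDispatcher):
    # Hypothetical subclass used only to illustrate the new argument.

    def __init__(self, model: nn.Module, vllm_config: VllmConfig):
        self.model = model
        # The level now comes from the config object instead of the
        # VLLM_TORCH_COMPILE_LEVEL environment variable; the base class
        # uses it both to select a default backend and to decide whether
        # the custom dispatcher is active (level >= DYNAMO_ONCE).
        super().__init__(
            compilation_level=vllm_config.compilation_config.level)

    def forward(self, *args, **kwargs):
        return self.model(*args, **kwargs)
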
diff --git a/vllm/config.py b/vllm/config.py index 64b2f75e092de..7e37edbe594b1 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -3,10 +3,12 @@ import json import warnings from dataclasses import dataclass, field, replace +from pathlib import Path from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Dict, Final, List, Literal, Mapping, Optional, Set, Tuple, Type, Union) import torch +from pydantic import BaseModel, Field, PrivateAttr from transformers import PretrainedConfig import vllm.envs as envs @@ -2052,6 +2054,185 @@ def __post_init__(self): f"installed. Original error:\n{otel_import_error_traceback}") +class CompilationLevel: + # constants for the levels of the compilation process + NO_COMPILATION = 0 + DYNAMO_AS_IS = 1 + DYNAMO_ONCE = 2 + PIECEWISE = 3 + + +class CompilationConfig(BaseModel): + """ + Configuration for compilation. + It has three parts: + - Top-level Compilation control: + - level: the level of compilation. + - 0: no compilation. + - 1: dynamo as is. + - 2: dynamo once. + - 3: piecewise compilation. + - custom_ops: fine-grained control over which custom ops to enable/disable. + Use 'all' to enable all, 'none' to disable all. + Also specify a list of custom op names to enable (prefixed with a '+'), + or disable (prefixed with a '-'). + Examples: + - 'all,-op1' to enable all except op1 + - 'none,+op1,+op2' to enable only op1 and op2 + By default, all custom ops are enabled when running without Inductor + and disabled when running with Inductor (compile_level >= Inductor). + - CudaGraph capture: + - use_cudagraph: whether to use cudagraph inside compilation. + - False: cudagraph inside compilation is not used. + - True: cudagraph inside compilation is used. It requires + that all input buffers have fixed addresses. + Note that this is orthogonal to the cudagraph capture out + side of compilation. + TODO: move outside cudagraph logic into compilation. + torch.compile will handle cudagraph capture logic in the future. + - cudagraph_capture_sizes: sizes to capture cudagraph. + - None: capture sizes are inferred from compilation context. + - List[int]: capture sizes are specified. + - cudagraph_num_of_warmups: number of warmup runs for cudagraph. + It means the first several runs will be treated as warmup runs. + Only after that, the execution will be recorded, and the recorded + cudagraph will be used for subsequent runs. + - cudagraph_copy_inputs: whether to copy input tensors for + cudagraph. If the caller can guarantee that the same input buffers + are always used, it can set this to False. Otherwise, it should + set this to True, and the compiler will copy the input to an + internally managed buffer. Default is False. + - Inductor compilation: + - use_inductor: whether to use inductor compilation. + - False: inductor compilation is not used. graph runs in eager. + - True: inductor compilation is used. one graph for symbolic shape + is compiled. In addition, compile for different sizes specified + in inductor_compile_sizes, using configurations + in inductor_compile_config. + - inductor_compile_sizes: sizes to compile for inductor. + - inductor_specialize_for_cudagraph_no_more_than: an optional integer + to specialize inductor for cudagraph sizes no more than the + specified size. It is useful when we want to specialize inductor + with a subset of cudagraph sizes. + - inductor_compile_config: additional configurations for inductor. + - None: use default configurations. + - inductor_passes: additional passes for inductor. 
It is a dictionary + from pass name to pass function qualified name. We use function + name because the config uses json format. If we pass the config + from Python, functions can also be passed directly via Python object + constructor, e.g. `CompilationConfig(inductor_passes={"a": func})` + - custom inductor passes: + - dump_graph_stages: list of stages for which we want to dump the graph. + Each pass defines its own stages (before, after, maybe in-between). + - dump_graph_dir: directory to dump the graph. Default is . + - enable_fusion: whether to enable the custom fusion pass. + TODO better pass enabling system. + + Why we have different sizes for cudagraph and inductor: + - cudagraph: a cudagraph captured for a specific size can only be used + for the same size. We need to capture all the sizes we want to use. + - inductor: a graph compiled by inductor for a general shape can be used + for different sizes. Inductor can also compile for specific sizes, + where it can have more information to optimize the graph with fully + static shapes. However, we find the general shape compilation is + sufficient for most cases. It might be beneficial to compile for + certain small batchsizes, where inductor is good at optimizing. + """ # noqa + level: int = 0 + custom_ops: List[str] = Field(default_factory=list) + + use_inductor: bool = True + inductor_specialize_for_cudagraph_no_more_than: Optional[int] = None + inductor_compile_sizes: Optional[List[int]] = Field(default_factory=dict) + inductor_compile_config: Dict = Field(default_factory=dict) + inductor_passes: Dict[str, str] = Field(default_factory=dict) + + use_cudagraph: bool = False + non_cudagraph_ops: List[str] = Field(default_factory=list) + cudagraph_num_of_warmups: int = 0 + cudagraph_capture_sizes: Optional[List[int]] = None + cudagraph_copy_inputs: bool = False + + dump_graph_stages: List[str] = Field(default_factory=list) + dump_graph_dir: Path = Field(default=Path(".")) + enable_fusion: bool = True + + # not configurable, computed after init + compile_sizes: List[int] = PrivateAttr + capture_sizes: List[int] = PrivateAttr + + def model_post_init(self, __context: Any) -> None: + self.level = envs.VLLM_TORCH_COMPILE_LEVEL + + count_none = self.custom_ops.count("none") + count_all = self.custom_ops.count("all") + assert count_none + count_all <= 1, "Can only specify 'none' or 'all'" + + for k, v in self.inductor_passes.items(): + if not isinstance(v, str): + assert callable(v), ( + f"pass {k} should be a function or a qualified name") + self.inductor_compile_config[k] = v + continue + + # resolve function from qualified name + names = v.split(".") + module = ".".join(names[:-1]) + func_name = names[-1] + func = __import__(module).__dict__[func_name] + self.inductor_compile_config[k] = func + + def init_during_runtime(self): + """To complete the initialization of config, + we need to know the compile context, which is only available + during the first run of the model. 
+ """ + from vllm.compilation.compile_context import get_compile_context + context = get_compile_context() + context = copy.deepcopy(context) if context is not None else [] + sizes_to_specialize: List[int] = context + if self.cudagraph_capture_sizes is None: + self.capture_sizes = sizes_to_specialize + else: + self.capture_sizes = self.cudagraph_capture_sizes + logger.info(("cudagraph sizes specified by model runner" + " %s is overridden by config %s"), + sizes_to_specialize, self.cudagraph_capture_sizes) + if self.inductor_specialize_for_cudagraph_no_more_than is not None: + assert self.inductor_compile_sizes is None, ( + "inductor_compile_sizes should be None when " + "inductor_specialize_for_cudagraph_no_more_than is not None") + self.compile_sizes = [ + x for x in self.capture_sizes + if x <= self.inductor_specialize_for_cudagraph_no_more_than + ] + else: + assert self.inductor_compile_sizes is not None, ( + "inductor_compile_sizes should not be None when " + "inductor_specialize_for_cudagraph_no_more_than is None") + self.compile_sizes = self.inductor_compile_sizes + + @staticmethod + def select_and_init_config() -> "CompilationConfig": + """The order of selecting config is: + 1. Use the config specified in environment variable. + 2. Use the config specified in plugins. + 3. Use the default config. + """ + config_path = envs.VLLM_TORCH_COMPILE_CONFIG + if config_path is not None: + with open(config_path) as json_file: + config = CompilationConfig.model_validate_json( + json_file.read()) + else: + from vllm.plugins import get_compilation_config + predefined_config = get_compilation_config() + config = predefined_config if predefined_config is not None else ( + CompilationConfig()) + + return config + + @dataclass class VllmConfig: """Dataclass which contains all vllm-related configuration. This @@ -2073,6 +2254,8 @@ class VllmConfig: observability_config: Optional[ObservabilityConfig] = None prompt_adapter_config: Optional[PromptAdapterConfig] = None quant_config: Optional[QuantizationConfig] = None + compilation_config: CompilationConfig = field(default=None, + init=True) # type: ignore @staticmethod def _get_quantization_config( @@ -2133,6 +2316,12 @@ def __post_init__(self): self.quant_config = VllmConfig._get_quantization_config( self.model_config, self.load_config) + if self.compilation_config is None: + self.compilation_config = CompilationConfig.select_and_init_config( + ) + + current_platform.check_and_update_config(self) + def __str__(self): return ("model=%r, speculative_config=%r, tokenizer=%r, " "skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, " diff --git a/vllm/envs.py b/vllm/envs.py index f320e35971f94..716e835a555f1 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -69,7 +69,6 @@ VLLM_SKIP_P2P_CHECK: bool = False VLLM_TORCH_COMPILE_LEVEL: int = 0 VLLM_TORCH_COMPILE_CONFIG: Optional[str] = None - VLLM_CUSTOM_OPS: List[str] = [] VLLM_DISABLED_KERNELS: List[str] = [] VLLM_USE_V1: bool = False VLLM_ENABLE_V1_MULTIPROCESSING: bool = False @@ -217,18 +216,6 @@ def get_default_config_root(): "VLLM_TORCH_COMPILE_CONFIG": lambda: os.environ.get("VLLM_TORCH_COMPILE_CONFIG", None), - # Fine-grained control over which custom ops to enable/disable. - # Use 'all' to enable all, 'none' to disable all. - # Also specify a list of custom op names to enable (prefixed with a '+'), - # or disable (prefixed with a '-'). 
- # Examples: - # - 'all,-op1' to enable all except op1 - # - 'none,+op1,+op2' to enable only op1 and op2 - # By default, all custom ops are enabled when running without Inductor - # and disabled when running with Inductor (compile_level >= Inductor). - "VLLM_CUSTOM_OPS": - lambda: os.environ.get("VLLM_CUSTOM_OPS", "").replace(" ", "").split(","), - # local rank of the process in the distributed setting, used to determine # the GPU device id "LOCAL_RANK": diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index 24d75f4df4e02..6ae7d7cf6964f 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -1,12 +1,10 @@ -from functools import lru_cache from typing import Dict, Type import torch.nn as nn -import vllm.envs as envs -from vllm.compilation.levels import CompilationLevel from vllm.logger import init_logger from vllm.platforms import current_platform +from vllm.plugins import get_current_vllm_config from vllm.utils import print_warning_once logger = init_logger(__name__) @@ -87,6 +85,8 @@ def dispatch_forward(self): @classmethod def enabled(cls) -> bool: # if no name, then it was not registered + compilation_config = get_current_vllm_config().compilation_config + custom_ops = compilation_config.custom_ops if not hasattr(cls, "name"): print_warning_once( f"Custom op {cls.__name__} was not registered, " @@ -94,22 +94,25 @@ def enabled(cls) -> bool: f"It will be enabled/disabled based on the global settings.") return CustomOp.default_on() - enabled = f"+{cls.name}" in envs.VLLM_CUSTOM_OPS - disabled = f"-{cls.name}" in envs.VLLM_CUSTOM_OPS + enabled = f"+{cls.name}" in custom_ops + disabled = f"-{cls.name}" in custom_ops assert not (enabled and disabled), f"Cannot enable and disable {cls.name}" return (CustomOp.default_on() or enabled) and not disabled - # On by default if VLLM_TORCH_COMPILE_LEVEL < CompilationLevel.PIECEWISE - # Specifying 'all' or 'none' in VLLM_CUSTOM_OPS takes precedence. @staticmethod - @lru_cache def default_on() -> bool: - count_none = envs.VLLM_CUSTOM_OPS.count("none") - count_all = envs.VLLM_CUSTOM_OPS.count("all") - assert count_none + count_all <= 1, "Can only specify 'none' or 'all'" - return envs.VLLM_TORCH_COMPILE_LEVEL < CompilationLevel.PIECEWISE and \ + """ + On by default if level < CompilationLevel.PIECEWISE + Specifying 'all' or 'none' in custom_op takes precedence. + """ + from vllm.config import CompilationLevel + compilation_config = get_current_vllm_config().compilation_config + custom_ops = compilation_config.custom_ops + count_none = custom_ops.count("none") + count_all = custom_ops.count("all") + return compilation_config.level < CompilationLevel.PIECEWISE and \ not count_none > 0 or count_all > 0 # Dictionary of all custom ops (classes, indexed by registered name). 
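
With VLLM_CUSTOM_OPS removed, the enable/disable list documented on CompilationConfig.custom_ops ('all', 'none', '+op', '-op') is the only toggle that CustomOp.enabled() consults. A minimal sketch of setting it through the plugin helper used elsewhere in this patch follows; the "rms_norm" op name is illustrative, and any name registered via CustomOp could appear in the list.

from vllm.config import CompilationConfig
from vllm.plugins import set_compilation_config

# Roughly equivalent to the removed VLLM_CUSTOM_OPS="none,+rms_norm" env var:
# disable every custom op, then re-enable the one named "rms_norm".
set_compilation_config(CompilationConfig(custom_ops=["none", "+rms_norm"]))

As the patched default_on() shows, leaving custom_ops empty keeps every op enabled below CompilationLevel.PIECEWISE, so the list only needs to be set when overriding that default.
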
diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 140b61fe6d56a..0f8b81c3ef40c 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -42,6 +42,7 @@ safetensors_weights_iterator) from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform +from vllm.plugins import set_current_vllm_config from vllm.utils import is_pin_memory_available @@ -97,7 +98,8 @@ def _initialize_model(vllm_config: VllmConfig, prefix: str = "") -> nn.Module: all_params = [param.name for param in signatures.parameters.values()] if "vllm_config" in all_params and "prefix" in all_params: # new-style model class - return model_class(vllm_config=vllm_config, prefix=prefix) + with set_current_vllm_config(vllm_config): + return model_class(vllm_config=vllm_config, prefix=prefix) msg = ("vLLM model class should accept `vllm_config` and `prefix` as " "input arguments. Possibly you have an old-style model class" " registered from out of tree and it is used for new vLLM version. " @@ -121,7 +123,8 @@ def _initialize_model(vllm_config: VllmConfig, prefix: str = "") -> nn.Module: kwargs["lora_config"] = vllm_config.lora_config if "scheduler_config" in all_params: kwargs["scheduler_config"] = vllm_config.scheduler_config - return model_class(**kwargs) + with set_current_vllm_config(vllm_config): + return model_class(**kwargs) class BaseModelLoader(ABC): diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 81d8bdae2383c..970c0d1be617e 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -1,10 +1,15 @@ import enum import random -from typing import NamedTuple, Optional, Tuple, Union +from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple, Union import numpy as np import torch +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + class PlatformEnum(enum.Enum): CUDA = enum.auto() @@ -129,6 +134,19 @@ def seed_everything(cls, seed: int) -> None: np.random.seed(seed) torch.manual_seed(seed) + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + """ + Check and update the configuration for the current platform. + + It can raise an exception if the configuration is not compatible with + the current platform, or it can update the configuration to make it + compatible with the current platform. + + The config is passed by reference, so it can be modified in place. + """ + pass + class UnspecifiedPlatform(Platform): _enum = PlatformEnum.UNSPECIFIED diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 8d0ce47df4040..c2e22bfc09f22 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -1,18 +1,16 @@ import os +from typing import TYPE_CHECKING import torch -import vllm.envs as envs -from vllm.compilation.levels import CompilationLevel from vllm.plugins import set_torch_compile_backend from .interface import Platform, PlatformEnum -if "VLLM_TORCH_COMPILE_LEVEL" not in os.environ: - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(CompilationLevel.DYNAMO_ONCE) - -assert envs.VLLM_TORCH_COMPILE_LEVEL < CompilationLevel.PIECEWISE,\ - "TPU does not support Inductor." 
+if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None set_torch_compile_backend("openxla") @@ -31,3 +29,12 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: @classmethod def inference_mode(cls): return torch.no_grad() + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + from vllm.config import CompilationLevel + compilation_config = vllm_config.compilation_config + if "VLLM_TORCH_COMPILE_LEVEL" not in os.environ: + compilation_config.level = CompilationLevel.DYNAMO_ONCE + assert compilation_config.level < CompilationLevel.PIECEWISE,\ + "TPU does not support Inductor." diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 7b1bbb14c5302..c20b9ec891d5d 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -1,11 +1,11 @@ import logging +from contextlib import contextmanager from typing import TYPE_CHECKING, Callable, Optional, Union import vllm.envs as envs if TYPE_CHECKING: - from vllm.compilation.config import CompilationConfig - from vllm.config import VllmConfig + from vllm.config import CompilationConfig, VllmConfig else: CompilationConfig = None VllmConfig = None @@ -72,3 +72,29 @@ def set_compilation_config(config: Optional[CompilationConfig]): def get_compilation_config() -> Optional[CompilationConfig]: return _compilation_config + + +_current_vllm_config: Optional[VllmConfig] = None + + +@contextmanager +def set_current_vllm_config(vllm_config: VllmConfig): + """ + Temporarily set the current VLLM config. + Used during model initialization. + We save the current VLLM config in a global variable, + so that all modules can access it, e.g. custom ops + can access the VLLM config to determine how to dispatch. + """ + global _current_vllm_config + old_vllm_config = _current_vllm_config + try: + _current_vllm_config = vllm_config + yield + finally: + _current_vllm_config = old_vllm_config + + +def get_current_vllm_config() -> VllmConfig: + assert _current_vllm_config is not None, "Current VLLM config is not set." + return _current_vllm_config diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index eebd1de96537f..d60f93a44f6dd 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1,4 +1,3 @@ -import os import time from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple @@ -8,11 +7,8 @@ import torch.distributed import torch.nn as nn -from vllm import envs from vllm.compilation.compile_context import set_compile_context -from vllm.compilation.config import CompilationConfig -from vllm.compilation.levels import CompilationLevel -from vllm.config import VllmConfig +from vllm.config import CompilationConfig, CompilationLevel, VllmConfig from vllm.forward_context import set_forward_context from vllm.inputs import INPUT_REGISTRY, InputRegistry from vllm.logger import init_logger @@ -99,7 +95,7 @@ def __init__( pin_memory=self.pin_memory, ) - self.use_cuda_graph = (envs.VLLM_TORCH_COMPILE_LEVEL + self.use_cuda_graph = (self.vllm_config.compilation_config.level == CompilationLevel.PIECEWISE and not self.model_config.enforce_eager) # TODO(woosuk): Provide an option to tune the max cudagraph batch size. @@ -517,9 +513,9 @@ def load_model(self) -> None: # CUDA graphs do not work properly with the custom CUDA kernels. # FIXME(woosuk): Disable inductor to reduce the compilation time # and avoid any potential issues with the inductor. 
- os.environ["VLLM_CUSTOM_OPS"] = "none" set_compilation_config( CompilationConfig( + custom_ops=["none"], use_cudagraph=True, non_cudagraph_ops=["vllm.unified_v1_flash_attention"], use_inductor=True, diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 22ee3f9f863e4..fd89f95445565 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -19,8 +19,7 @@ from vllm.attention.backends.abstract import AttentionState from vllm.attention.backends.utils import CommonAttentionState from vllm.compilation.compile_context import set_compile_context -from vllm.compilation.levels import CompilationLevel -from vllm.config import VllmConfig +from vllm.config import CompilationLevel, VllmConfig from vllm.core.scheduler import SchedulerOutputs from vllm.distributed import get_pp_group from vllm.distributed.parallel_state import graph_capture @@ -1142,8 +1141,8 @@ def load_model(self) -> None: "provided. Defaulting to scaling factors of 1.0. " "This may lead to less accurate results!") - if envs.VLLM_TORCH_COMPILE_LEVEL == CompilationLevel.DYNAMO_AS_IS \ - and supports_dynamo(): + if self.vllm_config.compilation_config.level ==\ + CompilationLevel.DYNAMO_AS_IS and supports_dynamo(): from vllm.plugins import get_torch_compile_backend backend = get_torch_compile_backend() or "eager" self.model = torch.compile( diff --git a/vllm/worker/tpu_model_runner.py b/vllm/worker/tpu_model_runner.py index a721186137328..d7a641857a613 100644 --- a/vllm/worker/tpu_model_runner.py +++ b/vllm/worker/tpu_model_runner.py @@ -140,7 +140,7 @@ def load_model(self) -> None: model = get_model(vllm_config=self.vllm_config) model = model.eval() xm.wait_device_ops() - self.model = ModelWrapper(model) + self.model = ModelWrapper(model, self.vllm_config) def _dummy_run( self, @@ -669,13 +669,15 @@ def execute_model( class ModelWrapper(TorchCompileWrapperWithCustomDispatcher): - def __init__(self, model: nn.Module): + def __init__(self, model: nn.Module, vllm_config: VllmConfig): self.model = model compiled_callable = torch.compile(self.forward, backend="openxla", fullgraph=True, dynamic=False) - super().__init__(compiled_callable) + super().__init__( + compiled_callable, + compilation_level=vllm_config.compilation_config.level) def __call__(self, *args, is_prompt: bool, **kwargs): if len(self.compiled_codes) < 3 or not self.use_custom_dispatcher: From 643ecf7b11a3e74c838f438cfc1b3e59c018853b Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Sat, 16 Nov 2024 21:18:46 -0800 Subject: [PATCH 014/397] [V1] Refactor model executable interface for all text-only language models (#10374) Signed-off-by: Roger Wang --- vllm/model_executor/models/arctic.py | 16 ++++++++++++++-- vllm/model_executor/models/baichuan.py | 16 ++++++++++++++-- vllm/model_executor/models/bloom.py | 17 ++++++++++++++--- vllm/model_executor/models/commandr.py | 16 ++++++++++++++-- vllm/model_executor/models/dbrx.py | 16 ++++++++++++++-- vllm/model_executor/models/deepseek.py | 16 ++++++++++++++-- vllm/model_executor/models/deepseek_v2.py | 16 ++++++++++++++-- vllm/model_executor/models/eagle.py | 13 ++++++++++--- vllm/model_executor/models/exaone.py | 7 ++++++- vllm/model_executor/models/falcon.py | 16 ++++++++++++++-- vllm/model_executor/models/gemma.py | 7 ++++++- vllm/model_executor/models/gemma2.py | 12 ++++++++++-- vllm/model_executor/models/gpt2.py | 7 +++++-- vllm/model_executor/models/gpt_bigcode.py | 17 +++++++++++++---- vllm/model_executor/models/gpt_j.py | 16 ++++++++++++++-- 
vllm/model_executor/models/gpt_neox.py | 16 ++++++++++++++-- vllm/model_executor/models/granite.py | 7 ++++++- vllm/model_executor/models/granitemoe.py | 16 ++++++++++++++-- vllm/model_executor/models/internlm2.py | 9 +++++++-- vllm/model_executor/models/jais.py | 14 ++++++++++++-- vllm/model_executor/models/jamba.py | 16 ++++++++++++++-- vllm/model_executor/models/mamba.py | 15 +++++++++++++-- vllm/model_executor/models/minicpm.py | 7 ++++++- vllm/model_executor/models/mixtral.py | 16 ++++++++++++++-- vllm/model_executor/models/mixtral_quant.py | 16 ++++++++++++++-- vllm/model_executor/models/mpt.py | 16 ++++++++++++++-- vllm/model_executor/models/nemotron.py | 7 ++++++- vllm/model_executor/models/olmo.py | 19 +++++++++++++------ vllm/model_executor/models/olmoe.py | 16 ++++++++++++++-- vllm/model_executor/models/orion.py | 16 ++++++++++++++-- vllm/model_executor/models/persimmon.py | 8 +++++++- vllm/model_executor/models/phi.py | 16 ++++++++++++++-- vllm/model_executor/models/phi3_small.py | 19 +++++++++++-------- vllm/model_executor/models/phimoe.py | 16 ++++++++++++++-- vllm/model_executor/models/qwen.py | 16 ++++++++++++++-- vllm/model_executor/models/qwen2.py | 2 +- vllm/model_executor/models/qwen2_cls.py | 7 ++++++- vllm/model_executor/models/qwen2_moe.py | 16 ++++++++++++++-- vllm/model_executor/models/qwen2_rm.py | 7 ++++++- vllm/model_executor/models/solar.py | 4 +++- vllm/model_executor/models/stablelm.py | 16 ++++++++++++++-- vllm/model_executor/models/starcoder2.py | 16 ++++++++++++++-- vllm/model_executor/models/xverse.py | 16 ++++++++++++++-- 43 files changed, 483 insertions(+), 90 deletions(-) diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index 9ee2a2cc09a24..d52418ee0f6f1 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -389,6 +389,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -396,9 +399,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -439,6 +446,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -446,9 +456,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git 
a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index aabbd31192a40..01ce7c42cd391 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -284,6 +284,9 @@ def __init__( make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -291,9 +294,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None @@ -363,6 +370,9 @@ def __init__( self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -370,9 +380,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/bloom.py b/vllm/model_executor/models/bloom.py index 84adf574af5e2..cf2eee8172769 100644 --- a/vllm/model_executor/models/bloom.py +++ b/vllm/model_executor/models/bloom.py @@ -251,6 +251,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.word_embeddings_layernorm(self.word_embeddings(input_ids)) + def forward( self, input_ids: torch.Tensor, @@ -258,10 +261,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.word_embeddings(input_ids) - hidden_states = self.word_embeddings_layernorm(hidden_states) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -301,6 +307,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -308,9 +317,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, 
IntermediateTensors]: hidden_states = self.transformer(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index cd5c1d6844716..fbb09a64cde9b 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -280,6 +280,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -287,9 +290,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None @@ -354,6 +361,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + @torch.no_grad() def forward( self, @@ -362,9 +372,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/dbrx.py b/vllm/model_executor/models/dbrx.py index fff8710f6b475..3952ff31e5cec 100644 --- a/vllm/model_executor/models/dbrx.py +++ b/vllm/model_executor/models/dbrx.py @@ -321,6 +321,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.d_model)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.wte(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -328,9 +331,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.wte(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors hidden_states = intermediate_tensors["hidden_states"] @@ -376,6 +383,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -383,9 +393,11 @@ def forward( kv_caches: 
List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.transformer(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py index a9bf1440c4d60..36dfea5a65656 100644 --- a/vllm/model_executor/models/deepseek.py +++ b/vllm/model_executor/models/deepseek.py @@ -353,6 +353,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -360,9 +363,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: hidden_states = intermediate_tensors["hidden_states"] @@ -401,6 +408,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -408,9 +418,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index 4fb1eed15a2e7..1e32fe60c7a5b 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -445,6 +445,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -452,9 +455,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None @@ -495,6 +502,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( 
self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -502,9 +512,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/eagle.py b/vllm/model_executor/models/eagle.py index 85c51e8404584..f138d13630263 100644 --- a/vllm/model_executor/models/eagle.py +++ b/vllm/model_executor/models/eagle.py @@ -78,6 +78,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): def sampler(self): return self.model.sampler + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -86,11 +89,14 @@ def forward( attn_metadata: AttentionMetadata, previous_hidden_states: torch.Tensor, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: - tok_embeds = self.model.model.embed_tokens(input_ids) + if inputs_embeds is None: + inputs_embeds = self.get_input_embeddings(input_ids) + inputs_embeds = self.fc( - torch.cat([tok_embeds, previous_hidden_states], dim=-1)) + torch.cat([inputs_embeds, previous_hidden_states], dim=-1)) inputs_embeds[positions == 0] = 0 # masking inputs at position=0 @@ -100,7 +106,8 @@ def forward( positions=positions, kv_caches=kv_caches, attn_metadata=attn_metadata, - intermediate_tensors=intermediate_tensors) + intermediate_tensors=intermediate_tensors, + ) return hidden_states def compute_logits(self, hidden_states: torch.Tensor, diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index cd3e7da657e0e..52dd603ca558d 100644 --- a/vllm/model_executor/models/exaone.py +++ b/vllm/model_executor/models/exaone.py @@ -479,6 +479,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -486,9 +489,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: model_output = self.transformer(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return model_output def compute_logits( diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py index b3dbf063ac298..e97abe949ccdb 100644 --- a/vllm/model_executor/models/falcon.py +++ b/vllm/model_executor/models/falcon.py @@ -367,6 +367,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return 
self.word_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -374,9 +377,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.word_embeddings(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: hidden_states = intermediate_tensors["hidden_states"] for i in range(self.start_layer, self.end_layer): @@ -432,6 +439,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.LongTensor, @@ -439,9 +449,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.transformer(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index 55baba809e58f..ace13664c6ea6 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -390,6 +390,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -397,9 +400,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index eeb3fd98a7eac..a60b4e73a76d4 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -272,6 +272,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: Optional[torch.Tensor], @@ -285,7 +288,7 @@ def forward( if inputs_embeds is not None: hidden_states = inputs_embeds else: - hidden_states = self.embed_tokens(input_ids) + hidden_states = self.get_input_embeddings(input_ids) hidden_states *= self.normalizer residual = None else: @@ -414,6 +417,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return 
self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -421,9 +427,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/gpt2.py b/vllm/model_executor/models/gpt2.py index cc85693f99526..fa0fdad28d161 100644 --- a/vllm/model_executor/models/gpt2.py +++ b/vllm/model_executor/models/gpt2.py @@ -209,6 +209,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.n_embd)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.wte(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -220,7 +223,7 @@ def forward( ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: if inputs_embeds is None: - inputs_embeds = self.wte(input_ids) + inputs_embeds = self.get_input_embeddings(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds else: @@ -262,7 +265,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.transformer.make_empty_intermediate_tensors) def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: - return self.transformer.wte(input_ids) + return self.transformer.get_input_embeddings(input_ids) def forward( self, diff --git a/vllm/model_executor/models/gpt_bigcode.py b/vllm/model_executor/models/gpt_bigcode.py index ab25c66c3a887..b2fc79d0d36dc 100644 --- a/vllm/model_executor/models/gpt_bigcode.py +++ b/vllm/model_executor/models/gpt_bigcode.py @@ -218,6 +218,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.n_embd)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.wte(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -225,11 +228,12 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - inputs_embeds = self.wte(input_ids) - position_embeds = self.wpe(position_ids) - hidden_states = inputs_embeds + position_embeds + if inputs_embeds is None: + inputs_embeds = self.get_input_embeddings(input_ids) + hidden_states = inputs_embeds + self.wpe(position_ids) else: hidden_states = intermediate_tensors["hidden_states"] @@ -285,6 +289,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -292,9 +299,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.transformer(input_ids, positions, kv_caches, - 
attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py index a83d03480dde1..cec3fd12a67d6 100644 --- a/vllm/model_executor/models/gpt_j.py +++ b/vllm/model_executor/models/gpt_j.py @@ -201,6 +201,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.n_embd)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.wte(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -208,9 +211,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.wte(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: hidden_states = intermediate_tensors["hidden_states"] for i in range(self.start_layer, self.end_layer): @@ -250,6 +257,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -257,9 +267,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.transformer(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py index 794b141bfa4aa..11f286d6bcba0 100644 --- a/vllm/model_executor/models/gpt_neox.py +++ b/vllm/model_executor/models/gpt_neox.py @@ -214,6 +214,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_in(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -221,9 +224,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_in(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: hidden_states = intermediate_tensors["hidden_states"] for i in range(self.start_layer, self.end_layer): @@ -262,6 +269,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.gpt_neox.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.gpt_neox.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -269,9 +279,11 @@ def forward( kv_caches: List[torch.Tensor], 
attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.gpt_neox(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index d1e6e31f2b8d1..cb2583e69d88d 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -409,6 +409,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): else: self.lm_head = PPMissingLayer() + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -416,9 +419,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: model_output = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return model_output def compute_logits( diff --git a/vllm/model_executor/models/granitemoe.py b/vllm/model_executor/models/granitemoe.py index 2ed115c56af45..f437dd521a7d5 100644 --- a/vllm/model_executor/models/granitemoe.py +++ b/vllm/model_executor/models/granitemoe.py @@ -277,6 +277,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -284,9 +287,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) hidden_states *= self.embedding_multiplier residual = None else: @@ -366,6 +373,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.sampler = get_sampler() + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -373,9 +383,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index 21fa6983063b8..19bfe16e4d5fc 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -290,7 +290,7 @@ def forward( if inputs_embeds is not None: hidden_states = inputs_embeds else: - hidden_states = self.tok_embeddings(input_ids) + hidden_states = self.get_input_embeddings(input_ids) residual = None else: 
assert intermediate_tensors is not None @@ -335,6 +335,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -342,9 +345,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/jais.py b/vllm/model_executor/models/jais.py index 65800c44e5a93..ee49ffb3cd87f 100644 --- a/vllm/model_executor/models/jais.py +++ b/vllm/model_executor/models/jais.py @@ -250,6 +250,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.n_embd)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.wte(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -257,9 +260,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[IntermediateTensors, torch.Tensor]: if get_pp_group().is_first_rank: - inputs_embeds = self.wte(input_ids) + if inputs_embeds is None: + inputs_embeds = self.get_input_embeddings(input_ids) if self.wpe is not None: position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds @@ -311,6 +316,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -318,9 +326,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[IntermediateTensors, torch.Tensor]: hidden_states = self.transformer(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py index 88fb8d5cf555a..5612dd6886385 100644 --- a/vllm/model_executor/models/jamba.py +++ b/vllm/model_executor/models/jamba.py @@ -292,6 +292,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -299,8 +302,12 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, mamba_cache_params: MambaCacheParams, + inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = 
self.get_input_embeddings(input_ids) residual = None for i in range(len(self.layers)): layer = self.layers[i] @@ -381,12 +388,16 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.vocab_size) self.sampler = get_sampler() + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs): if self.mamba_cache is None: max_batch_size = (_get_graph_batch_size( @@ -409,7 +420,8 @@ def forward(self, mamba_cache_tensors[1], state_indices_tensor) hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, mamba_cache_params) + attn_metadata, mamba_cache_params, + inputs_embeds) return hidden_states def copy_inputs_before_cuda_graphs(self, input_buffers, **kwargs): diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index 55c575e22a0f6..ac0d265a961f0 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -106,15 +106,22 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.norm_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, positions: torch.Tensor, attn_metadata: AttentionMetadata, mamba_cache_params: MambaCacheParams, + inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: - hidden_states = self.embeddings(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None for i in range(len(self.layers)): @@ -168,12 +175,16 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.vocab_size) self.sampler = get_sampler() + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.backbone.get_input_embeddings(input_ids) + def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs): if self.mamba_cache is None: max_batch_size = (_get_graph_batch_size( @@ -194,7 +205,7 @@ def forward(self, state_indices_tensor) hidden_states = self.backbone(input_ids, positions, attn_metadata, - mamba_cache_params) + mamba_cache_params, inputs_embeds) return hidden_states diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index 2db953329fd91..6b67266c53362 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -504,6 +504,9 @@ def _init_model(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model = MiniCPMModel(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -511,9 +514,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: 
hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py index 3eb2f60fd4fc7..eebf5bab5a288 100644 --- a/vllm/model_executor/models/mixtral.py +++ b/vllm/model_executor/models/mixtral.py @@ -281,6 +281,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -288,9 +291,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None @@ -363,6 +370,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -370,9 +380,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/mixtral_quant.py b/vllm/model_executor/models/mixtral_quant.py index 95cfb6f54dc10..af2e9586988df 100644 --- a/vllm/model_executor/models/mixtral_quant.py +++ b/vllm/model_executor/models/mixtral_quant.py @@ -318,6 +318,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -325,9 +328,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None @@ -368,6 +375,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -375,9 +385,11 @@ def forward( kv_caches: 
List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/mpt.py b/vllm/model_executor/models/mpt.py index e15c0fe8db060..3c74ef2448abb 100644 --- a/vllm/model_executor/models/mpt.py +++ b/vllm/model_executor/models/mpt.py @@ -237,6 +237,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.d_model)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.wte(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -244,9 +247,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.wte(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -283,6 +290,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -290,9 +300,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.transformer(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index e09d7088a69ce..eb45beae7d21a 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -440,6 +440,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -447,9 +450,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: model_output = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return model_output def compute_logits( diff --git a/vllm/model_executor/models/olmo.py b/vllm/model_executor/models/olmo.py index 3467ae5896494..98d4e1ec320a4 100644 --- a/vllm/model_executor/models/olmo.py +++ b/vllm/model_executor/models/olmo.py @@ -248,6 +248,9 @@ def __init__(self, *, 
vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -255,17 +258,16 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: """ :param input_ids: A tensor of shape `(batch_size, seq_len)`. """ if get_pp_group().is_first_rank: - # Get embeddings of input. - # shape: (batch_size, seq_len, d_model) - inputs_embeds = self.embed_tokens(input_ids) - - # embed positions - hidden_states = inputs_embeds + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -315,6 +317,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -322,6 +327,7 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model( input_ids=input_ids, @@ -329,6 +335,7 @@ def forward( kv_caches=kv_caches, attn_metadata=attn_metadata, intermediate_tensors=intermediate_tensors, + inputs_embeds=inputs_embeds, ) return hidden_states diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py index 3d31919edd862..f4eebab8c98dd 100644 --- a/vllm/model_executor/models/olmoe.py +++ b/vllm/model_executor/models/olmoe.py @@ -269,6 +269,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -276,9 +279,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None @@ -326,6 +333,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -333,9 +343,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = 
self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits(self, hidden_states: torch.Tensor, diff --git a/vllm/model_executor/models/orion.py b/vllm/model_executor/models/orion.py index 38821c8288347..39d659c49cbcf 100644 --- a/vllm/model_executor/models/orion.py +++ b/vllm/model_executor/models/orion.py @@ -237,6 +237,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): "hidden_states", ], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -244,9 +247,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -286,6 +293,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -293,9 +303,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/persimmon.py b/vllm/model_executor/models/persimmon.py index 2e34a7cc30873..62c509153a111 100644 --- a/vllm/model_executor/models/persimmon.py +++ b/vllm/model_executor/models/persimmon.py @@ -235,6 +235,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -248,7 +251,7 @@ def forward( if inputs_embeds is not None: hidden_states = inputs_embeds else: - hidden_states = self.embed_tokens(input_ids) + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -282,6 +285,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py index 262f6996fc374..a2ab0d74c48db 100644 --- a/vllm/model_executor/models/phi.py +++ b/vllm/model_executor/models/phi.py @@ -218,6 +218,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): 
make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -225,9 +228,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -303,6 +310,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -310,9 +320,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states diff --git a/vllm/model_executor/models/phi3_small.py b/vllm/model_executor/models/phi3_small.py index 8a5fb6d303e60..2139cec441807 100644 --- a/vllm/model_executor/models/phi3_small.py +++ b/vllm/model_executor/models/phi3_small.py @@ -324,11 +324,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) - def get_input_embeddings(self): - return self.embed_tokens - - def set_input_embeddings(self, value): - self.embed_tokens = value + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) def forward( self, @@ -337,9 +334,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor], ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) if (self.mup_embedding_multiplier is not None and self.mup_embedding_multiplier > 0.0): hidden_states = hidden_states * self.mup_embedding_multiplier @@ -397,8 +398,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): else: self.dummy_token_indices = None - def get_input_embeddings(self): - return self.model.embed_tokens + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) def set_input_embeddings(self, value): self.model.embed_tokens = value @@ -433,6 +434,7 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: output_hidden_states = self.model( input_ids=input_ids, @@ -440,6 +442,7 @@ def 
forward( kv_caches=kv_caches, attn_metadata=attn_metadata, intermediate_tensors=intermediate_tensors, + inputs_embeds=inputs_embeds, ) output_hidden_states = output_hidden_states return output_hidden_states diff --git a/vllm/model_executor/models/phimoe.py b/vllm/model_executor/models/phimoe.py index 6d71a8949111b..b7e70f8fa2c6d 100644 --- a/vllm/model_executor/models/phimoe.py +++ b/vllm/model_executor/models/phimoe.py @@ -465,6 +465,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -472,9 +475,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None @@ -560,6 +567,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -567,9 +577,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits(self, hidden_states: torch.Tensor, diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 3d26ede722dd1..447632cefcd9a 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -578,6 +578,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): quant_config=quant_config) if hasattr( config, "visual") else None + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.wte(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -586,6 +589,7 @@ def forward( attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], pixel_values: Optional[QwenImageInputs], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: img_pos = None # If pixel / visual embeddings are provided, this is a visual model @@ -606,6 +610,10 @@ def forward( ) if get_pp_group().is_first_rank: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) hidden_states = self.wte(input_ids) # Merge the image embeddings into the hidden states if actually have # visual features and the corresponding image tokens @@ -915,6 +923,9 @@ def _get_image_input_type( ) return None + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.transformer.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ 
-922,7 +933,8 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, - pixel_values: Optional[torch.Tensor] = None + pixel_values: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if intermediate_tensors is not None: input_ids = None @@ -932,7 +944,7 @@ def forward( hidden_states = self.transformer(input_ids, positions, kv_caches, attn_metadata, intermediate_tensors, - pixel_values) + pixel_values, inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 431e397e1e10d..8f10df808c216 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -309,7 +309,7 @@ def forward( if inputs_embeds is not None: hidden_states = inputs_embeds else: - hidden_states = self.embed_tokens(input_ids) + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None diff --git a/vllm/model_executor/models/qwen2_cls.py b/vllm/model_executor/models/qwen2_cls.py index 120403e948686..07eb330620a43 100644 --- a/vllm/model_executor/models/qwen2_cls.py +++ b/vllm/model_executor/models/qwen2_cls.py @@ -72,6 +72,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): normalize=False, softmax=True) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -79,9 +82,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) logits, _ = self.score(hidden_states) return logits diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 51c0cd5664fd2..249d94b5d95e9 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -344,6 +344,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -351,9 +354,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: assert intermediate_tensors is not None @@ -395,6 +402,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -402,9 +412,11 @@ def forward( kv_caches: List[torch.Tensor], 
attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/qwen2_rm.py b/vllm/model_executor/models/qwen2_rm.py index 55843d8325348..6db467af334f5 100644 --- a/vllm/model_executor/models/qwen2_rm.py +++ b/vllm/model_executor/models/qwen2_rm.py @@ -85,6 +85,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -92,9 +95,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) logits, _ = self.score(hidden_states) return logits diff --git a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py index 4f03ca501fb68..affb2c975ce4a 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -456,9 +456,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: model_output = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return model_output def compute_logits(self, hidden_states: torch.Tensor, diff --git a/vllm/model_executor/models/stablelm.py b/vllm/model_executor/models/stablelm.py index 1125f9e9f9617..99acce596602e 100644 --- a/vllm/model_executor/models/stablelm.py +++ b/vllm/model_executor/models/stablelm.py @@ -218,6 +218,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -225,9 +228,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -265,6 +272,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, 
@@ -272,9 +282,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py index ce7a7957f52c4..0ef940acebb93 100644 --- a/vllm/model_executor/models/starcoder2.py +++ b/vllm/model_executor/models/starcoder2.py @@ -221,6 +221,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory(["hidden_states"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -228,9 +231,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) else: assert intermediate_tensors is not None hidden_states = intermediate_tensors["hidden_states"] @@ -273,6 +280,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -280,9 +290,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py index 153527da20d75..51172d8782a70 100644 --- a/vllm/model_executor/models/xverse.py +++ b/vllm/model_executor/models/xverse.py @@ -252,6 +252,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -259,9 +262,13 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: if get_pp_group().is_first_rank: - hidden_states = self.embed_tokens(input_ids) + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) residual = None else: hidden_states = intermediate_tensors["hidden_states"] @@ -335,6 +342,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): 
self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -342,9 +352,11 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + attn_metadata, intermediate_tensors, + inputs_embeds) return hidden_states def compute_logits( From 905d0f0af4e2c07893e36778da9ab02bde01ace8 Mon Sep 17 00:00:00 2001 From: "Chendi.Xue" Date: Sun, 17 Nov 2024 00:58:22 -0600 Subject: [PATCH 015/397] [CI/Build] Fix IDC hpu [Device not found] issue (#10384) Signed-off-by: Chendi Xue --- .buildkite/run-hpu-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/run-hpu-test.sh b/.buildkite/run-hpu-test.sh index 4505dc7a9373c..fa4f74fca7a11 100644 --- a/.buildkite/run-hpu-test.sh +++ b/.buildkite/run-hpu-test.sh @@ -13,4 +13,4 @@ trap remove_docker_container EXIT remove_docker_container # Run the image and launch offline inference -docker run --runtime=habana --name=hpu-test --network=host -e VLLM_SKIP_WARMUP=true --entrypoint="" hpu-test-env python3 examples/offline_inference.py \ No newline at end of file +docker run --runtime=habana --name=hpu-test --network=host -e HABANA_VISIBLE_DEVICES=all -e VLLM_SKIP_WARMUP=true --entrypoint="" hpu-test-env python3 examples/offline_inference.py \ No newline at end of file From cf349c4a97adb36354bdc2b14448ea55279d1575 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Sun, 17 Nov 2024 15:12:04 +0800 Subject: [PATCH 016/397] [Bugfix][CPU] Fix CPU embedding runner with tensor parallel (#10394) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/worker/cpu_embedding_model_runner.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/vllm/worker/cpu_embedding_model_runner.py b/vllm/worker/cpu_embedding_model_runner.py index 7053075bf4d8f..d0b8fec48d74f 100644 --- a/vllm/worker/cpu_embedding_model_runner.py +++ b/vllm/worker/cpu_embedding_model_runner.py @@ -66,6 +66,10 @@ def execute_model( hidden_states = model_executable(**execute_model_kwargs) + # Only perform pooling in the driver worker. 
+ if not self.is_driver_worker: + return [] + return [ self.model.pooler(hidden_states=hidden_states, pooling_metadata=model_input.pooling_metadata) From 8d74b5aee9e780852de870c936b59707835e84f5 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 16 Nov 2024 23:14:23 -0800 Subject: [PATCH 017/397] [platforms] refactor cpu code (#10402) Signed-off-by: youkaichao --- vllm/executor/cpu_executor.py | 68 +---------------------------------- vllm/platforms/cpu.py | 60 +++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 67 deletions(-) diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 4ceb5a837dd7f..1542a2ae367eb 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -2,9 +2,6 @@ from functools import partial from typing import Any, Awaitable, List, Optional, Set, Tuple, Union -import vllm.envs as envs -from vllm.config import (CacheConfig, ModelConfig, ParallelConfig, - SchedulerConfig) from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase from vllm.executor.multiproc_worker_utils import (ProcessWorkerWrapper, ResultHandler, WorkerMonitor) @@ -13,7 +10,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sequence import ExecuteModelRequest -from vllm.utils import (GiB_bytes, get_distributed_init_method, get_open_port, +from vllm.utils import (get_distributed_init_method, get_open_port, get_vllm_instance_id, make_async) from vllm.worker.worker_base import WorkerWrapperBase @@ -57,13 +54,6 @@ def _init_executor(self) -> None: os.environ["LOCAL_WORLD_SIZE"] = str( self.parallel_config.tensor_parallel_size) - self.model_config = _verify_and_get_model_config(self.model_config) - self.cache_config = _verify_and_get_cache_config(self.cache_config) - self.scheduler_config = _verify_and_get_scheduler_config( - self.scheduler_config) - self.parallel_config = _verify_and_get_parallel_config( - self.parallel_config) - # Multiprocessing-based executor does not support multi-node setting. # Since it only works for single node, we can use the loopback address # 127.0.0.1 for communication. 
@@ -313,62 +303,6 @@ async def check_health_async(self) -> None: self.check_health() -def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig: - # Reminder: Please update docs/source/serving/compatibility_matrix.rst - # If the feature combo become valid - if not config.enforce_eager: - logger.warning( - "CUDA graph is not supported on CPU, fallback to the eager " - "mode.") - config.enforce_eager = True - return config - - -def _verify_and_get_scheduler_config( - config: SchedulerConfig) -> SchedulerConfig: - # Reminder: Please update docs/source/serving/compatibility_matrix.rst - # If the feature combo become valid - if config.chunked_prefill_enabled: - logger.warning("Chunked prefill is not supported on CPU, disable it.") - config.chunked_prefill_enabled = False - - return config - - -def _verify_and_get_cache_config(config: CacheConfig) -> CacheConfig: - # Reminder: Please update docs/source/serving/compatibility_matrix.rst - # If the feature combo become valid - if config.enable_prefix_caching: - logger.warning("Prefix caching is not supported on CPU, disable it.") - config.enable_prefix_caching = False - - kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE - - if kv_cache_space >= 0: - if kv_cache_space == 0: - config.cpu_kvcache_space_bytes = 4 * GiB_bytes # type: ignore - logger.warning("Environment variable VLLM_CPU_KVCACHE_SPACE (GB) " - "for CPU backend is not set, using 4 by default.") - else: - config.cpu_kvcache_space_bytes = kv_cache_space * GiB_bytes # type: ignore - else: - raise RuntimeError( - "Invalid environment variable VLLM_CPU_KVCACHE_SPACE" - f" {kv_cache_space}, expect a positive integer value.") - - return config - - -def _verify_and_get_parallel_config(config: ParallelConfig) -> ParallelConfig: - if (config.distributed_executor_backend is not None - and config.distributed_executor_backend != "mp"): - logger.warning( - "%s is not supported on CPU, fallback to mp distributed executor " - "backend.", config.distributed_executor_backend) - config.distributed_executor_backend = "mp" - return config - - def _driver_method_invoker(driver, method: str, *args, **kwargs): return getattr(driver, method)(*args, **kwargs) diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 5243f59203afc..42bee31dfb0e9 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -1,8 +1,19 @@ +from typing import TYPE_CHECKING + import psutil import torch +from vllm.logger import init_logger + from .interface import Platform, PlatformEnum +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + +logger = init_logger(__name__) + class CpuPlatform(Platform): _enum = PlatformEnum.CPU @@ -18,3 +29,52 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: @classmethod def inference_mode(cls): return torch.no_grad() + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + import vllm.envs as envs + from vllm.utils import GiB_bytes + model_config = vllm_config.model_config + # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # If the feature combo become valid + if not model_config.enforce_eager: + logger.warning( + "CUDA graph is not supported on CPU, fallback to the eager " + "mode.") + model_config.enforce_eager = True + + cache_config = vllm_config.cache_config + + if cache_config.enable_prefix_caching: + logger.warning( + "Prefix caching is not supported on CPU, disable it.") + cache_config.enable_prefix_caching = False + + kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE + + if 
kv_cache_space >= 0: + if kv_cache_space == 0: + cache_config.cpu_kvcache_space_bytes = 4 * GiB_bytes # type: ignore + logger.warning( + "Environment variable VLLM_CPU_KVCACHE_SPACE (GB) " + "for CPU backend is not set, using 4 by default.") + else: + cache_config.cpu_kvcache_space_bytes = kv_cache_space * GiB_bytes # type: ignore # noqa + else: + raise RuntimeError( + "Invalid environment variable VLLM_CPU_KVCACHE_SPACE" + f" {kv_cache_space}, expect a positive integer value.") + + scheduler_config = vllm_config.scheduler_config + if scheduler_config.chunked_prefill_enabled: + logger.warning( + "Chunked prefill is not supported on CPU, disable it.") + scheduler_config.chunked_prefill_enabled = False + + parallel_config = vllm_config.parallel_config + if (parallel_config.distributed_executor_backend is not None + and parallel_config.distributed_executor_backend != "mp"): + logger.warning(("%s is not supported on CPU, fallback to mp " + "distributed executor backend."), + parallel_config.distributed_executor_backend) + parallel_config.distributed_executor_backend = "mp" From 76aab90ab68476c353ad58019fd51fd18622056a Mon Sep 17 00:00:00 2001 From: Kunshang Ji Date: Sun, 17 Nov 2024 16:44:44 +0800 Subject: [PATCH 018/397] [Hardware] [HPU]add `mark_step` for hpu (#10239) Signed-off-by: Kunshang Ji --- vllm/worker/hpu_model_runner.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/vllm/worker/hpu_model_runner.py b/vllm/worker/hpu_model_runner.py index 1ff30d685c6b1..99cf9a7e67256 100644 --- a/vllm/worker/hpu_model_runner.py +++ b/vllm/worker/hpu_model_runner.py @@ -272,6 +272,19 @@ def precompute_indices_and_offsets(block_size, slot_mapping, is_prompt): return indices, offsets +def modify_decoder_layer(module: torch.nn.Module, suffix="DecoderLayer"): + if module.__class__.__name__.endswith(suffix): + + def forward_hook(module, args, output): + htorch.core.mark_step() + return output + + module.register_forward_hook(forward_hook) + + for child_name, child_module in module.named_children(): + modify_decoder_layer(child_module) + + class HpuModelAdapter: def __init__(self, model, block_size, dtype, enforce_eager): @@ -636,6 +649,7 @@ def load_model(self) -> None: else: self.model = self.model.to("hpu") htcore.mark_step() + modify_decoder_layer(self.model) torch.hpu.synchronize() with HabanaMemoryProfiler() as m_wrap: From 80d85c5d7bc33ce0ae210ebad3c45e4361b57640 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=94=B5=E8=84=91=E6=98=9F=E4=BA=BA?= Date: Sun, 17 Nov 2024 16:50:24 +0800 Subject: [PATCH 019/397] [Bugfix] Fix mrope_position_delta in non-last prefill chunk (#10403) Signed-off-by: imkero --- vllm/model_executor/layers/rotary_embedding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py index b01e4c61fe101..117fe086e5e87 100644 --- a/vllm/model_executor/layers/rotary_embedding.py +++ b/vllm/model_executor/layers/rotary_embedding.py @@ -922,9 +922,9 @@ def get_input_positions( torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx) llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1) - llm_positions = llm_positions[:, context_len:seq_len] mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item() + llm_positions = llm_positions[:, context_len:seq_len] return llm_positions.tolist(), mrope_position_delta From d1557e66d3227355e5aed8018a945a5e6a733147 Mon Sep 17 00:00:00 2001 From: wchen61 Date: Sun, 17 Nov 2024 19:32:40 +0800 Subject: 
[PATCH 020/397] =?UTF-8?q?[Misc]=20Enhance=20offline=5Finference?= =?UTF-8?q?=20to=20support=20user-configurable=20paramet=E2=80=A6=20(#1039?= =?UTF-8?q?2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: wchen61 --- examples/offline_inference.py | 98 ++++++++++++++++++++++++++++------- 1 file changed, 78 insertions(+), 20 deletions(-) diff --git a/examples/offline_inference.py b/examples/offline_inference.py index 9b758fa2479f6..391ac6b9b6b03 100644 --- a/examples/offline_inference.py +++ b/examples/offline_inference.py @@ -1,22 +1,80 @@ +from dataclasses import asdict + from vllm import LLM, SamplingParams +from vllm.engine.arg_utils import EngineArgs +from vllm.utils import FlexibleArgumentParser + + +def get_prompts(num_prompts: int): + # The default sample prompts. + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + if num_prompts != len(prompts): + prompts = (prompts * ((num_prompts // len(prompts)) + 1))[:num_prompts] + + return prompts + + +def main(args): + # Create prompts + prompts = get_prompts(args.num_prompts) + + # Create a sampling params object. + sampling_params = SamplingParams(n=args.n, + temperature=args.temperature, + top_p=args.top_p, + top_k=args.top_k, + max_tokens=args.max_tokens) + + # Create an LLM. + # The default model is 'facebook/opt-125m' + engine_args = EngineArgs.from_cli_args(args) + llm = LLM(**asdict(engine_args)) + + # Generate texts from the prompts. + # The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + + +if __name__ == '__main__': + parser = FlexibleArgumentParser() + parser = EngineArgs.add_cli_args(parser) + group = parser.add_argument_group("SamplingParams options") + group.add_argument("--num-prompts", + type=int, + default=4, + help="Number of prompts used for inference") + group.add_argument("--max-tokens", + type=int, + default=16, + help="Generated output length for sampling") + group.add_argument('--n', + type=int, + default=1, + help='Number of generated sequences per prompt') + group.add_argument('--temperature', + type=float, + default=0.8, + help='Temperature for text generation') + group.add_argument('--top-p', + type=float, + default=0.95, + help='top_p for text generation') + group.add_argument('--top-k', + type=int, + default=-1, + help='top_k for text generation') -# Sample prompts. -prompts = [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is", -] -# Create a sampling params object. -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) - -# Create an LLM. -llm = LLM(model="facebook/opt-125m") -# Generate texts from the prompts. The output is a list of RequestOutput objects -# that contain the prompt, generated text, and other information. -outputs = llm.generate(prompts, sampling_params) -# Print the outputs. 
-for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + args = parser.parse_args() + main(args) From c4e464333eac5a46e1cc2701e095a44057c82927 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Mon, 18 Nov 2024 09:07:46 +0800 Subject: [PATCH 021/397] [Misc] Add uninitialized params tracking for `AutoWeightsLoader` (#10327) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/model_loader/loader.py | 12 +++++++++++- vllm/model_executor/models/arctic.py | 8 ++++++-- vllm/model_executor/models/baichuan.py | 8 ++++++-- vllm/model_executor/models/bert.py | 8 ++++++-- vllm/model_executor/models/blip.py | 12 ++++++++---- vllm/model_executor/models/blip2.py | 7 ++++--- vllm/model_executor/models/bloom.py | 8 ++++++-- vllm/model_executor/models/chameleon.py | 8 ++++++-- vllm/model_executor/models/chatglm.py | 10 ++++++++-- vllm/model_executor/models/clip.py | 11 ++++++++--- vllm/model_executor/models/commandr.py | 4 +++- vllm/model_executor/models/dbrx.py | 8 ++++++-- vllm/model_executor/models/decilm.py | 8 ++++++-- vllm/model_executor/models/deepseek.py | 8 ++++++-- vllm/model_executor/models/deepseek_v2.py | 8 ++++++-- vllm/model_executor/models/exaone.py | 9 +++++++-- vllm/model_executor/models/falcon.py | 8 ++++++-- vllm/model_executor/models/florence2.py | 17 +++++++++++------ vllm/model_executor/models/fuyu.py | 8 +++++--- vllm/model_executor/models/gemma.py | 4 +++- vllm/model_executor/models/gemma2.py | 9 ++++++--- vllm/model_executor/models/gpt2.py | 8 ++++++-- vllm/model_executor/models/gpt_bigcode.py | 8 ++++++-- vllm/model_executor/models/gpt_j.py | 8 ++++++-- vllm/model_executor/models/gpt_neox.py | 8 ++++++-- vllm/model_executor/models/granite.py | 9 +++++++-- vllm/model_executor/models/granitemoe.py | 8 +++++--- .../models/idefics2_vision_model.py | 11 ++++++++--- vllm/model_executor/models/idefics3.py | 7 ++++--- vllm/model_executor/models/intern_vit.py | 8 ++++++-- vllm/model_executor/models/internlm2.py | 8 ++++++-- vllm/model_executor/models/internvl.py | 7 ++++--- vllm/model_executor/models/jais.py | 8 ++++++-- vllm/model_executor/models/jamba.py | 8 ++++++-- vllm/model_executor/models/llama.py | 15 ++++++++++----- vllm/model_executor/models/llava.py | 7 ++++--- vllm/model_executor/models/llava_next.py | 7 ++++--- vllm/model_executor/models/llava_next_video.py | 7 ++++--- vllm/model_executor/models/llava_onevision.py | 7 ++++--- vllm/model_executor/models/mamba.py | 8 ++++++-- vllm/model_executor/models/medusa.py | 9 +++++++-- vllm/model_executor/models/minicpm.py | 8 ++++++-- vllm/model_executor/models/minicpmv.py | 14 +++++++++----- vllm/model_executor/models/mixtral.py | 8 ++++++-- vllm/model_executor/models/mixtral_quant.py | 8 ++++++-- vllm/model_executor/models/mllama.py | 9 ++++++--- vllm/model_executor/models/mlp_speculator.py | 8 ++++++-- vllm/model_executor/models/mpt.py | 8 ++++++-- vllm/model_executor/models/nemotron.py | 8 ++++++-- vllm/model_executor/models/olmo.py | 8 ++++++-- vllm/model_executor/models/olmoe.py | 8 ++++++-- vllm/model_executor/models/opt.py | 8 ++++++-- vllm/model_executor/models/orion.py | 8 ++++++-- vllm/model_executor/models/paligemma.py | 7 ++++--- vllm/model_executor/models/persimmon.py | 8 ++++++-- vllm/model_executor/models/phi.py | 8 ++++++-- vllm/model_executor/models/phi3_small.py | 8 ++++++-- vllm/model_executor/models/phi3v.py | 9 ++++++--- vllm/model_executor/models/phimoe.py | 8 ++++++-- 
vllm/model_executor/models/pixtral.py | 12 ++++++++---- vllm/model_executor/models/qwen.py | 8 ++++++-- vllm/model_executor/models/qwen2.py | 18 ++++++++++++------ vllm/model_executor/models/qwen2_audio.py | 9 +++++++-- vllm/model_executor/models/qwen2_cls.py | 7 ++++--- vllm/model_executor/models/qwen2_moe.py | 8 ++++++-- vllm/model_executor/models/qwen2_rm.py | 7 ++++--- vllm/model_executor/models/qwen2_vl.py | 8 ++++++-- vllm/model_executor/models/siglip.py | 11 ++++++++--- vllm/model_executor/models/solar.py | 9 +++++++-- vllm/model_executor/models/stablelm.py | 8 ++++++-- vllm/model_executor/models/starcoder2.py | 8 ++++++-- vllm/model_executor/models/ultravox.py | 7 ++++--- vllm/model_executor/models/utils.py | 11 ++++++----- vllm/model_executor/models/xverse.py | 8 ++++++-- 74 files changed, 454 insertions(+), 185 deletions(-) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 0f8b81c3ef40c..d9ce85949e4ee 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -334,7 +334,17 @@ def load_model(self, vllm_config: VllmConfig) -> nn.Module: with target_device: model = _initialize_model(vllm_config=vllm_config) - model.load_weights(self._get_all_weights(model_config, model)) + weights_to_load = {name for name, _ in model.named_parameters()} + loaded_weights = model.load_weights( + self._get_all_weights(model_config, model)) + # We only enable strict check for non-quantiized models + # that have loaded weights tracking currently. + if model_config.quantization is None and loaded_weights is not None: + weights_not_loaded = weights_to_load - loaded_weights + if weights_not_loaded: + raise ValueError( + "Following weights were not initialized from " + f"checkpoint: {weights_not_loaded}") for _, module in model.named_modules(): quant_method = getattr(module, "quant_method", None) diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index d52418ee0f6f1..e58ad19cab54c 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -1,5 +1,5 @@ """Inference-only Snowflake Arctic model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -480,7 +480,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -518,6 +519,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("ws", f"experts.{expert_id}.w3.weight", expert_id)) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() logger.info( "It will take ~10 minutes loading from the 16-bit weights. " @@ -573,3 +575,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index 01ce7c42cd391..3749a16a38994 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -18,7 +18,7 @@ # limitations under the License. 
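The loader change above establishes the contract that the rest of this patch rolls out model by model: `load_weights` now returns the set of parameter names it actually initialized, and for unquantized models the loader raises if any entry from `named_parameters()` is missing from that set. A minimal sketch of the check, using a toy `nn.Module` in place of a vLLM model (the class and tensor names below are illustrative only):

from typing import Iterable, Set, Tuple

import torch
import torch.nn as nn


class ToyModel(nn.Module):
    """Stand-in for a vLLM model that tracks which weights it loaded."""

    def __init__(self) -> None:
        super().__init__()
        self.linear = nn.Linear(4, 4)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]:
        params_dict = dict(self.named_parameters())
        loaded_params: Set[str] = set()
        for name, loaded_weight in weights:
            params_dict[name].data.copy_(loaded_weight)
            loaded_params.add(name)
        return loaded_params


model = ToyModel()
weights_to_load = {name for name, _ in model.named_parameters()}
# Only supply linear.weight; linear.bias is deliberately left out.
loaded_weights = model.load_weights([("linear.weight", torch.zeros(4, 4))])
weights_not_loaded = weights_to_load - loaded_weights
if weights_not_loaded:
    # Mirrors the new strict check: reports {'linear.bias'} as never initialized.
    raise ValueError("Following weights were not initialized from "
                     f"checkpoint: {weights_not_loaded}")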
"""Inference-only BaiChuan model compatible with HuggingFace weights.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -404,13 +404,15 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("gate_up_proj", "gate_proj", 0), ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -449,6 +451,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class BaichuanForCausalLM(BaiChuanBaseForCausalLM): diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index 42dd6119e76f1..d8301a36acb01 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -1,4 +1,4 @@ -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -337,7 +337,8 @@ def forward( return self.encoder(hidden_states, kv_caches, attn_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "query", "q"), @@ -346,6 +347,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "pooler" in name: continue @@ -368,6 +370,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class BertEmbeddingModel(nn.Module): diff --git a/vllm/model_executor/models/blip.py b/vllm/model_executor/models/blip.py index e612010677364..6db6462e97f3f 100644 --- a/vllm/model_executor/models/blip.py +++ b/vllm/model_executor/models/blip.py @@ -1,6 +1,6 @@ """Minimal implementation of BlipVisionModel intended to be only used within a vision language model.""" -from typing import Iterable, Optional, Tuple, Union +from typing import Iterable, Optional, Set, Tuple, Union import torch import torch.nn as nn @@ -415,7 +415,8 @@ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: return self.post_layernorm(hidden_states) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -423,6 +424,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] if self.shard_weight else [] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() layer_count = len(self.encoder.layers) for name, loaded_weight in weights: @@ -440,8 +442,8 @@ def load_weights(self, weights: Iterable[Tuple[str, 
torch.Tensor]]): for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue - - param = params_dict[name.replace(weight_name, param_name)] + name = name.replace(weight_name, param_name) + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -450,3 +452,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py index 03dc1d15ab697..7d7639b4a92ce 100644 --- a/vllm/model_executor/models/blip2.py +++ b/vllm/model_executor/models/blip2.py @@ -1,5 +1,5 @@ from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -692,6 +692,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/bloom.py b/vllm/model_executor/models/bloom.py index cf2eee8172769..1060d418474ef 100644 --- a/vllm/model_executor/models/bloom.py +++ b/vllm/model_executor/models/bloom.py @@ -16,7 +16,7 @@ # limitations under the License. """Inference-only BLOOM model compatible with HuggingFace weights.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -341,8 +341,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if name == "lm_head.weight": continue @@ -371,3 +373,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index 7b59c818e0b60..8f91abffaea90 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -1,5 +1,5 @@ from functools import cached_property -from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, +from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -1034,7 +1034,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -1044,6 +1045,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", 
".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -1111,3 +1113,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 70e9b607b0642..81e56381eabd8 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -3,7 +3,8 @@ """Inference-only ChatGLM model compatible with THUDM weights.""" from argparse import Namespace from array import array -from typing import Dict, Iterable, List, Mapping, Optional, Tuple, TypedDict +from typing import (Dict, Iterable, List, Mapping, Optional, Set, Tuple, + TypedDict) import torch from PIL import Image @@ -645,7 +646,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: # Merge two ColumnParallelLinear into one MergedColumnParallelLinear merged_weights_dict: Dict[str, Dict[str, Optional[torch.Tensor]]] = { "transformer.vision.linear_proj.merged_proj.weight": { @@ -655,6 +657,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): } params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: is_weight_to_be_merge = False for _, merged_weight_dict in merged_weights_dict.items(): @@ -677,6 +680,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) for combined_name, merged_weight_dict in merged_weights_dict.items(): if combined_name in params_dict: @@ -686,3 +690,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, combined_weight) + loaded_params.add(combined_name) + return loaded_params diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index 2d81b9266826b..184758f4a8a45 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -1,6 +1,6 @@ """Minimal implementation of CLIPVisionModel intended to be only used within a vision language model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import numpy as np import torch @@ -483,7 +483,8 @@ def device(self): # (TODO) Add prefix argument for filtering out weights to be loaded # ref: https://github.com/vllm-project/vllm/pull/7186#discussion_r1734163986 - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -491,6 +492,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] if self.shard_weight else [] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() layer_count = len(self.vision_model.encoder.layers) for name, loaded_weight in 
weights: @@ -508,8 +510,9 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue + name = name.replace(weight_name, param_name) - param = params_dict[name.replace(weight_name, param_name)] + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -518,3 +521,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index fbb09a64cde9b..9fd083e5a02a9 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -402,7 +402,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -447,3 +448,4 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) weight_loader(param, loaded_weight) loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/dbrx.py b/vllm/model_executor/models/dbrx.py index 3952ff31e5cec..eab338800249e 100644 --- a/vllm/model_executor/models/dbrx.py +++ b/vllm/model_executor/models/dbrx.py @@ -1,4 +1,4 @@ -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch import torch.nn as nn @@ -417,13 +417,15 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: expert_params_mapping = [( "w13_weight" if weight_name in ["w1", "v1"] else "w2_weight", f"mlp.{weight_name}", ) for weight_name in ["w1", "v1", "w2"]] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: for param_name, weight_name in expert_params_mapping: if weight_name not in name: @@ -447,3 +449,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/decilm.py b/vllm/model_executor/models/decilm.py index b38fd9fa49c21..c551853956b92 100644 --- a/vllm/model_executor/models/decilm.py +++ b/vllm/model_executor/models/decilm.py @@ -22,7 +22,7 @@ # limitations under the License. 
"""Inference-only DeciLM model compatible with HuggingFace weights.""" -from typing import Iterable, Tuple +from typing import Iterable, Set, Tuple import torch @@ -57,7 +57,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): delattr(config, "num_key_value_heads_per_layer") super().__init__(vllm_config=vllm_config) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -67,6 +68,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -97,6 +99,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params def _degroup_weight(self, loaded_weight: torch.Tensor) -> torch.Tensor: hidden_size = self.config.hidden_size diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py index 36dfea5a65656..8c5ad9904e925 100644 --- a/vllm/model_executor/models/deepseek.py +++ b/vllm/model_executor/models/deepseek.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Deepseek model.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -442,7 +442,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -453,6 +454,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -487,3 +489,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index 1e32fe60c7a5b..d2c4ca0bf85e9 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only DeepseekV2 model.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -550,7 +550,8 @@ def make_empty_intermediate_tensors( device=device), }) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("gate_up_proj", "gate_proj", 0), @@ -566,6 +567,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.n_routed_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -623,3 +625,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index 52dd603ca558d..9d739d0479548 100644 --- a/vllm/model_executor/models/exaone.py +++ b/vllm/model_executor/models/exaone.py @@ -22,7 +22,7 @@ # limitations under the License. """Inference-only Exaone model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -513,7 +513,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -523,6 +524,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".c_fc_1", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -543,6 +545,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) loaded_weight = loaded_weight[0] weight_loader(param, loaded_weight) + loaded_params.add(scale_name) continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: @@ -576,6 +579,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params # If this function is called, it should always initialize KV cache scale # factors (or else raise an exception). 
Thus, handled exceptions should diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py index e97abe949ccdb..2aa4b67d99894 100644 --- a/vllm/model_executor/models/falcon.py +++ b/vllm/model_executor/models/falcon.py @@ -18,7 +18,7 @@ """PyTorch Falcon model.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -473,7 +473,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: total_num_heads = self.config.num_attention_heads if self.config.new_decoder_architecture: total_num_kv_heads = self.config.num_kv_heads @@ -483,6 +484,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): total_num_kv_heads = total_num_heads num_query_heads_per_kv_head = total_num_heads // total_num_kv_heads params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if name == "lm_head.weight" and self.tie_word_embeddings: # Falcon uses tied embeddings except Falcon-11b. @@ -519,3 +521,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/florence2.py b/vllm/model_executor/models/florence2.py index 971a71180164b..d3a9ff6915b84 100644 --- a/vllm/model_executor/models/florence2.py +++ b/vllm/model_executor/models/florence2.py @@ -1,5 +1,5 @@ import math -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch import torch.nn as nn @@ -156,7 +156,8 @@ def sample(self, logits: torch.Tensor, next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -165,12 +166,13 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue - - param = params_dict[name.replace(weight_name, param_name)] + name = name.replace(weight_name, param_name) + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -183,6 +185,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class Florence2ForConditionalGeneration(nn.Module): @@ -248,10 +252,11 @@ def sample( ) -> SamplerOutput: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: skip_prefixes = [ 'image_projection', "vision_tower", "image_proj_norm", "image_pos_embed", "visual_temporal_embed" ] 
loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py index 31fc098a8bb3f..7b46907ac83ab 100644 --- a/vllm/model_executor/models/fuyu.py +++ b/vllm/model_executor/models/fuyu.py @@ -16,7 +16,8 @@ """ PyTorch Fuyu model.""" import math from array import array -from typing import Iterable, List, Literal, Mapping, Optional, Tuple, TypedDict +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, + TypedDict) import torch import torch.nn as nn @@ -354,6 +355,7 @@ def sample( next_tokens = self.language_model.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index ace13664c6ea6..64e03b30bf2f1 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -424,7 +424,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -469,3 +470,4 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): logger.warning( "Some weights are not initialized from checkpoints: %s", unloaded_params) + return loaded_params diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index a60b4e73a76d4..4ba39223cc07f 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -312,7 +312,8 @@ def forward( hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -354,6 +355,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): logger.warning( "Some weights are not initialized from checkpoints: %s", unloaded_params) + return loaded_params class Gemma2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP): @@ -451,13 +453,14 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( self, skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None), ) - loader.load_weights(weights) + return loader.load_weights(weights) class Gemma2EmbeddingModel(nn.Module, SupportsPP): diff --git a/vllm/model_executor/models/gpt2.py b/vllm/model_executor/models/gpt2.py index fa0fdad28d161..1c61408ae1dd9 100644 --- a/vllm/model_executor/models/gpt2.py +++ b/vllm/model_executor/models/gpt2.py @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only GPT-2 model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -298,8 +298,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "lm_head.weight" in name: # GPT-2 ties the weights of the embedding layer and the final @@ -328,3 +330,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/gpt_bigcode.py b/vllm/model_executor/models/gpt_bigcode.py index b2fc79d0d36dc..50a143cb1b600 100644 --- a/vllm/model_executor/models/gpt_bigcode.py +++ b/vllm/model_executor/models/gpt_bigcode.py @@ -17,7 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only GPTBigCode model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -323,8 +323,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "lm_head.weight" in name: continue @@ -344,3 +346,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader(param, loaded_weight, 'v') else: weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py index cec3fd12a67d6..d5defc60764e6 100644 --- a/vllm/model_executor/models/gpt_j.py +++ b/vllm/model_executor/models/gpt_j.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only GPT-J model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -291,7 +291,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -301,6 +302,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "attn.bias" in name or "attn.masked_bias" in name: continue @@ -330,3 +332,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py index 11f286d6bcba0..0bb5e2f9b95f9 100644 --- a/vllm/model_executor/models/gpt_neox.py +++ b/vllm/model_executor/models/gpt_neox.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only GPT-NeoX model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -303,8 +303,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if ("attention.bias" in name or "attention.masked_bias" in name or "rotary_emb.inv_freq" in name): @@ -337,3 +339,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index cb2583e69d88d..c1e2e87f08ec3 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only IBM Granite model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -455,7 +455,8 @@ def make_empty_intermediate_tensors( device=device), }) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -465,6 +466,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -485,6 +487,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) loaded_weight = loaded_weight[0] weight_loader(param, loaded_weight) + loaded_params.add(scale_name) continue for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: @@ -518,6 +521,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params # If this function is called, it should always initialize KV cache scale # factors (or else raise an exception). Thus, handled exceptions should diff --git a/vllm/model_executor/models/granitemoe.py b/vllm/model_executor/models/granitemoe.py index f437dd521a7d5..a91a18816995f 100644 --- a/vllm/model_executor/models/granitemoe.py +++ b/vllm/model_executor/models/granitemoe.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only GraniteMoe model.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -419,7 +419,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: new_weights = {} for n, p in weights: if n.endswith('.block_sparse_moe.input_linear.weight'): @@ -452,4 +453,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): pass else: new_weights[n] = p - mixtral.MixtralForCausalLM.load_weights(self, new_weights.items()) + return mixtral.MixtralForCausalLM.load_weights(self, + new_weights.items()) diff --git a/vllm/model_executor/models/idefics2_vision_model.py b/vllm/model_executor/models/idefics2_vision_model.py index b21bc2a3f9ce1..16192928beb1f 100644 --- a/vllm/model_executor/models/idefics2_vision_model.py +++ b/vllm/model_executor/models/idefics2_vision_model.py @@ -15,7 +15,7 @@ # limitations under the License. 
"""PyTorch Idefics2 model.""" -from typing import Iterable, Optional, Tuple +from typing import Iterable, Optional, Set, Tuple import torch from torch import nn @@ -331,7 +331,8 @@ def forward( last_hidden_state = self.post_layernorm(encoder_outputs) return last_hidden_state - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -339,11 +340,13 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue - param = params_dict[name.replace(weight_name, param_name)] + name = name.replace(weight_name, param_name) + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -352,3 +355,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index 0cecc754e916f..5d176b2a4e416 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -15,7 +15,7 @@ import math from typing import (Dict, Iterable, List, Literal, Mapping, NamedTuple, - Optional, Tuple, TypedDict, Union) + Optional, Set, Tuple, TypedDict, Union) import torch import torch.utils.checkpoint @@ -751,9 +751,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) def get_mm_mapping(self) -> MultiModelKeys: """ diff --git a/vllm/model_executor/models/intern_vit.py b/vllm/model_executor/models/intern_vit.py index 9761635d2a6c2..bd91a0806ae5c 100644 --- a/vllm/model_executor/models/intern_vit.py +++ b/vllm/model_executor/models/intern_vit.py @@ -5,7 +5,7 @@ # Licensed under The MIT License [see LICENSE for details] # -------------------------------------------------------- from functools import partial -from typing import Iterable, Optional, Tuple +from typing import Iterable, Optional, Set, Tuple import torch import torch.nn as nn @@ -469,10 +469,14 @@ def forward( return encoder_outputs - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index 19bfe16e4d5fc..94b819b5d9366 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -1,5 +1,5 @@ from functools import partial -from typing import Any, Dict, Iterable, List, 
Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -369,13 +369,15 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("gate_up_proj", "w1", 0), ("gate_up_proj", "w3", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -402,3 +404,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 92579e3aae949..7ea2f9be2191d 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -6,7 +6,7 @@ # -------------------------------------------------------- import re from functools import cached_property, partial -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -663,6 +663,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/jais.py b/vllm/model_executor/models/jais.py index ee49ffb3cd87f..41db85b678456 100644 --- a/vllm/model_executor/models/jais.py +++ b/vllm/model_executor/models/jais.py @@ -19,7 +19,7 @@ """Inference-only Jais model compatible with HuggingFace weights.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -350,8 +350,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "lm_head.weight" in name: # GPT-2 ties the weights of the embedding layer and the final @@ -382,3 +384,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py index 5612dd6886385..f83f0fce7275f 100644 --- a/vllm/model_executor/models/jamba.py +++ b/vllm/model_executor/models/jamba.py @@ -1,5 +1,5 @@ """Inference-only Jamba model.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -462,7 +462,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def 
load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -479,6 +480,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -534,6 +536,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params def _is_moe_layer(name: str): diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index e53631ef19f31..2b40e9ec73fad 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only LLaMA model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -350,7 +350,8 @@ def forward( hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -360,6 +361,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -375,6 +377,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) loaded_weight = loaded_weight[0] weight_loader(param, loaded_weight) + loaded_params.add(scale_name) continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: @@ -390,7 +393,6 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) - break else: # Skip loading extra bias for GPTQ models. @@ -408,6 +410,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params # If this function is called, it should always initialize KV cache scale # factors (or else raise an exception). 
Thus, handled exceptions should @@ -577,13 +581,14 @@ def sample(self, logits: torch.Tensor, next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( self, skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None), ) - loader.load_weights( + return loader.load_weights( self.maybe_remap_mistral(name, loaded_weight) for name, loaded_weight in weights) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index b13bcfa676811..e7d3161a7cb2d 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -1,5 +1,5 @@ from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Protocol, +from typing import (Iterable, List, Literal, Mapping, Optional, Protocol, Set, Tuple, TypedDict, Union) import torch @@ -547,6 +547,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index dd2fa6cac969f..37e2227a52dcd 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -1,5 +1,5 @@ from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -654,6 +654,7 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py index 5d5598d07bfde..e2880c76cf43d 100644 --- a/vllm/model_executor/models/llava_next_video.py +++ b/vllm/model_executor/models/llava_next_video.py @@ -1,6 +1,6 @@ import math from functools import cached_property -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import numpy as np @@ -445,10 +445,11 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( self, # This model doesn't support images for now ignore_unexpected_prefixes=["image_newline"], ) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index a5b2108177830..705ca1e4ab6e6 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -1,6 +1,6 @@ import math from functools import cached_property 
-from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import numpy as np @@ -887,6 +887,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index ac0d265a961f0..405b8f7787ba8 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -1,5 +1,5 @@ """PyTorch MAMBA model.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -243,8 +243,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "A_log" in name: name = name.replace("A_log", "A") @@ -256,3 +258,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/medusa.py b/vllm/model_executor/models/medusa.py index b05360b55466b..b4ed6538bddac 100644 --- a/vllm/model_executor/models/medusa.py +++ b/vllm/model_executor/models/medusa.py @@ -1,4 +1,4 @@ -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch import torch.nn as nn @@ -156,8 +156,10 @@ def generate_proposals( sampling_metadata=sampling_metadata, ) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() weights_map = {} @@ -181,9 +183,12 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) if self.token_map is not None: self.token_map.to(device=self.lm_heads[0].weight.device) assert (self.truncated_vocab_size == self.orig_vocab_size) or (self.token_map is not None) + + return loaded_params diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index 6b67266c53362..b92bff4d7c28c 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -21,7 +21,7 @@ # limitations under the License. 
"""Inference-only MiniCPM model compatible with HuggingFace weights.""" import math -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -539,7 +539,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -556,6 +557,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for weight_name in ["w1", "w2", "w3"] ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -606,3 +608,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index fd8eda997f76f..99bf1d42d0355 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -24,7 +24,7 @@ import re from functools import partial from typing import (Any, Callable, Iterable, List, Literal, Mapping, Optional, - Tuple, TypedDict, Union) + Set, Tuple, TypedDict, Union) import torch import torch.types @@ -602,7 +602,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -612,6 +613,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: for key_to_modify, new_key in _KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in name: @@ -630,10 +632,10 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: continue - if is_pp_missing_parameter( - name.replace(weight_name, param_name), self): + name = name.replace(weight_name, param_name) + if is_pp_missing_parameter(name, self): continue - param = params_dict[name.replace(weight_name, param_name)] + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -646,6 +648,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params def get_mm_mapping(self) -> MultiModelKeys: """ diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py index eebf5bab5a288..0faffb4f1b00c 100644 --- a/vllm/model_executor/models/mixtral.py +++ b/vllm/model_executor/models/mixtral.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only Mixtral model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -404,7 +404,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -421,6 +422,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_local_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -478,3 +480,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/mixtral_quant.py b/vllm/model_executor/models/mixtral_quant.py index af2e9586988df..ddd6afcf6a1b6 100644 --- a/vllm/model_executor/models/mixtral_quant.py +++ b/vllm/model_executor/models/mixtral_quant.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Mixtral model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import numpy as np import torch @@ -409,7 +409,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -418,6 +419,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -448,3 +450,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py index db7ee7b2d8537..41f62b37f3bd9 100644 --- a/vllm/model_executor/models/mllama.py +++ b/vllm/model_executor/models/mllama.py @@ -13,7 +13,7 @@ # limitations under the License. 
"""PyTorch Mllama model.""" import math -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import numpy as np @@ -1427,7 +1427,8 @@ def forward( return outputs - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -1437,7 +1438,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) - updated_params = set() + updated_params: Set[str] = set() for name, loaded_weight in weights: if 'patch_embedding.weight' in name: name = name.replace('patch_embedding.weight', @@ -1457,6 +1458,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + updated_params.add(name) + return updated_params def skip_attention_mask(sparse_mask: List[List[int]]) -> bool: diff --git a/vllm/model_executor/models/mlp_speculator.py b/vllm/model_executor/models/mlp_speculator.py index 4d7e82880041d..f2aa2653c4f5c 100644 --- a/vllm/model_executor/models/mlp_speculator.py +++ b/vllm/model_executor/models/mlp_speculator.py @@ -1,5 +1,5 @@ import math -from typing import Iterable, List, Tuple +from typing import Iterable, List, Set, Tuple import torch import torch.nn as nn @@ -188,11 +188,15 @@ def generate_proposals( return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: param = params_dict.get(name.replace("speculator.", "")) if param is not None: weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/mpt.py b/vllm/model_executor/models/mpt.py index 3c74ef2448abb..8716e92b0f1c2 100644 --- a/vllm/model_executor/models/mpt.py +++ b/vllm/model_executor/models/mpt.py @@ -1,6 +1,6 @@ # Adapted from https://huggingface.co/mosaicml/mpt-7b/tree/main import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch import torch.nn as nn @@ -324,8 +324,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: # Skip loading extra bias for GPTQ models. 
if name.endswith(".bias") and name not in params_dict: @@ -336,3 +338,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index eb45beae7d21a..ceab299a7950a 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Nemotron model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -474,7 +474,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -482,6 +483,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".qkv_proj", ".v_proj", "v"), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -522,3 +524,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/olmo.py b/vllm/model_executor/models/olmo.py index 98d4e1ec320a4..dc138e2e636ad 100644 --- a/vllm/model_executor/models/olmo.py +++ b/vllm/model_executor/models/olmo.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only OLMo model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -356,7 +356,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -366,6 +367,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -402,3 +404,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py index f4eebab8c98dd..ab87695d8e650 100644 --- a/vllm/model_executor/models/olmoe.py +++ b/vllm/model_executor/models/olmoe.py @@ -10,7 +10,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only OLMoE model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -364,7 +364,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -383,6 +384,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -455,3 +457,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/opt.py b/vllm/model_executor/models/opt.py index 997fe642439e6..db85a494980a7 100644 --- a/vllm/model_executor/models/opt.py +++ b/vllm/model_executor/models/opt.py @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only OPT model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -394,7 +394,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -402,6 +403,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "lm_head.weight" in name and self.config.tie_word_embeddings: continue @@ -431,3 +433,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/orion.py b/vllm/model_executor/models/orion.py index 39d659c49cbcf..b01734af8ddd8 100644 --- a/vllm/model_executor/models/orion.py +++ b/vllm/model_executor/models/orion.py @@ -3,7 +3,7 @@ # Copyright (c) OrionStar Inc. # LICENSE: https://huggingface.co/OrionStarAI/Orion-14B-Base/blob/main/LICENSE """Inference-only Orion-14B model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -327,7 +327,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -337,6 +338,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -368,3 +370,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index eea229359255e..dd5256eb87ab3 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -1,4 +1,4 @@ -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import torch @@ -295,6 +295,7 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/persimmon.py b/vllm/model_executor/models/persimmon.py index 
62c509153a111..3b8199f4f1661 100644 --- a/vllm/model_executor/models/persimmon.py +++ b/vllm/model_executor/models/persimmon.py @@ -19,7 +19,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only persimmon model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -324,8 +324,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -358,3 +360,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py index a2ab0d74c48db..0a117bf16c9b3 100644 --- a/vllm/model_executor/models/phi.py +++ b/vllm/model_executor/models/phi.py @@ -34,7 +34,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Inference-only Phi-1.5 model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -345,7 +345,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -353,6 +354,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v") ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: @@ -383,3 +385,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/phi3_small.py b/vllm/model_executor/models/phi3_small.py index 2139cec441807..a78e4d355a314 100644 --- a/vllm/model_executor/models/phi3_small.py +++ b/vllm/model_executor/models/phi3_small.py @@ -1,5 +1,5 @@ import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -457,9 +457,11 @@ def sample( sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -471,3 +473,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = 
getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 4db65edc174f1..2e583bb08e87a 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -15,7 +15,7 @@ import itertools import re from functools import cached_property, lru_cache -from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, +from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) import numpy as np @@ -744,7 +744,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: hf_to_vllm_mapper = WeightsMapper( orig_to_new_prefix={ "model.vision_embed_tokens.wte": "embed_tokens", @@ -759,5 +760,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): # The HF config doesn't specify whether these are tied, # so we detect it this way - if "embed_tokens" not in autoloaded_weights: + if "embed_tokens.weight" not in autoloaded_weights: self.embed_tokens = self.language_model.model.embed_tokens + autoloaded_weights.add("embed_tokens.weight") + return autoloaded_weights diff --git a/vllm/model_executor/models/phimoe.py b/vllm/model_executor/models/phimoe.py index b7e70f8fa2c6d..e475d286bd7ea 100644 --- a/vllm/model_executor/models/phimoe.py +++ b/vllm/model_executor/models/phimoe.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only PhiMoE model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -598,7 +598,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -613,6 +614,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_local_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -666,3 +668,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index a3e30ea2dd299..307febde7eef0 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -1,7 +1,7 @@ from dataclasses import dataclass, fields from functools import cached_property from itertools import tee -from typing import Iterable, List, Mapping, Optional, Tuple, Union +from typing import Iterable, List, Mapping, Optional, Set, Tuple, Union import numpy import torch @@ -1053,7 +1053,8 @@ def forward( # (TODO) Add prefix argument for filtering out weights to be loaded # ref: https://github.com/vllm-project/vllm/pull/7186#discussion_r1734163986 - def 
load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -1063,6 +1064,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() layer_count = len(self.transformer.layers) for name, loaded_weight in weights: @@ -1075,8 +1077,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue - - param = params_dict[name.replace(weight_name, param_name)] + name = name.replace(weight_name, param_name) + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -1085,3 +1087,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 447632cefcd9a..3978c176a2144 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -8,7 +8,7 @@ import re from functools import partial from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, - Optional, Tuple, TypedDict, Union) + Optional, Set, Tuple, TypedDict, Union) import numpy as np import torch @@ -964,13 +964,15 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("gate_up_proj", "w2", 0), ("gate_up_proj", "w1", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -999,6 +1001,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class QWenLLM(QWenBaseModel): diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 8f10df808c216..370cff5fa153f 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -21,7 +21,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only Qwen2 model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -332,7 +332,8 @@ def forward( hidden_states, _ = self.norm(hidden_states, residual) return hidden_states - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -342,6 +343,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -372,6 +374,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params class Qwen2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP): @@ -494,13 +498,14 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( self, skip_prefixes=(["lm_head."] if self.config.tie_word_embeddings else None), ) - loader.load_weights(weights) + return loader.load_weights(weights) class Qwen2EmbeddingModel(nn.Module, SupportsLoRA, SupportsPP): @@ -564,7 +569,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self, ignore_unexpected_prefixes=["lm_head."]) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index d30950361ad89..a4965f34b1ca8 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -20,7 +20,8 @@ # limitations under the License. 
"""Inference-only Qwen2-Audio model compatible with HuggingFace weights.""" from functools import lru_cache -from typing import Iterable, List, Mapping, Optional, Tuple, TypedDict, Union +from typing import (Iterable, List, Mapping, Optional, Set, Tuple, TypedDict, + Union) import librosa import numpy as np @@ -420,7 +421,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -430,6 +432,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -463,3 +466,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/qwen2_cls.py b/vllm/model_executor/models/qwen2_cls.py index 07eb330620a43..dc5dabf6fc38b 100644 --- a/vllm/model_executor/models/qwen2_cls.py +++ b/vllm/model_executor/models/qwen2_cls.py @@ -4,7 +4,7 @@ # Copyright 2024 The Qwen team. # Copyright 2023 The vLLM team. """Inference-only Qwen2-Classification model compatible with HF weights.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -97,7 +97,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self, ignore_unexpected_prefixes=["lm_head."]) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 249d94b5d95e9..96a9bc451f4df 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -21,7 +21,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only Qwen2MoE model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch import torch.nn.functional as F @@ -436,7 +436,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -455,6 +456,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): num_experts=self.config.num_experts) params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -532,3 +534,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/qwen2_rm.py b/vllm/model_executor/models/qwen2_rm.py index 6db467af334f5..988d682d36be3 100644 --- a/vllm/model_executor/models/qwen2_rm.py +++ b/vllm/model_executor/models/qwen2_rm.py @@ -3,7 +3,7 @@ # Copyright 2024 The Qwen team. # Copyright 2023 The vLLM team. """Inference-only Qwen2-RM model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -110,7 +110,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self, ignore_unexpected_prefixes=["lm_head."]) - loader.load_weights(weights) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 2335baf459771..ef6b52db6e17d 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -23,7 +23,7 @@ """Inference-only Qwen2-VL model compatible with HuggingFace weights.""" from functools import partial from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, - Optional, Tuple, Type, TypedDict, Union) + Optional, Set, Tuple, Type, TypedDict, Union) import torch import torch.nn as nn @@ -1333,7 +1333,8 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -1343,6 +1344,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "gate_proj", 0), ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -1392,3 +1394,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return 
loaded_params diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py index acaf4afdecfe5..c9e09b879843a 100644 --- a/vllm/model_executor/models/siglip.py +++ b/vllm/model_executor/models/siglip.py @@ -2,7 +2,7 @@ within a vision language model.""" import math -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import numpy as np import torch @@ -594,7 +594,8 @@ def forward( interpolate_pos_encoding=interpolate_pos_encoding, ) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -602,6 +603,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("qkv_proj", "v_proj", "v"), ] if self.shard_weight else [] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() layer_count = len(self.vision_model.encoder.layers) for name, loaded_weight in weights: @@ -619,8 +621,9 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: continue + name = name.replace(weight_name, param_name) - param = params_dict[name.replace(weight_name, param_name)] + param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) break @@ -629,3 +632,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py index affb2c975ce4a..6d6fafc5ab0eb 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -21,7 +21,7 @@ # limitations under the License. 
"""Inference-only Solar model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -477,7 +477,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) (".qkv_proj", ".q_proj", "q"), @@ -487,6 +488,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): (".gate_up_proj", ".up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -502,6 +504,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): default_weight_loader) loaded_weight = loaded_weight[0] weight_loader(param, loaded_weight) + loaded_params.add(scale_name) continue for param_name, weight_name, shard_id in stacked_params_mapping: if weight_name not in name: @@ -535,6 +538,8 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params # If this function is called, it should always initialize KV cache scale # factors (or else raise an exception). Thus, handled exceptions should diff --git a/vllm/model_executor/models/stablelm.py b/vllm/model_executor/models/stablelm.py index 99acce596602e..e11d2e916730a 100644 --- a/vllm/model_executor/models/stablelm.py +++ b/vllm/model_executor/models/stablelm.py @@ -18,7 +18,7 @@ # https://huggingface.co/stabilityai/stablelm-3b-4e1t/blob/main/config.json """Inference-only StabeLM (https://github.com/Stability-AI/StableLM) model compatible with HuggingFace weights.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -306,7 +306,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -316,6 +317,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -347,3 +349,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py index 0ef940acebb93..74c66042226de 100644 --- a/vllm/model_executor/models/starcoder2.py +++ b/vllm/model_executor/models/starcoder2.py @@ -17,7 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
""" PyTorch Starcoder2 model.""" -from typing import Iterable, List, Optional, Tuple, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -314,7 +314,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ # (param_name, shard_name, shard_id) ("qkv_proj", "q_proj", "q"), @@ -323,6 +324,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ] params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "rotary_emb.inv_freq" in name: continue @@ -346,3 +348,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py index 9fde22c016de0..512adbc7db35e 100644 --- a/vllm/model_executor/models/ultravox.py +++ b/vllm/model_executor/models/ultravox.py @@ -3,7 +3,7 @@ import math from functools import cached_property, lru_cache -from typing import (Iterable, List, Literal, Mapping, Optional, Tuple, +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union, cast) import numpy as np @@ -504,10 +504,11 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: hf_to_vllm_mapper = WeightsMapper( orig_to_new_prefix={"audio_tower.model.encoder.": "audio_tower."}) loader = AutoWeightsLoader(self, ignore_unexpected_prefixes=["audio_tower."]) - loader.load_weights(weights, mapper=hf_to_vllm_mapper) + return loader.load_weights(weights, mapper=hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 1d51885f9094a..7a4fcce95603d 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -1,7 +1,7 @@ import itertools from dataclasses import dataclass, field from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, - Optional, Protocol, Tuple, Union, overload) + Optional, Protocol, Set, Tuple, Union, overload) import torch import torch.nn as nn @@ -172,8 +172,9 @@ def _load_module( if module != self.module: module_load_weights = getattr(module, "load_weights", None) if callable(module_load_weights): - module_load_weights(weights) - return + loaded_params = module_load_weights(weights) + yield from map(lambda x: self._get_qualname(base_prefix, x), + loaded_params) child_modules = dict(module.named_children()) child_params = dict(module.named_parameters(recurse=False)) @@ -222,11 +223,11 @@ def load_weights( weights: Iterable[Tuple[str, torch.Tensor]], *, mapper: Optional[WeightsMapper] = None, - ) -> List[str]: + ) -> Set[str]: if mapper is not None: weights = mapper.apply(weights) - autoloaded_weights = list(self._load_module("", self.module, weights)) + autoloaded_weights = set(self._load_module("", self.module, weights)) return autoloaded_weights diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py index 
51172d8782a70..bc37a997eabb5 100644 --- a/vllm/model_executor/models/xverse.py +++ b/vllm/model_executor/models/xverse.py @@ -19,7 +19,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Xverse model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -376,7 +376,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), @@ -385,6 +386,7 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): ("gate_up_proj", "up_proj", 1), ] params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if ("rotary_emb.inv_freq" in name or "rotary_emb.cos_cached" in name @@ -413,3 +415,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params From 47826cacf0e037b4e109f0b2d8d594e47def500e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=84=8D=F0=9D=95=A0=F0=9D=95=9D=F0=9D=95=9D=F0=9D=95=A0?= =?UTF-8?q?=F0=9D=95=A8=20=F0=9D=95=84=F0=9D=95=92=F0=9D=95=9F?= Date: Mon, 18 Nov 2024 05:29:26 +0200 Subject: [PATCH 022/397] [Bugfix] Ignore ray reinit error when current platform is ROCm or XPU (#10375) Signed-off-by: Hollow Man --- vllm/executor/ray_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/executor/ray_utils.py b/vllm/executor/ray_utils.py index 41dd59bc65ec5..4f28efd639084 100644 --- a/vllm/executor/ray_utils.py +++ b/vllm/executor/ray_utils.py @@ -234,7 +234,7 @@ def initialize_ray_cluster( if current_platform.is_rocm() or current_platform.is_xpu(): # Try to connect existing ray instance and create a new one if not found try: - ray.init("auto") + ray.init("auto", ignore_reinit_error=True) except ConnectionError: logger.warning( "No existing RAY instance detected. 
" From 51bb12d17b374d5c4521cd01e5b066fd2419a8fa Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 17 Nov 2024 23:57:20 -0800 Subject: [PATCH 023/397] [4/N][torch.compile] clean up set_torch_compile_backend (#10401) Signed-off-by: youkaichao --- vllm/compilation/backends.py | 16 ++-------------- vllm/compilation/wrapper.py | 11 +++-------- vllm/config.py | 31 ++++++++++++++++++++++++++++++- vllm/platforms/tpu.py | 7 +++---- vllm/plugins/__init__.py | 14 +------------- vllm/utils.py | 9 +++++++++ vllm/worker/model_runner.py | 3 +-- 7 files changed, 49 insertions(+), 42 deletions(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 22c613931f082..0cf1e3a95fcba 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -2,15 +2,14 @@ import dataclasses import operator from contextlib import ExitStack -from typing import (Any, Callable, Dict, List, Optional, Sequence, Set, Tuple, - Union) +from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple from unittest.mock import patch import torch import torch.fx as fx import vllm.envs as envs -from vllm.config import CompilationConfig, CompilationLevel +from vllm.config import CompilationConfig from vllm.logger import init_logger from vllm.utils import combine_fx_passes, weak_ref_tensors @@ -684,14 +683,3 @@ def __call__(self, *args) -> Any: entry.cudagraph.replay() return entry.output - - -def select_default_backend(level: int) -> Union[str, Callable]: - if level in [CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE]: - backend_str = "eager" - return backend_str - assert level == CompilationLevel.PIECEWISE - - from vllm.plugins import get_current_vllm_config - compilation_config = get_current_vllm_config().compilation_config - return VllmBackend(compilation_config) diff --git a/vllm/compilation/wrapper.py b/vllm/compilation/wrapper.py index 2a1aecc11ce26..0143d0301ca1a 100644 --- a/vllm/compilation/wrapper.py +++ b/vllm/compilation/wrapper.py @@ -32,14 +32,9 @@ def __init__(self, # default compilation settings # compiling the forward method - # choose the compile backend - - # if the user has set the backend, use it - from vllm.plugins import get_torch_compile_backend - backend = get_torch_compile_backend() - if backend is None: - from vllm.compilation.backends import select_default_backend - backend = select_default_backend(compilation_level) + from vllm.plugins import get_current_vllm_config + backend = get_current_vllm_config( + ).compilation_config.init_backend() compiled_callable = torch.compile( self.forward, diff --git a/vllm/config.py b/vllm/config.py index 7e37edbe594b1..14017bbdb3cf2 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -22,7 +22,7 @@ get_hf_text_config, get_pooling_config, get_sentence_transformer_tokenizer_config, is_encoder_decoder, uses_mrope) from vllm.utils import (GiB_bytes, cuda_device_count_stateless, get_cpu_memory, - identity, print_warning_once) + identity, print_warning_once, resolve_obj_by_qualname) if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup @@ -2072,6 +2072,13 @@ class CompilationConfig(BaseModel): - 1: dynamo as is. - 2: dynamo once. - 3: piecewise compilation. + - backend: the backend for compilation. It needs to be a string. + - "" (empty string): use the default backend. + - "eager"/"openxla"/...: use the specified backend registered in PyTorch. + - "full.module.name": a qualified name which can be used to import the backend function. 
+ We use string to avoid serialization issues when using compilation in a distributed setting. + When the compilation level is 1 or 2, the backend is used for the compilation directly (it sees the whole graph). + When the compilation level is 3, the backend is used for the piecewise compilation (it sees a part of the graph). - custom_ops: fine-grained control over which custom ops to enable/disable. Use 'all' to enable all, 'none' to disable all. Also specify a list of custom op names to enable (prefixed with a '+'), @@ -2139,6 +2146,7 @@ class CompilationConfig(BaseModel): certain small batchsizes, where inductor is good at optimizing. """ # noqa level: int = 0 + backend: str = "" custom_ops: List[str] = Field(default_factory=list) use_inductor: bool = True @@ -2182,6 +2190,27 @@ def model_post_init(self, __context: Any) -> None: func = __import__(module).__dict__[func_name] self.inductor_compile_config[k] = func + def init_backend(self) -> Union[str, Callable]: + if self.level == CompilationLevel.NO_COMPILATION: + raise ValueError("No compilation level is set.") + + from torch._dynamo.backends.registry import list_backends + torch_backends = list_backends(exclude_tags=tuple()) + if self.level in [ + CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE + ]: + if self.backend == "": + return "eager" + if self.backend in torch_backends: + return self.backend + return resolve_obj_by_qualname(self.backend) + + # TODO: pass user-specified backend to piecewise compilation + # merge with the config use_inductor + assert self.level == CompilationLevel.PIECEWISE + from vllm.compilation.backends import VllmBackend + return VllmBackend(self) + def init_during_runtime(self): """To complete the initialization of config, we need to know the compile context, which is only available diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index c2e22bfc09f22..643db835c85ff 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -3,8 +3,6 @@ import torch -from vllm.plugins import set_torch_compile_backend - from .interface import Platform, PlatformEnum if TYPE_CHECKING: @@ -12,8 +10,6 @@ else: VllmConfig = None -set_torch_compile_backend("openxla") - class TpuPlatform(Platform): _enum = PlatformEnum.TPU @@ -38,3 +34,6 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: compilation_config.level = CompilationLevel.DYNAMO_ONCE assert compilation_config.level < CompilationLevel.PIECEWISE,\ "TPU does not support Inductor." 
+ + if compilation_config.backend == "": + compilation_config.backend = "openxla" diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index c20b9ec891d5d..a0c73a752b5e8 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -1,6 +1,6 @@ import logging from contextlib import contextmanager -from typing import TYPE_CHECKING, Callable, Optional, Union +from typing import TYPE_CHECKING, Optional import vllm.envs as envs @@ -50,18 +50,6 @@ def load_general_plugins(): logger.exception("Failed to load plugin %s", plugin.name) -_torch_compile_backend: Optional[Union[Callable, str]] = None - - -def set_torch_compile_backend(backend: Union[Callable, str]): - global _torch_compile_backend - _torch_compile_backend = backend - - -def get_torch_compile_backend() -> Optional[Union[Callable, str]]: - return _torch_compile_backend - - _compilation_config: Optional[CompilationConfig] = None diff --git a/vllm/utils.py b/vllm/utils.py index 111460a29de47..5d0514cd9d168 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1600,3 +1600,12 @@ def direct_register_custom_op( my_lib.impl(op_name, op_func, "CUDA") if fake_impl is not None: my_lib._register_fake(op_name, fake_impl) + + +def resolve_obj_by_qualname(qualname: str) -> Any: + """ + Resolve an object by its fully qualified name. + """ + module_name, obj_name = qualname.rsplit(".", 1) + module = importlib.import_module(module_name) + return getattr(module, obj_name) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index fd89f95445565..fb5813651680b 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1143,8 +1143,7 @@ def load_model(self) -> None: if self.vllm_config.compilation_config.level ==\ CompilationLevel.DYNAMO_AS_IS and supports_dynamo(): - from vllm.plugins import get_torch_compile_backend - backend = get_torch_compile_backend() or "eager" + backend = self.vllm_config.compilation_config.init_backend() self.model = torch.compile( self.model, fullgraph=envs.VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE, From c7dec926f6f1beaed759b8689373926e68867358 Mon Sep 17 00:00:00 2001 From: lkchen Date: Mon, 18 Nov 2024 00:06:16 -0800 Subject: [PATCH 024/397] [VLM] Report multi_modal_placeholders in output (#10407) Signed-off-by: Linkun Chen --- .../vision_language/test_pixtral.py | 79 ++++++++++++++++++- vllm/model_executor/models/pixtral.py | 16 +++- vllm/outputs.py | 30 +++++-- 3 files changed, 115 insertions(+), 10 deletions(-) diff --git a/tests/models/decoder_only/vision_language/test_pixtral.py b/tests/models/decoder_only/vision_language/test_pixtral.py index d8a98a0f84d3b..6233860747b9c 100644 --- a/tests/models/decoder_only/vision_language/test_pixtral.py +++ b/tests/models/decoder_only/vision_language/test_pixtral.py @@ -8,13 +8,17 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import pytest +from mistral_common.multimodal import download_image from mistral_common.protocol.instruct.messages import ImageURLChunk from mistral_common.protocol.instruct.request import ChatCompletionRequest from mistral_common.tokens.tokenizers.mistral import MistralTokenizer from mistral_common.tokens.tokenizers.multimodal import image_from_chunk +from transformers import AutoProcessor -from vllm import EngineArgs, LLMEngine, SamplingParams, TokensPrompt +from vllm import (EngineArgs, LLMEngine, RequestOutput, SamplingParams, + TextPrompt, TokensPrompt) from vllm.multimodal import MultiModalDataBuiltins +from vllm.multimodal.inputs import PlaceholderRange from vllm.sequence import 
Logprob, SampleLogprobs from ....utils import VLLM_PATH, large_gpu_test @@ -49,6 +53,20 @@ def _create_msg_format(urls: List[str]) -> List[Dict[str, Any]]: }] +def _create_msg_format_hf(urls: List[str]) -> List[Dict[str, Any]]: + return [{ + "role": + "user", + "content": [{ + "type": "text", + "content": PROMPT, + }, *({ + "type": "image", + "image": download_image(url) + } for url in urls)], + }] + + def _create_engine_inputs(urls: List[str]) -> TokensPrompt: msg = _create_msg_format(urls) @@ -70,6 +88,23 @@ def _create_engine_inputs(urls: List[str]) -> TokensPrompt: return engine_inputs +def _create_engine_inputs_hf(urls: List[str]) -> TextPrompt: + msg = _create_msg_format_hf(urls) + + tokenizer = AutoProcessor.from_pretrained("mistral-community/pixtral-12b") + prompt = tokenizer.apply_chat_template(msg) + + images = [] + for chunk in msg[0]["content"]: + if chunk["type"] == "image": + images.append(chunk["image"]) + + mm_data = MultiModalDataBuiltins(image=images) + engine_inputs = TextPrompt(prompt=prompt, multi_modal_data=mm_data) + + return engine_inputs + + MSGS = [ _create_msg_format(IMG_URLS[:1]), _create_msg_format(IMG_URLS[:2]), @@ -191,3 +226,45 @@ def test_model_engine(vllm_runner, model: str, dtype: str) -> None: outputs_1_lst=logprobs, name_0="h100_ref", name_1="output") + + +@large_gpu_test(min_gb=24) +@pytest.mark.parametrize( + "prompt,expected_ranges", + [(_create_engine_inputs_hf(IMG_URLS[:1]), [{ + "offset": 10, + "length": 494 + }]), + (_create_engine_inputs_hf(IMG_URLS[1:4]), [{ + "offset": 10, + "length": 266 + }, { + "offset": 276, + "length": 1056 + }, { + "offset": 1332, + "length": 418 + }])]) +def test_multi_modal_placeholders( + vllm_runner, prompt, expected_ranges: list[PlaceholderRange]) -> None: + with vllm_runner( + "mistral-community/pixtral-12b", + max_model_len=8192, + limit_mm_per_prompt=LIMIT_MM_PER_PROMPT, + ) as vllm_model: + outputs = vllm_model.model.generate(prompt) + + assert len(outputs) == 1, f"{len(outputs)=}" + output: RequestOutput = outputs[0] + assert hasattr(output, + "multi_modal_placeholders"), f"{output.__dict__=}" + assert "image" in output.multi_modal_placeholders, \ + f"{output.multi_modal_placeholders.keys()=}" + image_placeholder_ranges: list[ + PlaceholderRange] = output.multi_modal_placeholders["image"] + assert len(image_placeholder_ranges) == len( + expected_ranges), f"{image_placeholder_ranges=}" + for real_range, expected_range in zip(image_placeholder_ranges, + expected_ranges): + assert real_range == expected_range, \ + f"{real_range=} {expected_range=}" diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 307febde7eef0..d44a538d56b8c 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -30,6 +30,7 @@ from vllm.model_executor.models.utils import merge_multimodal_embeddings from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs +from vllm.multimodal.inputs import PlaceholderRange from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges) from vllm.sequence import IntermediateTensors, SequenceData @@ -773,15 +774,28 @@ def input_processor_for_pixtral_hf( replace_tokens[-1] = image_end_id replace_tokens_list.append(replace_tokens) + reverse_offsets: List[int] = [] # Backward iteration for replacement without affecting known indices for placeholder_idx, replace_tokens in zip(reversed(placeholder_indices), 
reversed(replace_tokens_list)): + reverse_offsets.append( + len(new_token_ids) - placeholder_idx + len(replace_tokens)) new_token_ids[placeholder_idx:placeholder_idx + 1] = replace_tokens + placeholder_ranges: List[PlaceholderRange] = [] + for reverse_offset, replace_tokens in zip(reversed(reverse_offsets), + replace_tokens_list): + placeholder_ranges.append( + PlaceholderRange( + offset=len(new_token_ids) - reverse_offset, + length=len(replace_tokens), + )) + # NOTE: Create a defensive copy of the original inputs return token_inputs(prompt_token_ids=new_token_ids, prompt=new_prompt, - multi_modal_data=multi_modal_data) + multi_modal_data=multi_modal_data, + multi_modal_placeholders={"image": placeholder_ranges}) class PixtralHFMLP(nn.Module): diff --git a/vllm/outputs.py b/vllm/outputs.py index badf50d0602d6..4ae9b377ae693 100644 --- a/vllm/outputs.py +++ b/vllm/outputs.py @@ -5,6 +5,7 @@ from typing import Union from vllm.lora.request import LoRARequest +from vllm.multimodal.inputs import MultiModalPlaceholderDict from vllm.sampling_params import RequestOutputKind from vllm.sequence import (PromptLogprobs, RequestMetrics, SampleLogprobs, SequenceGroup, SequenceGroupBase, SequenceStatus) @@ -103,10 +104,13 @@ def __init__( encoder_prompt: Optional[str] = None, encoder_prompt_token_ids: Optional[List[int]] = None, num_cached_tokens: Optional[int] = None, + *, + multi_modal_placeholders: Optional[MultiModalPlaceholderDict] = None, ) -> None: self.request_id = request_id self.prompt = prompt self.prompt_token_ids = prompt_token_ids + self.multi_modal_placeholders = multi_modal_placeholders or {} self.prompt_logprobs = prompt_logprobs self.outputs = outputs self.finished = finished @@ -275,17 +279,26 @@ def from_seq_group( finished_time = time.time() if finished else None seq_group.set_finished_time(finished_time) - init_args = (seq_group.request_id, prompt, prompt_token_ids, - prompt_logprobs, outputs, finished, seq_group.metrics, - seq_group.lora_request, encoder_prompt, - encoder_prompt_token_ids, num_cached_tokens) + init_kwargs = { + "request_id": seq_group.request_id, + "prompt": prompt, + "prompt_token_ids": prompt_token_ids, + "prompt_logprobs": prompt_logprobs, + "outputs": outputs, + "finished": finished, + "metrics": seq_group.metrics, + "lora_request": seq_group.lora_request, + "encoder_prompt": encoder_prompt, + "encoder_prompt_token_ids": encoder_prompt_token_ids, + "num_cached_tokens": num_cached_tokens, + "multi_modal_placeholders": seq_group.multi_modal_placeholders + } if use_cache: request_output = seq_group.cached_request_output - request_output.__init__(*init_args) # type: ignore - + request_output.__init__(**init_kwargs) # type: ignore else: - request_output = cls(*init_args) + request_output = cls(**init_kwargs) # type: ignore return request_output @@ -300,7 +313,8 @@ def __repr__(self) -> str: f"finished={self.finished}, " f"metrics={self.metrics}, " f"lora_request={self.lora_request}, " - f"num_cached_tokens={self.num_cached_tokens})") + f"num_cached_tokens={self.num_cached_tokens}, " + f"multi_modal_placeholders={self.multi_modal_placeholders})") class EmbeddingRequestOutput: From 01aae1cc68d6013dd91e87418a6d82fa02c58457 Mon Sep 17 00:00:00 2001 From: Maybewuss <38156589+Maybewuss@users.noreply.github.com> Date: Mon, 18 Nov 2024 18:05:36 +0800 Subject: [PATCH 025/397] [Model] Remove redundant softmax when using PoolingType.STEP (#10415) --- vllm/model_executor/layers/pooler.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index 6fee57a0a03eb..bfe2d7d0f382e 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -118,14 +118,13 @@ def forward( if returned_token_ids is not None and len(returned_token_ids) > 0: hidden_states = hidden_states[:, returned_token_ids] - logits = hidden_states.softmax(dim=-1) step_tag_id = self.step_tag_id offset = 0 pooled_data_lst = [] for prompt_len, seq_data_i in zip( prompt_lens, pooling_metadata.seq_data.values()): - pooled_data_i = logits[offset:offset + prompt_len] + pooled_data_i = hidden_states[offset:offset + prompt_len] if step_tag_id is not None: token_ids = torch.tensor(seq_data_i.prompt_token_ids) pooled_data_i = pooled_data_i[token_ids == step_tag_id] From 5be4e52b6522113f7276e60b32cb5c1f912de6fd Mon Sep 17 00:00:00 2001 From: B-201 Date: Mon, 18 Nov 2024 20:57:10 +0800 Subject: [PATCH 026/397] [Model][LoRA]LoRA support added for glm-4v (#10418) Signed-off-by: B-201 --- vllm/model_executor/models/chatglm.py | 98 +++++++++++++++++++++------ 1 file changed, 79 insertions(+), 19 deletions(-) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 81e56381eabd8..625e31bb0d368 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -30,6 +30,7 @@ ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.glm4_vision_encoder import EVA2CLIPModel +from vllm.model_executor.models.module_mapping import MultiModelKeys from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import MultiModalData, MultiModalKwargs @@ -574,25 +575,8 @@ def forward( return hidden_states -@MULTIMODAL_REGISTRY.register_image_input_mapper(mm_input_mapper_for_glmv) -@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_glmv_image_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_glmv) -@INPUT_REGISTRY.register_input_processor(input_processor_for_glmv) -class ChatGLMForCausalLM(nn.Module, SupportsLoRA, SupportsPP, - SupportsMultiModal): - packed_modules_mapping = { - "query_key_value": ["query_key_value"], - "dense_h_to_4h": ["dense_h_to_4h"] - } - # LoRA specific attributes - supported_lora_modules = [ - "query_key_value", - "dense", - "dense_h_to_4h", - "dense_4h_to_h", - ] - embedding_modules = {} - embedding_padding_modules = [] +class ChatGLMBaseModel(nn.Module, SupportsLoRA, SupportsPP, + SupportsMultiModal): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() @@ -692,3 +676,79 @@ def load_weights(self, weights: Iterable[Tuple[str, weight_loader(param, combined_weight) loaded_params.add(combined_name) return loaded_params + + +class ChatGLM(ChatGLMBaseModel): + packed_modules_mapping = { + "query_key_value": ["query_key_value"], + "dense_h_to_4h": ["dense_h_to_4h"] + } + # LoRA specific attributes + supported_lora_modules = [ + "query_key_value", + "dense", + "dense_h_to_4h", + "dense_4h_to_h", + ] + + embedding_modules = {} + embedding_padding_modules = [] + + +class ChatGLMV(ChatGLMBaseModel): + packed_modules_mapping = { + "query_key_value": ["query_key_value"], + "dense_h_to_4h": ["dense_h_to_4h"], + "merged_proj": ["gate_proj", "dense_h_to_4h"] + } + # LoRA specific attributes + supported_lora_modules = [ + "query_key_value", + "dense", + "dense_h_to_4h", + "dense_4h_to_h", 
+ # vision + "fc1", + "fc2", + "merged_proj", + "linear_proj" + ] + + embedding_modules = {} + embedding_padding_modules = [] + + def get_mm_mapping(self) -> MultiModelKeys: + """ + Get the module prefix in multimodal models + """ + return MultiModelKeys.from_string_field( + language_model="transformer.encoder", + connector="transformer.vision.linear_proj", + tower_model="transformer.vision.transformer") + + +@MULTIMODAL_REGISTRY.register_image_input_mapper(mm_input_mapper_for_glmv) +@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_glmv_image_tokens) +@INPUT_REGISTRY.register_dummy_data(dummy_data_for_glmv) +@INPUT_REGISTRY.register_input_processor(input_processor_for_glmv) +class ChatGLMForCausalLM(ChatGLMBaseModel, SupportsLoRA, SupportsPP, + SupportsMultiModal): + # Ensure that the LoRA support check passes when the class is not + # initialized, but set all these attributes to empty. + packed_modules_mapping = {} + supported_lora_modules = [] + embedding_modules = {} + embedding_padding_modules = [] + + def __new__( + cls, + vllm_config: VllmConfig, + prefix: str = "", + ) -> None: + config = vllm_config.model_config.hf_config + # Initialize VL + if hasattr(config, "visual"): + return ChatGLM(vllm_config=vllm_config, prefix=prefix) + # Initialize LLM + else: + return ChatGLMV(vllm_config=vllm_config, prefix=prefix) From e7ebb662d777a9617644428031c1cf80c38939ba Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Mon, 18 Nov 2024 21:45:21 +0800 Subject: [PATCH 027/397] [Model] Remove transformers attention porting in VITs (#10414) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/models/blip.py | 66 +++++++++++++----------- vllm/model_executor/models/clip.py | 65 ++++++++++++----------- vllm/model_executor/models/intern_vit.py | 32 ++++++++---- vllm/model_executor/models/molmo.py | 2 +- vllm/model_executor/models/qwen2_vl.py | 2 +- vllm/model_executor/models/siglip.py | 63 ++++++++++++---------- vllm/model_executor/models/utils.py | 11 ++-- 7 files changed, 139 insertions(+), 102 deletions(-) diff --git a/vllm/model_executor/models/blip.py b/vllm/model_executor/models/blip.py index 6db6462e97f3f..6af59697160a0 100644 --- a/vllm/model_executor/models/blip.py +++ b/vllm/model_executor/models/blip.py @@ -4,10 +4,11 @@ import torch import torch.nn as nn +import torch.nn.functional as F from PIL import Image from transformers import Blip2VisionConfig, BlipVisionConfig -from transformers.models.blip.modeling_blip import BlipAttention +from vllm.attention.selector import _Backend from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -21,11 +22,7 @@ repeat_and_pad_placeholder_tokens) from vllm.sequence import SequenceData -try: - from xformers import ops as xops - USE_XFORMERS_OPS = True -except ImportError: - USE_XFORMERS_OPS = False +from .utils import get_vit_attn_backend def get_blip_patch_grid_length(*, image_size: int, patch_size: int) -> int: @@ -168,7 +165,7 @@ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: return embeddings -class BlipParallelAttention(nn.Module): +class BlipAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( @@ -208,6 +205,12 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) + # Detect attention implementation. 
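        # Note (an assumption based on the get_vit_attn_backend() change in
        # vllm/model_executor/models/utils.py in this same patch): with
        # support_fa=False the helper never returns FLASH_ATTN; it resolves to
        # TORCH_SDPA on CPU/ROCm and XFORMERS otherwise, unless a backend is
        # forced via VLLM_ATTENTION_BACKEND.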
+ self.attn_backend = get_vit_attn_backend(support_fa=False) + if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: + raise RuntimeError( + f"BLIP does not support {self.attn_backend} backend now.") + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() @@ -231,11 +234,26 @@ def forward( self.num_heads_per_partition, self.head_dim) - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops + + out = xops.memory_efficient_attention_forward(query_states, + key_states, + value_states, + p=self.dropout, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + query_states, key_states, value_states = (x.transpose(1, 2) + for x in (query_states, + key_states, + value_states)) + out = F.scaled_dot_product_attention(query_states, + key_states, + value_states, + dropout_p=self.dropout, + scale=self.scale) + out = out.transpose(1, 2) + out = out.view(bsz, tgt_len, -1) attn_output, _ = self.projection(out) @@ -285,18 +303,11 @@ def __init__( super().__init__() # fallback to sdpa attention if tp unavailable - num_heads = config.num_attention_heads - tp_size = get_tensor_model_parallel_world_size() - if USE_XFORMERS_OPS and num_heads % tp_size == 0: - self.self_attn = BlipParallelAttention( - config, - quant_config=quant_config, - prefix=f"{prefix}.self_attn", - ) - else: - # Blip doesn't have SDPA attention implemented in transformers - # use eager attention instead for cpu backend - self.self_attn = BlipAttention(config) + self.self_attn = BlipAttention( + config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = BlipMLP(config, @@ -374,11 +385,6 @@ def __init__( prefix: str = "", ) -> None: super().__init__() - - tp_size = get_tensor_model_parallel_world_size() - num_heads = config.num_attention_heads - self.shard_weight = USE_XFORMERS_OPS and num_heads % tp_size == 0 - self.config = config self.embeddings = BlipVisionEmbeddings(config) @@ -422,7 +428,7 @@ def load_weights(self, weights: Iterable[Tuple[str, ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), ("qkv_proj", "v_proj", "v"), - ] if self.shard_weight else [] + ] params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() layer_count = len(self.encoder.layers) diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index 184758f4a8a45..7f638506f9fb2 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -5,10 +5,11 @@ import numpy as np import torch import torch.nn as nn +import torch.nn.functional as F from PIL import Image from transformers import CLIPVisionConfig -from transformers.models.clip.modeling_clip import CLIPSdpaAttention +from vllm.attention.selector import _Backend from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -23,11 +24,7 @@ repeat_and_pad_placeholder_tokens) from vllm.sequence import SequenceData -try: - from xformers import ops as xops - USE_XFORMERS_OPS = True -except ImportError: - USE_XFORMERS_OPS = False +from .utils import get_vit_attn_backend def get_clip_patch_grid_length(*, image_size: int, patch_size: int) -> int: @@ 
-197,7 +194,7 @@ def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: return embeddings -class CLIPParallelAttention(nn.Module): +class CLIPAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( @@ -237,6 +234,12 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) + # Detect attention implementation. + self.attn_backend = get_vit_attn_backend(support_fa=False) + if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: + raise RuntimeError( + f"CLIP does not support {self.attn_backend} backend now.") + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() @@ -261,11 +264,26 @@ def forward( self.num_heads_per_partition, self.head_dim) - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops + + out = xops.memory_efficient_attention_forward(query_states, + key_states, + value_states, + p=self.dropout, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + query_states, key_states, value_states = (x.transpose(1, 2) + for x in (query_states, + key_states, + value_states)) + out = F.scaled_dot_product_attention(query_states, + key_states, + value_states, + dropout_p=self.dropout, + scale=self.scale) + out = out.transpose(1, 2) + out = out.view(bsz, tgt_len, -1) attn_output, _ = self.out_proj(out) @@ -311,17 +329,11 @@ def __init__( prefix: str = "", ) -> None: super().__init__() - - num_heads = config.num_attention_heads - tp_size = get_tensor_model_parallel_world_size() - if USE_XFORMERS_OPS and num_heads % tp_size == 0: - self.self_attn = CLIPParallelAttention( - config, - quant_config=quant_config, - prefix=f"{prefix}.self_attn", - ) - else: - self.self_attn = CLIPSdpaAttention(config) + self.self_attn = CLIPAttention( + config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.layer_norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.mlp = CLIPMLP(config, @@ -461,11 +473,6 @@ def __init__( prefix: str = "", ) -> None: super().__init__() - - tp_size = get_tensor_model_parallel_world_size() - num_heads = config.num_attention_heads - self.shard_weight = USE_XFORMERS_OPS and num_heads % tp_size == 0 - self.vision_model = CLIPVisionTransformer( config=config, quant_config=quant_config, @@ -490,7 +497,7 @@ def load_weights(self, weights: Iterable[Tuple[str, ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), ("qkv_proj", "v_proj", "v"), - ] if self.shard_weight else [] + ] params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() layer_count = len(self.vision_model.encoder.layers) diff --git a/vllm/model_executor/models/intern_vit.py b/vllm/model_executor/models/intern_vit.py index bd91a0806ae5c..c4346fcb3bd2a 100644 --- a/vllm/model_executor/models/intern_vit.py +++ b/vllm/model_executor/models/intern_vit.py @@ -12,6 +12,7 @@ import torch.nn.functional as F from transformers import PretrainedConfig +from vllm.attention.selector import _Backend from vllm.distributed import (divide, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, split_tensor_along_last_dim, @@ -24,11 +25,7 @@ from vllm.model_executor.layers.quantization import QuantizationConfig from 
vllm.model_executor.model_loader.weight_utils import default_weight_loader -try: - from xformers import ops as xops - USE_XFORMERS_OPS = True -except ImportError: - USE_XFORMERS_OPS = False +from .utils import get_vit_attn_backend NORM2FN = { 'rms_norm': RMSNorm, @@ -186,6 +183,11 @@ def __init__( prefix=f"{prefix}.proj", ) + self.attn_backend = get_vit_attn_backend(support_fa=False) + if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: + raise RuntimeError( + f"InternViT does not support {self.attn_backend} backend now.") + def _apply_qk_norm(self, q: torch.Tensor, k: torch.Tensor): if self.tp_size > 1: q = tensor_model_parallel_all_gather(q.contiguous()) @@ -211,11 +213,21 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: k = k.view(B, N, self.num_heads_per_partition, self.head_dim) v = v.view(B, N, self.num_heads_per_partition, self.head_dim) - x = xops.memory_efficient_attention_forward(q, k, v, scale=self.scale) - x = x.view(B, N, -1) + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops - x, _ = self.proj(x) - return x + out = xops.memory_efficient_attention_forward(q, + k, + v, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + q, k, v = (x.transpose(1, 2) for x in (q, k, v)) + out = F.scaled_dot_product_attention(q, k, v, scale=self.scale) + out = out.transpose(1, 2) + + out = out.view(B, N, -1) + out, _ = self.proj(out) + return out class InternSdpaAttention(nn.Module): @@ -362,7 +374,7 @@ def _init_attn( tp_size = get_tensor_model_parallel_world_size() num_heads = config.num_attention_heads - if USE_XFORMERS_OPS and (num_heads + num_dummy_heads) % tp_size == 0: + if (num_heads + num_dummy_heads) % tp_size == 0: return InternParallelAttention(config, quant_config=quant_config, num_dummy_heads=num_dummy_heads, diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index 035a1e2ab7b02..a7c90a3f5031b 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -187,7 +187,7 @@ def __init__( ) # Detect attention implementation. - self.attn_backend: _Backend = get_vit_attn_backend() + self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True) if self.attn_backend not in { _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS }: diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index ef6b52db6e17d..a929b9323b245 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -260,7 +260,7 @@ def __init__( prefix=f"{prefix}.proj") # Detect attention implementation. 
- self.attn_backend: _Backend = get_vit_attn_backend() + self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True) if self.attn_backend not in { _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS }: diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py index c9e09b879843a..c58ad99692900 100644 --- a/vllm/model_executor/models/siglip.py +++ b/vllm/model_executor/models/siglip.py @@ -6,11 +6,12 @@ import numpy as np import torch +import torch.nn.functional as F from PIL import Image from torch import nn from transformers import SiglipVisionConfig -from transformers.models.siglip.modeling_siglip import SiglipSdpaAttention +from vllm.attention.selector import _Backend from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -27,11 +28,7 @@ repeat_and_pad_placeholder_tokens) from vllm.sequence import SequenceData -try: - from xformers import ops as xops - USE_XFORMERS_OPS = True -except ImportError: - USE_XFORMERS_OPS = False +from .utils import get_vit_attn_backend def get_siglip_patch_grid_length(*, image_size: int, patch_size: int) -> int: @@ -254,7 +251,7 @@ def forward(self, return embeddings -class SiglipParallelAttention(nn.Module): +class SiglipAttention(nn.Module): def __init__( self, @@ -293,6 +290,11 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) + self.attn_backend = get_vit_attn_backend(support_fa=False) + if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: + raise RuntimeError( + f"SIGLIP does not support {self.attn_backend} backend now.") + def forward( self, hidden_states: torch.Tensor, @@ -313,11 +315,26 @@ def forward( self.num_heads_per_partition, self.head_dim) - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops + + out = xops.memory_efficient_attention_forward(query_states, + key_states, + value_states, + p=self.dropout, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + query_states, key_states, value_states = (x.transpose(1, 2) + for x in (query_states, + key_states, + value_states)) + out = F.scaled_dot_product_attention(query_states, + key_states, + value_states, + dropout_p=self.dropout, + scale=self.scale) + out = out.transpose(1, 2) + out = out.view(batch_size, q_len, -1) attn_output, _ = self.out_proj(out) @@ -372,17 +389,11 @@ def __init__( self.embed_dim = config.hidden_size - num_heads = config.num_attention_heads - tp_size = get_tensor_model_parallel_world_size() - if USE_XFORMERS_OPS and num_heads % tp_size == 0: - self.self_attn = SiglipParallelAttention( - config, - quant_config=quant_config, - prefix=f"{prefix}.self_attn", - ) - else: - self.self_attn = SiglipSdpaAttention(config) - + self.self_attn = SiglipAttention( + config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = SiglipMLP( @@ -569,10 +580,6 @@ def __init__( ) -> None: super().__init__() - num_heads = config.num_attention_heads - tp_size = get_tensor_model_parallel_world_size() - self.shard_weight = USE_XFORMERS_OPS and num_heads % tp_size == 0 - self.vision_model = SiglipVisionTransformer( config, quant_config, @@ -601,7 +608,7 @@ def 
load_weights(self, weights: Iterable[Tuple[str, ("qkv_proj", "q_proj", "q"), ("qkv_proj", "k_proj", "k"), ("qkv_proj", "v_proj", "v"), - ] if self.shard_weight else [] + ] params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() layer_count = len(self.vision_model.encoder.layers) diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 7a4fcce95603d..03226f42ee053 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -587,7 +587,11 @@ def __call__(self, *args: Any, **kwargs: Any) -> Any: return llm(*args, **kwargs) -def get_vit_attn_backend() -> _Backend: +def get_vit_attn_backend(support_fa: bool = False) -> _Backend: + """ + Get the available attention backend for Vision Transformer. + """ + # TODO(Isotr0py): Remove `support_fa` after support FA for all ViTs attn. selected_backend: Optional[_Backend] = get_global_forced_attn_backend() if selected_backend is None: backend_by_env_var: Optional[str] = envs.VLLM_ATTENTION_BACKEND @@ -596,7 +600,7 @@ def get_vit_attn_backend() -> _Backend: if selected_backend is None: # For Volta and Turing GPUs, use xformers instead. device_available = current_platform.has_device_capability(80) - if device_available: + if device_available and support_fa: from transformers.utils import is_flash_attn_2_available if is_flash_attn_2_available(): selected_backend = _Backend.FLASH_ATTN @@ -606,7 +610,8 @@ def get_vit_attn_backend() -> _Backend: "so we use xformers backend instead. You can run " "`pip install flash-attn` to use flash-attention backend.") selected_backend = _Backend.XFORMERS - elif current_platform.is_cpu(): + elif current_platform.is_cpu() or current_platform.is_rocm(): + # ROCM doesn't support xformers selected_backend = _Backend.TORCH_SDPA else: selected_backend = _Backend.XFORMERS From 4186be8111e20c64d0cbcbdebbdd1081e77f1075 Mon Sep 17 00:00:00 2001 From: B-201 Date: Mon, 18 Nov 2024 23:08:30 +0800 Subject: [PATCH 028/397] [Doc] Update doc for LoRA support in GLM-4V (#10425) Signed-off-by: B-201 --- docs/source/models/supported_models.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 96a513d42753b..e902d393f2f70 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -446,7 +446,7 @@ Text Generation - GLM-4V - T + I - :code:`THUDM/glm-4v-9b` etc. - - + - ✅︎ - ✅︎ * - :code:`H2OVLChatModel` - H2OVL From 7851b45196aff994277ec832c0cf5bec0073f08e Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 18 Nov 2024 07:20:06 -0800 Subject: [PATCH 029/397] [5/N][torch.compile] torch.jit.script --> torch.compile (#10406) Signed-off-by: youkaichao --- vllm/model_executor/layers/rejection_sampler.py | 2 +- vllm/model_executor/layers/vocab_parallel_embedding.py | 4 ++-- vllm/model_executor/models/phi3_small.py | 4 ++-- vllm/worker/model_runner.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vllm/model_executor/layers/rejection_sampler.py b/vllm/model_executor/layers/rejection_sampler.py index 2e9a0e170693b..3ab0ba9e9f5c2 100644 --- a/vllm/model_executor/layers/rejection_sampler.py +++ b/vllm/model_executor/layers/rejection_sampler.py @@ -368,7 +368,7 @@ def _smallest_positive_value(self) -> float: # Note that we always sample with replacement. # probs will be modified in place, but this is fine, as we pass # in a copy already. 
-@torch.jit.script +@torch.compile(dynamic=True) def _multinomial( probs: torch.Tensor, num_samples: int, diff --git a/vllm/model_executor/layers/vocab_parallel_embedding.py b/vllm/model_executor/layers/vocab_parallel_embedding.py index 52771f50a7a23..30548e656c557 100644 --- a/vllm/model_executor/layers/vocab_parallel_embedding.py +++ b/vllm/model_executor/layers/vocab_parallel_embedding.py @@ -133,13 +133,13 @@ def __post_init__(self): assert self.num_added_elements <= self.num_added_elements_padded -@torch.jit.script +@torch.compile(dynamic=True) def get_masked_input_and_mask( input_: torch.Tensor, org_vocab_start_index: int, org_vocab_end_index: int, num_org_vocab_padding: int, added_vocab_start_index: int, added_vocab_end_index: int) -> Tuple[torch.Tensor, torch.Tensor]: - # torch.jit.script will fuse all of the pointwise ops below + # torch.compile will fuse all of the pointwise ops below # into a single kernel, making it very fast org_vocab_mask = (input_ >= org_vocab_start_index) & (input_ < org_vocab_end_index) diff --git a/vllm/model_executor/models/phi3_small.py b/vllm/model_executor/models/phi3_small.py index a78e4d355a314..f71cbd1264c45 100644 --- a/vllm/model_executor/models/phi3_small.py +++ b/vllm/model_executor/models/phi3_small.py @@ -54,12 +54,12 @@ def weight_loader(self, param: torch.nn.Parameter, return load_column_parallel_weight(param, loaded_weight) -@torch.jit.script +@torch.compile(dynamic=True) def quick_gelu(x): return x * torch.sigmoid(1.702 * x) -@torch.jit.script +@torch.compile(dynamic=True) def gegelu(input, limit: Optional[float] = None): a_gelu, a_linear = input[..., ::2], input[..., 1::2] if limit is not None: diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index fb5813651680b..ed0360fb7f727 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1769,7 +1769,7 @@ def capture( # Run the model a few times without capturing the graph. # This is to make sure that the captured graph does not include the # kernel launches for initial benchmarking (e.g., Triton autotune). - # Note one iteration is not enough for torch.jit.script + # Note one iteration is not enough for torch.compile for _ in range(_NUM_WARMUP_ITERS): self.model( input_ids=input_ids, From 31894a21559436f4a9d72f751e8bd7ba4ab18613 Mon Sep 17 00:00:00 2001 From: ismael-dm Date: Mon, 18 Nov 2024 18:52:12 +0100 Subject: [PATCH 030/397] [Doc] Add documentation for Structured Outputs (#9943) Signed-off-by: ismael-dm --- docs/source/index.rst | 1 + docs/source/models/structured_outputs.rst | 173 ++++++++++++++++++ .../offline_inference_structured_outputs.py | 78 ++++++++ ...enai_chat_completion_structured_outputs.py | 94 ++++++++++ 4 files changed, 346 insertions(+) create mode 100644 docs/source/models/structured_outputs.rst create mode 100644 examples/offline_inference_structured_outputs.py create mode 100644 examples/openai_chat_completion_structured_outputs.py diff --git a/docs/source/index.rst b/docs/source/index.rst index 3b2698a8845ed..b04acbbce4169 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -101,6 +101,7 @@ Documentation models/engine_args models/lora models/vlm + models/structured_outputs models/spec_decode models/performance diff --git a/docs/source/models/structured_outputs.rst b/docs/source/models/structured_outputs.rst new file mode 100644 index 0000000000000..ff4ff7169fc5f --- /dev/null +++ b/docs/source/models/structured_outputs.rst @@ -0,0 +1,173 @@ +.. 
_structured_outputs: + +Structured Outputs +================== + +vLLM supports the generation of structured outputs using `outlines `_ or `lm-format-enforcer `_ as backends for the guided decoding. +This document shows you some examples of the different options that are available to generate structured outputs. + + +Online Inference (OpenAI API) +----------------------------- + +You can generate structured outputs using the OpenAI’s `Completions `_ and `Chat `_ API. + +The following parameters are supported, which must be added as extra parameters: + +- ``guided_choice``: the output will be exactly one of the choices. +- ``guided_regex``: the output will follow the regex pattern. +- ``guided_json``: the output will follow the JSON schema. +- ``guided_grammar``: the output will follow the context free grammar. +- ``guided_whitespace_pattern``: used to override the default whitespace pattern for guided json decoding. +- ``guided_decoding_backend``: used to select the guided decoding backend to use. + +You can see the complete list of supported parameters on the `OpenAI Compatible Server `_ page. + +Now let´s see an example for each of the cases, starting with the ``guided_choice``, as it´s the easiest one: + +.. code-block:: python + + from openai import OpenAI + client = OpenAI( + base_url="http://localhost:8000/v1", + api_key="-", + ) + + completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[ + {"role": "user", "content": "Classify this sentiment: vLLM is wonderful!"} + ], + extra_body={"guided_choice": ["positive", "negative"]}, + ) + print(completion.choices[0].message.content) + + +The next example shows how to use the ``guided_regex``. The idea is to generate an email address, given a simple regex template: + +.. code-block:: python + + completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[ + { + "role": "user", + "content": "Generate an example email address for Alan Turing, who works in Enigma. End in .com and new line. Example result: alan.turing@enigma.com\n", + } + ], + extra_body={"guided_regex": "\w+@\w+\.com\n", "stop": ["\n"]}, + ) + print(completion.choices[0].message.content) + +One of the most relevant features in structured text generation is the option to generate a valid JSON with pre-defined fields and formats. +For this we can use the ``guided_json`` parameter in two different ways: + +- Using directly a `JSON Schema `_ +- Defining a `Pydantic model `_ and then extracting the JSON Schema from it (which is normally an easier option). + +The next example shows how to use the ``guided_json`` parameter with a Pydantic model: + +.. code-block:: python + + from pydantic import BaseModel + from enum import Enum + + class CarType(str, Enum): + sedan = "sedan" + suv = "SUV" + truck = "Truck" + coupe = "Coupe" + + + class CarDescription(BaseModel): + brand: str + model: str + car_type: CarType + + + json_schema = CarDescription.model_json_schema() + + completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[ + { + "role": "user", + "content": "Generate a JSON with the brand, model and car_type of the most iconic car from the 90's", + } + ], + extra_body={"guided_json": json_schema}, + ) + print(completion.choices[0].message.content) + +.. tip:: + While not strictly necessary, normally it´s better to indicate in the prompt that a JSON needs to be generated and which fields and how should the LLM fill them. + This can improve the results notably in most cases. 
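The ``guided_json`` parameter also accepts a plain JSON Schema passed directly as a dictionary, without defining a Pydantic model first. A minimal sketch, assuming the same client and model as in the previous examples (the schema itself is only illustrative):

.. code-block:: python

    person_schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "age": {"type": "integer"},
        },
        "required": ["name", "age"],
    }

    completion = client.chat.completions.create(
        model="Qwen/Qwen2.5-3B-Instruct",
        messages=[
            {
                "role": "user",
                "content": "Generate a JSON with the name and age of a fictional person",
            }
        ],
        extra_body={"guided_json": person_schema},
    )
    print(completion.choices[0].message.content)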
+ + +Finally we have the ``guided_grammar``, which probably is the most difficult one to use but it´s really powerful, as it allows us to define complete languages like SQL queries. +It works by using a context free EBNF grammar, which for example we can use to define a specific format of simplified SQL queries, like in the example below: + +.. code-block:: python + + simplified_sql_grammar = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ + """ + + completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[ + { + "role": "user", + "content": "Generate an SQL query to show the 'username' and 'email' from the 'users' table.", + } + ], + extra_body={"guided_grammar": simplified_sql_grammar}, + ) + print(completion.choices[0].message.content) + +The complete code of the examples can be found on `examples/openai_chat_completion_structured_outputs.py `_. + + +Offline Inference +----------------- + +Offline inference allows for the same types of guided decoding. +To use it, we´ll need to configure the guided decoding using the class ``GuidedDecodingParams`` inside ``SamplingParams``. +The main available options inside ``GuidedDecodingParams`` are: + +- ``json`` +- ``regex`` +- ``choice`` +- ``grammar`` +- ``backend`` +- ``whitespace_pattern`` + +These parameters can be used in the same way as the parameters from the Online Inference examples above. +One example for the usage of the ``choices`` parameter is shown below: + +.. code-block:: python + + from vllm import LLM, SamplingParams + from vllm.sampling_params import GuidedDecodingParams + + llm = LLM(model="HuggingFaceTB/SmolLM2-1.7B-Instruct") + + guided_decoding_params = GuidedDecodingParams(choice=["Positive", "Negative"]) + sampling_params = SamplingParams(guided_decoding=guided_decoding_params) + outputs = llm.generate( + prompts="Classify this sentiment: vLLM is wonderful!", + sampling_params=sampling_params, + ) + print(outputs[0].outputs[0].text) + +A complete example with all options can be found in `examples/offline_inference_structured_outputs.py `_. \ No newline at end of file diff --git a/examples/offline_inference_structured_outputs.py b/examples/offline_inference_structured_outputs.py new file mode 100644 index 0000000000000..00d864606eeff --- /dev/null +++ b/examples/offline_inference_structured_outputs.py @@ -0,0 +1,78 @@ +from enum import Enum + +from pydantic import BaseModel + +from vllm import LLM, SamplingParams +from vllm.sampling_params import GuidedDecodingParams + +llm = LLM(model="Qwen/Qwen2.5-3B-Instruct", max_model_len=100) + +# Guided decoding by Choice (list of possible options) +guided_decoding_params = GuidedDecodingParams(choice=["Positive", "Negative"]) +sampling_params = SamplingParams(guided_decoding=guided_decoding_params) +outputs = llm.generate( + prompts="Classify this sentiment: vLLM is wonderful!", + sampling_params=sampling_params, +) +print(outputs[0].outputs[0].text) + +# Guided decoding by Regex +guided_decoding_params = GuidedDecodingParams(regex="\w+@\w+\.com\n") +sampling_params = SamplingParams(guided_decoding=guided_decoding_params, + stop=["\n"]) +prompt = ("Generate an email address for Alan Turing, who works in Enigma." + "End in .com and new line. 
Example result:" + "alan.turing@enigma.com\n") +outputs = llm.generate(prompts=prompt, sampling_params=sampling_params) +print(outputs[0].outputs[0].text) + + +# Guided decoding by JSON using Pydantic schema +class CarType(str, Enum): + sedan = "sedan" + suv = "SUV" + truck = "Truck" + coupe = "Coupe" + + +class CarDescription(BaseModel): + brand: str + model: str + car_type: CarType + + +json_schema = CarDescription.model_json_schema() + +guided_decoding_params = GuidedDecodingParams(json=json_schema) +sampling_params = SamplingParams(guided_decoding=guided_decoding_params) +prompt = ("Generate a JSON with the brand, model and car_type of" + "the most iconic car from the 90's") +outputs = llm.generate( + prompts=prompt, + sampling_params=sampling_params, +) +print(outputs[0].outputs[0].text) + +# Guided decoding by Grammar +simplified_sql_grammar = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ +""" +guided_decoding_params = GuidedDecodingParams(grammar=simplified_sql_grammar) +sampling_params = SamplingParams(guided_decoding=guided_decoding_params) +prompt = ("Generate an SQL query to show the 'username' and 'email'" + "from the 'users' table.") +outputs = llm.generate( + prompts=prompt, + sampling_params=sampling_params, +) +print(outputs[0].outputs[0].text) diff --git a/examples/openai_chat_completion_structured_outputs.py b/examples/openai_chat_completion_structured_outputs.py new file mode 100644 index 0000000000000..8c059c7ca07ce --- /dev/null +++ b/examples/openai_chat_completion_structured_outputs.py @@ -0,0 +1,94 @@ +from enum import Enum + +from openai import OpenAI +from pydantic import BaseModel + +client = OpenAI( + base_url="http://localhost:8000/v1", + api_key="-", +) + +# Guided decoding by Choice (list of possible options) +completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[{ + "role": "user", + "content": "Classify this sentiment: vLLM is wonderful!" + }], + extra_body={"guided_choice": ["positive", "negative"]}, +) +print(completion.choices[0].message.content) + +# Guided decoding by Regex +prompt = ("Generate an email address for Alan Turing, who works in Enigma." + "End in .com and new line. 
Example result:" + "alan.turing@enigma.com\n") + +completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[{ + "role": "user", + "content": prompt, + }], + extra_body={ + "guided_regex": "\w+@\w+\.com\n", + "stop": ["\n"] + }, +) +print(completion.choices[0].message.content) + + +# Guided decoding by JSON using Pydantic schema +class CarType(str, Enum): + sedan = "sedan" + suv = "SUV" + truck = "Truck" + coupe = "Coupe" + + +class CarDescription(BaseModel): + brand: str + model: str + car_type: CarType + + +json_schema = CarDescription.model_json_schema() + +prompt = ("Generate a JSON with the brand, model and car_type of" + "the most iconic car from the 90's") +completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[{ + "role": "user", + "content": prompt, + }], + extra_body={"guided_json": json_schema}, +) +print(completion.choices[0].message.content) + +# Guided decoding by Grammar +simplified_sql_grammar = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ +""" + +prompt = ("Generate an SQL query to show the 'username' and 'email'" + "from the 'users' table.") +completion = client.chat.completions.create( + model="Qwen/Qwen2.5-3B-Instruct", + messages=[{ + "role": "user", + "content": prompt, + }], + extra_body={"guided_grammar": simplified_sql_grammar}, +) +print(completion.choices[0].message.content) From 4f686d139f6acb31ea31eaf57ed1bb3920a77682 Mon Sep 17 00:00:00 2001 From: Andrew Nesbitt Date: Mon, 18 Nov 2024 17:52:42 +0000 Subject: [PATCH 031/397] Fix open_collective value in FUNDING.yml (#10426) Signed-off-by: Andrew Nesbitt --- .github/FUNDING.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 71f4e520135d4..d1f6105a47166 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,2 +1,2 @@ github: [vllm-project] -open_collective: [vllm] +open_collective: vllm From 281cc4b3cd2f6c84c2cd8272ef83d97edd1c323a Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 18 Nov 2024 13:04:14 -0500 Subject: [PATCH 032/397] [Model][Bugfix] Support TP for PixtralHF ViT (#10405) Signed-off-by: mgoin --- vllm/model_executor/models/pixtral.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index d44a538d56b8c..f7f46770057e2 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -17,6 +17,7 @@ from vllm.attention import AttentionMetadata from vllm.config import ModelConfig, VllmConfig +from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) from vllm.model_executor.layers.activation import get_act_and_mul_fn @@ -843,17 +844,20 @@ def __init__( self.config = config assert not config.hidden_size % config.num_attention_heads - self.n_heads = config.num_attention_heads + self.total_num_heads = config.num_attention_heads + tp_size = get_tensor_model_parallel_world_size() + self.n_heads = divide(config.num_attention_heads, tp_size) self.head_dim = config.hidden_size // config.num_attention_heads self.qkv_proj = QKVParallelLinear( hidden_size=config.hidden_size, head_size=self.head_dim, - 
total_num_heads=self.n_heads, + total_num_heads=self.total_num_heads, bias=False, quant_config=quant_config, prefix=f"{prefix}.qkv_proj", ) + assert self.total_num_heads * self.head_dim == config.hidden_size self.o_proj = RowParallelLinear( input_size=config.hidden_size, output_size=config.hidden_size, From 6b2d25efc78f21867ca37e3f707c5a94f906478f Mon Sep 17 00:00:00 2001 From: Yan Ma Date: Tue, 19 Nov 2024 02:18:05 +0800 Subject: [PATCH 033/397] [Hardware][XPU] AWQ/GPTQ support for xpu backend (#10107) Signed-off-by: yan ma --- .../quantization/supported_hardware.rst | 8 +- tests/quantization/test_ipex_quant.py | 10 +- vllm/model_executor/layers/linear.py | 2 +- .../layers/quantization/gptq.py | 1 - .../layers/quantization/gptq_marlin.py | 4 + .../layers/quantization/ipex_quant.py | 169 +++++++++++++----- vllm/model_executor/model_loader/loader.py | 4 +- 7 files changed, 146 insertions(+), 52 deletions(-) diff --git a/docs/source/quantization/supported_hardware.rst b/docs/source/quantization/supported_hardware.rst index 9bf0cdb80376d..09f8e7112cf0c 100644 --- a/docs/source/quantization/supported_hardware.rst +++ b/docs/source/quantization/supported_hardware.rst @@ -27,7 +27,7 @@ The table below shows the compatibility of various quantization implementations - ✅︎ - ✅︎ - ✗ - - ✗ + - ✅︎ - ✅︎ - ✗ - ✗ @@ -38,8 +38,8 @@ The table below shows the compatibility of various quantization implementations - ✅︎ - ✅︎ - ✗ - - ✗ - - ✗ + - ✅︎ + - ✅︎ - ✗ - ✗ * - Marlin (GPTQ/AWQ/FP8) @@ -129,4 +129,4 @@ Notes: Please note that this compatibility chart may be subject to change as vLLM continues to evolve and expand its support for different hardware platforms and quantization methods. -For the most up-to-date information on hardware support and quantization methods, please check the `quantization directory `_ or consult with the vLLM development team. \ No newline at end of file +For the most up-to-date information on hardware support and quantization methods, please check the `quantization directory `_ or consult with the vLLM development team. diff --git a/tests/quantization/test_ipex_quant.py b/tests/quantization/test_ipex_quant.py index d541efcefcac3..68a73f0f8ab48 100644 --- a/tests/quantization/test_ipex_quant.py +++ b/tests/quantization/test_ipex_quant.py @@ -1,5 +1,5 @@ """Test model set-up and inference for quantized HF models supported - on the CPU backend using IPEX (including AWQ). + on the CPU/GPU backend using IPEX (including AWQ/GPTQ). Validating the configuration and printing results for manual checking. 
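# A minimal usage sketch of what this test exercises, assuming a CPU/XPU build
# of vLLM with a recent intel_extension_for_pytorch installed. The model name
# and dtype mirror the MODELS/DTYPE lists below; the prompt is arbitrary.
from vllm import LLM, SamplingParams

llm = LLM(model="shuyuej/Llama-3.2-1B-Instruct-GPTQ", dtype="bfloat16")
outputs = llm.generate("Hello, my name is", SamplingParams(max_tokens=32))
print(outputs[0].outputs[0].text)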
@@ -11,13 +11,15 @@ from vllm.platforms import current_platform MODELS = [ - "casperhansen/llama-3-8b-instruct-awq", + "AMead10/Llama-3.2-1B-Instruct-AWQ", + "shuyuej/Llama-3.2-1B-Instruct-GPTQ", # with g_idx ] DTYPE = ["bfloat16"] -@pytest.mark.skipif(not current_platform.is_cpu(), - reason="only supports the CPU backend.") +@pytest.mark.skipif(not current_platform.is_cpu() + and not current_platform.is_xpu(), + reason="only supports Intel CPU/XPU backend.") @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("dtype", DTYPE) def test_ipex_quant(vllm_runner, model, dtype): diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index 94f30412e43b3..e1f8a6e36d781 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -27,7 +27,7 @@ "AWQLinearMethod", "GPTQMarlinLinearMethod", "Fp8LinearMethod", "MarlinLinearMethod", "QQQLinearMethod", "GPTQMarlin24LinearMethod", "TPUInt8LinearMethod", "GPTQLinearMethod", "FBGEMMFp8LinearMethod", - "ModelOptFp8LinearMethod", "IPEXAWQLinearMethod" + "ModelOptFp8LinearMethod", "IPEXAWQLinearMethod", "IPEXGPTQLinearMethod" ] diff --git a/vllm/model_executor/layers/quantization/gptq.py b/vllm/model_executor/layers/quantization/gptq.py index 0aa605e62454e..abafad0f1047e 100644 --- a/vllm/model_executor/layers/quantization/gptq.py +++ b/vllm/model_executor/layers/quantization/gptq.py @@ -210,7 +210,6 @@ def create_weights( def process_weights_after_loading(self, layer: torch.nn.Module) -> None: # for torch.compile - layer.qweight = Parameter(layer.qweight.data, requires_grad=False) layer.qzeros = Parameter(layer.qzeros.data, requires_grad=False) layer.qweight = Parameter(layer.qweight.data, requires_grad=False) layer.g_idx = Parameter(layer.g_idx.data, requires_grad=False) diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py index 1f72e3afbbce5..a3e58bf1b2a4c 100644 --- a/vllm/model_executor/layers/quantization/gptq_marlin.py +++ b/vllm/model_executor/layers/quantization/gptq_marlin.py @@ -23,6 +23,7 @@ PackedColumnParameter, PackedvLLMParameter, RowvLLMParameter) +from vllm.platforms import current_platform from vllm.scalar_type import scalar_types logger = init_logger(__name__) @@ -134,6 +135,9 @@ def is_gptq_marlin_compatible(cls, quant_config: Dict[str, Any]): sym = quant_config.get("sym") desc_act = quant_config.get("desc_act") + if not current_platform.is_cuda(): + return False + if quant_method != "gptq": return False diff --git a/vllm/model_executor/layers/quantization/ipex_quant.py b/vllm/model_executor/layers/quantization/ipex_quant.py index 330c2ad195d78..c16a962134d06 100644 --- a/vllm/model_executor/layers/quantization/ipex_quant.py +++ b/vllm/model_executor/layers/quantization/ipex_quant.py @@ -2,21 +2,26 @@ import torch -from vllm.model_executor.layers.linear import LinearBase, LinearMethodBase -from vllm.model_executor.layers.quantization.awq import AWQLinearMethod +from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase, + UnquantizedLinearMethod) +from vllm.model_executor.layers.quantization.awq import (AWQLinearMethod, + is_layer_skipped_awq) from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) +from vllm.model_executor.layers.quantization.gptq import GPTQLinearMethod from vllm.platforms import current_platform +MIN_IPEX_VERSION = "2.5.0" + class IPEXConfig(QuantizationConfig): - """INT8 quantization config class using IPEX for the CPU 
backend, - including AWQ. + """INT8 quantization config class using IPEX for the CPU/XPU backend, + including AWQ, GPTQ. """ IPEX_QUANT_METHOD_MAP = { "awq": 1, - "gptq": 2, + "gptq": 0, } def __init__( @@ -24,29 +29,30 @@ def __init__( method: str, weight_bits: int, group_size: int, + modules_to_not_convert: Optional[List[str]] = None, + desc_act: Optional[bool] = None, + lm_head_quantized: Optional[bool] = None, ) -> None: self.method = method self.weight_bits = weight_bits self.group_size = group_size + self.modules_to_not_convert = modules_to_not_convert or [] + self.desc_act = desc_act + self.lm_head_quantized = lm_head_quantized self.pack_factor = 32 // self.weight_bits if self.weight_bits not in [4]: raise ValueError(f"IPEX quantization supports weight bits [4], " f"but got {self.weight_bits}.") - if self.method == "awq": - self.quant_method = IPEXAWQLinearMethod - else: - raise ValueError(f"IPEX quantization supports [awq], " + if self.method not in ["awq", "gptq"]: + raise ValueError(f"IPEX quantization supports [awq, gptq], " f"but got {self.method}.") def __repr__(self) -> str: - return (f"IPEXConfig(method={self.method}" + return (f"IPEXConfig(method={self.method}," f"weight_bits={self.weight_bits}, " - f"group_size={self.group_size}") - - def get_ipex_quant_method_id(self) -> int: - return IPEXConfig.IPEX_QUANT_METHOD_MAP[self.method] + f"group_size={self.group_size})") @classmethod def get_name(cls) -> str: @@ -70,19 +76,32 @@ def get_config_filenames() -> List[str]: @classmethod def from_config(cls, config: Dict[str, Any]) -> "IPEXConfig": method = cls.get_from_keys(config, ["quant_method"]).lower() - weight_bits = cls.get_from_keys(config, ["w_bit", "bits"]) - group_size = cls.get_from_keys(config, ["q_group_size", "group_size"]) - return cls(method, weight_bits, group_size) + if method == "awq": + weight_bits = cls.get_from_keys(config, ["w_bit", "bits"]) + group_size = cls.get_from_keys(config, + ["q_group_size", "group_size"]) + modules_to_not_convert = cls.get_from_keys_or( + config, ["modules_to_not_convert"], None) + return cls(method, weight_bits, group_size, modules_to_not_convert, + False, False) + # otherwise for gptq + weight_bits = cls.get_from_keys(config, ["bits"]) + group_size = cls.get_from_keys(config, ["group_size"]) + lm_head_quantized = cls.get_from_keys_or(config, ["lm_head"], + default=False) + desc_act = cls.get_from_keys_or(config, ["desc_act"], default=False) + return cls(method, weight_bits, group_size, [], desc_act, + lm_head_quantized) @classmethod def override_quantization_method(cls, hf_quant_cfg, user_quant) -> Optional[str]: - if not current_platform.is_cpu(): + if not current_platform.is_cpu() and not current_platform.is_xpu(): return None quant_method = hf_quant_cfg.get("quant_method", "").lower() - if quant_method in ["awq"]: + if quant_method in ["awq", "gptq"]: return cls.get_name() return None @@ -90,12 +109,81 @@ def override_quantization_method(cls, hf_quant_cfg, def get_quant_method(self, layer: torch.nn.Module, prefix: str) -> Optional["LinearMethodBase"]: if isinstance(layer, LinearBase): - return self.quant_method(self) + if self.method == "awq": + if is_layer_skipped_awq(prefix, self.modules_to_not_convert): + return UnquantizedLinearMethod() + return IPEXAWQLinearMethod(self) + if self.method == "gptq": + return IPEXGPTQLinearMethod(self) return None +class IPEXGPTQLinearMethod(GPTQLinearMethod): + """GPTQ linear method using IPEX for the CPU/XPU backend. 
+ """ + + def __init__(self, quant_config: IPEXConfig): + self.quant_config = quant_config # type: ignore + + def process_weights_after_loading(self, layer: torch.nn.Module) -> None: + bias = layer.bias if not layer.skip_bias_add else None + + try: + import intel_extension_for_pytorch as ipex + if ipex.__version__ < MIN_IPEX_VERSION: + raise ImportError( + "intel_extension_for_pytorch version is " + "wrong. Please install " + f"intel_extension_for_pytorch>={MIN_IPEX_VERSION}.") + except ImportError as err: + raise ImportError( + "Please install " + f"intel_extension_for_pytorch>={MIN_IPEX_VERSION} via " + f"`pip install intel_extension_for_pytorch>={MIN_IPEX_VERSION}`" + " to use IPEX-AWQ linear method.") from err + # Using the compute dtype (lowp_mode) as INT8 to leverage instructions + # with better performance. + lowp_mode = ipex.quantization.WoqLowpMode.INT8 + # The weight will be de-packed from INT4 to INT8. + weight_dtype = ipex.quantization.WoqWeightDtype.INT4 + # The float activation will be quantized (dynamic, per-token) to INT8. + act_quant_mode = ipex.quantization.WoqActQuantMode.PER_BATCH_IC_BLOCK + + qconfig = ipex.quantization.get_weight_only_quant_qconfig_mapping( + weight_dtype=weight_dtype, + lowp_mode=lowp_mode, + act_quant_mode=act_quant_mode, + group_size=self.quant_config.group_size, + ) + layer.ipex_output_size = layer.qweight.shape[-1] + g_idx = layer.g_idx if self.quant_config.desc_act else None + layer.ipex_qlinear = ipex.llm.quantization.woq_linear. \ + IPEXWeightOnlyQuantizedLinear.from_weight( + layer.qweight, + layer.scales, + layer.qzeros, + layer.qweight.size(0), + layer.ipex_output_size, + qconfig=qconfig, + g_idx=g_idx, + bias=bias, + group_size=self.quant_config.group_size, + quant_method=IPEXConfig.IPEX_QUANT_METHOD_MAP["gptq"] + ) + + def apply(self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + reshaped_x = x.reshape(-1, x.shape[-1]) + out = layer.ipex_qlinear(reshaped_x) + if bias is not None: + out.add_(bias) + return out.reshape(x.shape[:-1] + (layer.ipex_output_size, )) + + class IPEXAWQLinearMethod(AWQLinearMethod): - """AWQ linear method using IPEX for the CPU backend. + """AWQ linear method using IPEX for the CPU/XPU backend. """ def __init__(self, quant_config: IPEXConfig): @@ -108,15 +196,16 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None: try: import intel_extension_for_pytorch as ipex - if ipex.__version__ < "2.4.0": - raise ImportError("intel_extension_for_pytorch version is " - "wrong. Please install " - "intel_extension_for_pytorch>=2.4.0.") + if ipex.__version__ < MIN_IPEX_VERSION: + raise ImportError( + "intel_extension_for_pytorch version is " + "wrong. 
Please install " + f"intel_extension_for_pytorch>={MIN_IPEX_VERSION}.") except ImportError as err: raise ImportError( "Please install " - "intel_extension_for_pytorch>=2.4.0 via " - "`pip install intel_extension_for_pytorch>=2.4.0`" + f"intel_extension_for_pytorch>={MIN_IPEX_VERSION} via " + f"`pip install intel_extension_for_pytorch>={MIN_IPEX_VERSION}`" " to use IPEX-AWQ linear method.") from err # Using the compute dtype (lowp_mode) as INT8 to leverage instructions @@ -136,19 +225,18 @@ def process_weights_after_loading(self, layer: torch.nn.Module) -> None: layer.ipex_output_size = layer.qweight.size( 1) * self.quant_config.pack_factor - layer.ipex_qlinear = ipex.nn.modules.weight_only_quantization.\ - WeightOnlyQuantizedLinear.from_weight( - layer.qweight, - layer.scales, - layer.qzeros, - layer.qweight.size(0), - layer.ipex_output_size, - qconfig=qconfig, - bias=bias, - group_size=self.quant_config.group_size, - quant_method= - self.quant_config.get_ipex_quant_method_id() # type: ignore - ) + layer.ipex_qlinear = ipex.llm.quantization.woq_linear. \ + IPEXWeightOnlyQuantizedLinear.from_weight( + layer.qweight, + layer.scales, + layer.qzeros, + layer.qweight.size(0), + layer.ipex_output_size, + qconfig=qconfig, + bias=bias, + group_size=self.quant_config.group_size, + quant_method=IPEXConfig.IPEX_QUANT_METHOD_MAP["awq"] # type: ignore + ) def apply(self, layer: torch.nn.Module, @@ -156,5 +244,4 @@ def apply(self, bias: Optional[torch.Tensor] = None) -> torch.Tensor: reshaped_x = x.reshape(-1, x.shape[-1]) out = layer.ipex_qlinear(reshaped_x) - return out.reshape(x.shape[:-1] + (layer.ipex_output_size, )) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index d9ce85949e4ee..b41c23704b7ff 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -29,6 +29,8 @@ from vllm.logger import init_logger from vllm.model_executor.layers.linear import (ReplicatedLinear, RowParallelLinear) +from vllm.model_executor.layers.quantization.base_config import ( + QuantizeMethodBase) from vllm.model_executor.model_loader.tensorizer import ( TensorizerConfig, is_vllm_tensorized, load_with_tensorizer, serialize_vllm_model, tensorizer_weights_iterator) @@ -348,7 +350,7 @@ def load_model(self, vllm_config: VllmConfig) -> nn.Module: for _, module in model.named_modules(): quant_method = getattr(module, "quant_method", None) - if quant_method is not None: + if isinstance(quant_method, QuantizeMethodBase): # When quant methods need to process weights after loading # (for repacking, quantizing, etc), they expect parameters # to be on the global target device. 
This scope is for the From c2170a5b395acb9f5f4ce8425c3be18aacb67513 Mon Sep 17 00:00:00 2001 From: Angus Wang Date: Mon, 18 Nov 2024 11:39:40 -0800 Subject: [PATCH 034/397] [Kernel] Explicitly specify other value in tl.load calls (#9014) Signed-off-by: Angus Wang --- .../blocksparse_attention_kernel.py | 13 ++++++++++--- vllm/lora/ops/bgmv_expand.py | 4 +++- vllm/lora/ops/bgmv_expand_slice.py | 8 +++++++- vllm/lora/ops/sgmv_expand.py | 5 ++++- vllm/lora/ops/sgmv_expand_slice.py | 5 ++++- .../layers/quantization/awq_triton.py | 14 +++++++------- 6 files changed, 35 insertions(+), 14 deletions(-) diff --git a/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py b/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py index ec1c37c5bcb0e..727a470ba6d0e 100644 --- a/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py +++ b/vllm/attention/ops/blocksparse_attention/blocksparse_attention_kernel.py @@ -157,19 +157,22 @@ def _fwd_kernel_inner( k = tl.load( k_ptrs + start_n * stride_kt, mask=offs_n[None, :] + start_n < k_seqlen, + other=0.0, ) else: k = tl.load( k_ptrs + start_n * stride_kt, mask=(offs_n[None, :] + start_n < k_seqlen) & (offs_d[:, None] < D_HEAD), + other=0.0, ) else: if EVEN_D: k = tl.load(k_ptrs + start_n * stride_kt) else: k = tl.load(k_ptrs + start_n * stride_kt, - mask=offs_d[:, None] < D_HEAD) + mask=offs_d[:, None] < D_HEAD, + other=0.0) qk = tl.zeros([BLOCK_M_LOADING, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, k) @@ -200,19 +203,22 @@ def _fwd_kernel_inner( v = tl.load( v_ptrs + start_n * stride_vt, mask=offs_n[:, None] + start_n < k_seqlen, + other=0.0, ) else: v = tl.load( v_ptrs + start_n * stride_vt, mask=(offs_n[:, None] + start_n < k_seqlen) & (offs_d[None, :] < D_HEAD), + other=0.0, ) else: if EVEN_D: v = tl.load(v_ptrs + start_n * stride_vt) else: v = tl.load(v_ptrs + start_n * stride_vt, - mask=offs_d[None, :] < D_HEAD) + mask=offs_d[None, :] < D_HEAD, + other=0.0) acc += tl.dot(p, v) @@ -318,12 +324,13 @@ def _fwd_kernel_batch_inference( q = tl.load( Q + offs_m[:, None] * stride_qt + offs_d[None, :] * stride_qd, mask=offs_m[:, None] < q_seqlen, + other=0.0, ) else: q = tl.load( Q + offs_m[:, None] * stride_qt + offs_d[None, :] * stride_qd, mask=(offs_m[:, None] < q_seqlen) & (offs_d[None, :] < D_HEAD), - other=0, + other=0.0, ) sparse_crow_ptr = (layout_crow_ptr + off_h * layout_crow_stride_h + diff --git a/vllm/lora/ops/bgmv_expand.py b/vllm/lora/ops/bgmv_expand.py index 6a32387a6f36c..f176259fddc78 100644 --- a/vllm/lora/ops/bgmv_expand.py +++ b/vllm/lora/ops/bgmv_expand.py @@ -75,7 +75,9 @@ def _bgmv_expand_kernel( other=0.0, ) # [BLOCK_N,BLOCK_K] if ADD_INPUTS: - tiled_out = tl.load(c_ptr + current_n * cn_stride, mask=c_mask) + tiled_out = tl.load(c_ptr + current_n * cn_stride, + mask=c_mask, + other=0.0) accumulator = tl.sum(tiled_a * tiled_b, 1) + tiled_out else: accumulator = tl.sum(tiled_a * tiled_b, 1) diff --git a/vllm/lora/ops/bgmv_expand_slice.py b/vllm/lora/ops/bgmv_expand_slice.py index 73628fd20d327..2c6ed96c253f0 100644 --- a/vllm/lora/ops/bgmv_expand_slice.py +++ b/vllm/lora/ops/bgmv_expand_slice.py @@ -78,7 +78,13 @@ def _bgmv_expand_slice_kernel( ) # [BLOCK_N,BLOCK_K] if ADD_INPUTS: - tiled_out = tl.load(c_ptr + current_n * cn_stride, mask=c_mask) + # explicitly pass in other=None to tell triton that masked values + # can be uninitialized. 
This is OK because the later tl.store + # operation uses the same mask, eliminating the risk of garbage + # values propagating + tiled_out = tl.load(c_ptr + current_n * cn_stride, + mask=c_mask, + other=None) accumulator = tl.sum(tiled_a * tiled_b, 1) + tiled_out else: accumulator = tl.sum(tiled_a * tiled_b, 1) diff --git a/vllm/lora/ops/sgmv_expand.py b/vllm/lora/ops/sgmv_expand.py index 4910cb4061298..ee2cd2e05e2ee 100644 --- a/vllm/lora/ops/sgmv_expand.py +++ b/vllm/lora/ops/sgmv_expand.py @@ -88,7 +88,10 @@ def _sgmv_expand_kernel( c_mask = (offset_cm[:, None] < (cur_seq_start + M)) & (offset_cn[None, :] < N) if ADD_INPUTS: - tiled_out = tl.load(c_ptr, mask=c_mask) + # explicitly pass in other=None to tell triton that masked values + # can be uninitialized. This is OK because the later tl.store operation + # uses the same mask, eliminating the risk of garbage values propagating + tiled_out = tl.load(c_ptr, mask=c_mask, other=None) tiled_c += tiled_out tl.store(c_ptr, tiled_c, mask=c_mask) diff --git a/vllm/lora/ops/sgmv_expand_slice.py b/vllm/lora/ops/sgmv_expand_slice.py index 844f5cec39e93..5244fa14913a4 100644 --- a/vllm/lora/ops/sgmv_expand_slice.py +++ b/vllm/lora/ops/sgmv_expand_slice.py @@ -94,7 +94,10 @@ def _sgmv_expand_slice_kernel( c_mask = (offset_cm[:, None] < (cur_seq_start + M)) & (offset_cn[None, :] < (slice_offset + N)) if ADD_INPUTS: - tiled_out = tl.load(c_ptr, mask=c_mask) + # explicitly pass in other=None to tell triton that masked values + # can be uninitialized. This is OK because the later tl.store operation + # uses the same mask, eliminating the risk of garbage values propagating + tiled_out = tl.load(c_ptr, mask=c_mask, other=None) tiled_c += tiled_out tl.store(c_ptr, tiled_c, mask=c_mask) diff --git a/vllm/model_executor/layers/quantization/awq_triton.py b/vllm/model_executor/layers/quantization/awq_triton.py index bbb7fc8ad5087..ace8f4a348812 100644 --- a/vllm/model_executor/layers/quantization/awq_triton.py +++ b/vllm/model_executor/layers/quantization/awq_triton.py @@ -42,7 +42,7 @@ def awq_dequantize_kernel( result_masks = result_masks_y[:, None] & result_masks_x[None, :] # Load the weights. - iweights = tl.load(qweight_ptr + offsets, masks) + iweights = tl.load(qweight_ptr + offsets, masks, 0.0) iweights = tl.interleave(iweights, iweights) iweights = tl.interleave(iweights, iweights) iweights = tl.interleave(iweights, iweights) @@ -71,7 +71,7 @@ def awq_dequantize_kernel( zero_masks = zero_masks_y[:, None] & zero_masks_x[None, :] # Load the zeros. - zeros = tl.load(zeros_ptr + zero_offsets, zero_masks) + zeros = tl.load(zeros_ptr + zero_offsets, zero_masks, 0.0) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) @@ -91,7 +91,7 @@ def awq_dequantize_kernel( scale_masks = scale_masks_y[:, None] & scale_masks_x[None, :] # Load the scales. - scales = tl.load(scales_ptr + scale_offsets, scale_masks) + scales = tl.load(scales_ptr + scale_offsets, scale_masks, 0.0) scales = tl.broadcast_to(scales, (BLOCK_SIZE_Y, BLOCK_SIZE_X * 8)) # Dequantize. 
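
The awq_triton.py hunks above and below follow the same pattern as the LoRA kernels: every masked tl.load now states what masked-off lanes should contain. A minimal standalone Triton kernel illustrating the idiom is below; the kernel and tensor names are invented for the example (it is not vLLM code) and it assumes a CUDA-capable setup.

import torch
import triton
import triton.language as tl


@triton.jit
def scale_kernel(x_ptr, y_ptr, n, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    # other=0.0 guarantees masked lanes hold 0.0 instead of uninitialized data.
    # The LoRA kernels above keep other=None where the same mask also guards
    # the tl.store, so the undefined lanes can never be written back.
    x = tl.load(x_ptr + offs, mask=mask, other=0.0)
    tl.store(y_ptr + offs, x * 2.0, mask=mask)


if __name__ == "__main__":
    x = torch.randn(1000, device="cuda")
    y = torch.empty_like(x)
    grid = (triton.cdiv(x.numel(), 128), )
    scale_kernel[grid](x, y, x.numel(), BLOCK=128)
    assert torch.allclose(y, 2 * x)
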
@@ -165,10 +165,10 @@ def awq_gemm_kernel(a_ptr, b_ptr, c_ptr, zeros_ptr, scales_ptr, M, N, K, for k in range(0, tl.cdiv(K, BLOCK_SIZE_K * SPLIT_K)): masks_k = offsets_k < K masks_a = masks_am[:, None] & masks_k[None, :] - a = tl.load(a_ptrs, mask=masks_a) + a = tl.load(a_ptrs, mask=masks_a, other=0.0) masks_b = masks_k[:, None] & masks_bn[None, :] - b = tl.load(b_ptrs, mask=masks_b) + b = tl.load(b_ptrs, mask=masks_b, other=0.0) b = tl.interleave(b, b) b = tl.interleave(b, b) b = tl.interleave(b, b) @@ -181,7 +181,7 @@ def awq_gemm_kernel(a_ptr, b_ptr, c_ptr, zeros_ptr, scales_ptr, M, N, K, masks_zk = offsets_szk < K // group_size masks_z = masks_zk[:, None] & masks_zn[None, :] zeros_ptrs = zeros_ptr + offsets_z - zeros = tl.load(zeros_ptrs, mask=masks_z) + zeros = tl.load(zeros_ptrs, mask=masks_z, other=0.0) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) zeros = tl.interleave(zeros, zeros) @@ -191,7 +191,7 @@ def awq_gemm_kernel(a_ptr, b_ptr, c_ptr, zeros_ptr, scales_ptr, M, N, K, masks_sk = offsets_szk < K // group_size masks_s = masks_sk[:, None] & masks_sn[None, :] scales_ptrs = scales_ptr + offsets_s - scales = tl.load(scales_ptrs, mask=masks_s) + scales = tl.load(scales_ptrs, mask=masks_s, other=0.0) scales = tl.broadcast_to(scales, (BLOCK_SIZE_K, BLOCK_SIZE_N)) b = (b >> shifts) & 0xF From 96d999fbe8d610fa4c5b7cad6bb0d0158d1d5b8b Mon Sep 17 00:00:00 2001 From: Lucas Wilkinson Date: Mon, 18 Nov 2024 14:59:29 -0500 Subject: [PATCH 035/397] [Kernel] Initial Machete W4A8 support + Refactors (#9855) Signed-off-by: Lucas Wilkinson --- benchmarks/kernels/benchmark_machete.py | 519 +++++++++---- benchmarks/kernels/graph_machete_bench.py | 5 +- benchmarks/kernels/weight_shapes.py | 6 + csrc/cutlass_extensions/cute_utils.cuh | 4 +- .../epilogue}/broadcast_load_epilogue_c2x.hpp | 1 + .../epilogue}/broadcast_load_epilogue_c3x.hpp | 0 .../epilogue/scaled_mm_epilogues_c2x.hpp | 317 ++++++++ .../epilogue/scaled_mm_epilogues_c3x.hpp | 315 ++++++++ .../vllm_cutlass_library_extension.py | 29 + .../vllm_numeric_conversion.cuh | 239 +++++- csrc/cutlass_extensions/vllm_type_utils.cuh | 42 + .../cutlass_w8a8/scaled_mm_c2x.cu | 53 +- .../cutlass_w8a8/scaled_mm_c2x.cuh | 302 -------- .../cutlass_w8a8/scaled_mm_c3x.cu | 312 +------- csrc/quantization/machete/generate.py | 732 ++++++++++-------- .../quantization/machete/machete_mainloop.cuh | 25 +- .../machete/machete_mm_kernel.cuh | 206 +++-- .../machete/machete_mm_launcher.cuh | 90 +-- .../machete/machete_prepack_kernel.cuh | 63 +- .../machete/machete_prepack_launcher.cuh | 15 +- .../machete/machete_prepacked_layout.cuh | 54 +- csrc/quantization/machete/machete_pytorch.cu | 120 ++- csrc/torch_bindings.cpp | 35 +- tests/kernels/test_machete_gemm.py | 284 ------- tests/kernels/test_machete_mm.py | 406 ++++++++++ vllm/_custom_ops.py | 75 +- .../layers/quantization/kernels/machete.py | 16 +- .../layers/quantization/utils/quant_utils.py | 45 +- 28 files changed, 2616 insertions(+), 1694 deletions(-) rename csrc/{quantization/cutlass_w8a8 => cutlass_extensions/epilogue}/broadcast_load_epilogue_c2x.hpp (99%) rename csrc/{quantization/cutlass_w8a8 => cutlass_extensions/epilogue}/broadcast_load_epilogue_c3x.hpp (100%) create mode 100644 csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp create mode 100644 csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp create mode 100644 csrc/cutlass_extensions/vllm_type_utils.cuh delete mode 100644 tests/kernels/test_machete_gemm.py create mode 100644 
tests/kernels/test_machete_mm.py diff --git a/benchmarks/kernels/benchmark_machete.py b/benchmarks/kernels/benchmark_machete.py index 665b50bf18cf0..a0342d08f1db8 100644 --- a/benchmarks/kernels/benchmark_machete.py +++ b/benchmarks/kernels/benchmark_machete.py @@ -2,8 +2,10 @@ import copy import itertools import math +import os import pickle as pkl import time +from dataclasses import dataclass from itertools import product from typing import Callable, Iterable, List, Optional, Tuple @@ -15,11 +17,12 @@ from vllm import _custom_ops as ops from vllm.model_executor.layers.quantization.utils.marlin_utils import ( - GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N, marlin_permute_scales) + GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N, marlin_permute_scales, + marlin_zero_points) from vllm.model_executor.layers.quantization.utils.marlin_utils_test import ( MarlinWorkspace) from vllm.model_executor.layers.quantization.utils.quant_utils import ( - gptq_pack, pack_rows, quantize_weights) + pack_rows, quantize_weights) from vllm.scalar_type import ScalarType, scalar_types from vllm.utils import FlexibleArgumentParser @@ -27,149 +30,349 @@ DEFAULT_BATCH_SIZES = [1, 16, 32, 64, 128, 256, 512, 1024] DEFAULT_TP_SIZES = [1] +NVTX_PROFILE = os.environ.get("NVTX_PROFILE", False) + +if NVTX_PROFILE: + import nvtx + + +def terse_type_name(dt): + return { + torch.bfloat16: "bf16", + torch.float16: "fp16", + torch.int8: "int8", + torch.float8_e4m3fn: "fp8", + torch.bfloat16: "bf16", + torch.float: "float", + torch.int: "int", + }[dt] + + +@dataclass +class BenchmarkTensors: + w_ref: torch.Tensor + a: torch.Tensor + + w_q: torch.Tensor + group_size: Optional[int] + wtype: ScalarType + w_g_s: torch.Tensor + w_g_zp: Optional[torch.Tensor] + w_ch_s: Optional[torch.Tensor] + w_tok_s: Optional[torch.Tensor] + + +@dataclass +class TypeConfig: + act_type: torch.dtype + weight_type: ScalarType + output_type: Optional[torch.dtype] + group_scale_type: Optional[torch.dtype] + group_zero_type: Optional[torch.dtype] + channel_scale_type: Optional[torch.dtype] + token_scale_type: Optional[torch.dtype] + + +def rand_data(shape, dtype=torch.float16, scale=1): + if dtype.is_floating_point: + return (scale * torch.rand(shape, device="cuda") - 0.3).to(dtype) + else: + return torch.randint(-15, 15, shape, dtype=dtype, device="cuda") + + +def quantize_and_pack(atype: torch.dtype, + w: torch.Tensor, + wtype: ScalarType, + stype: Optional[torch.dtype], + group_size: Optional[int], + zero_points: bool = False): + assert wtype.is_integer(), "TODO: support floating point weights" + + w_ref, w_q, w_s, w_zp = quantize_weights( + w, + wtype, + group_size=group_size, + zero_points=zero_points, + # to match how the kernel applies zps + ref_zero_points_after_scales=True) -def machete_pack_weights(w_q: torch.tensor, wtype: ScalarType) -> torch.tensor: w_q = pack_rows(w_q, wtype.size_bits, *w_q.shape) - w_q = w_q.t().contiguous().t() # make col major - return ops.machete_prepack_B(w_q, wtype) + return w_ref, w_q, w_s, w_zp -def make_bench_tensors( - atype: torch.dtype, wtype: ScalarType, group_size: int, m: int, n: int, - k: int -) -> Tuple[torch.tensor, List[Tuple[torch.tensor, torch.tensor, torch.tensor, - torch.tensor]]]: - assert wtype.is_integer(), "TODO: support floating point weights" +def create_bench_tensors(shape: Tuple[int, int, int], types: TypeConfig, + group_size: Optional[int]) -> List[BenchmarkTensors]: + m, n, k = shape # we want to make sure that weights don't fit into L2 cache between runs so # we construct enough 
weights to exceed L2 cache, which is 50mb on a H100 # so we target total weight size > 2*50mb - num_weights = math.ceil(2 * 50 * 1024**2 * 8 / (k * n * wtype.size_bits)) - - a = torch.randn((m, k), device="cuda", dtype=atype) * 5 - weights = [ - torch.randn((k, n), device="cuda", dtype=atype) - for _ in range(num_weights) - ] - quanitized_weights = [ - quantize_weights(w, wtype, group_size) for w in weights - ] - - return a, quanitized_weights + num_weights = math.ceil(2 * 50 * 1024**2 * 8 / + (k * n * types.weight_type.size_bits)) + + a = rand_data((m, k), types.act_type, scale=5) + + benchmark_tensors: List[BenchmarkTensors] = [] + for _ in range(num_weights): + w = rand_data((k, n), types.act_type, scale=5) + + if types.group_scale_type is not None: + w = w.to(types.group_scale_type) + if w.dtype.itemsize == 1: + w = w.to(torch.float16) + + w_ref, w_q_packed, w_s, w_zp = quantize_and_pack( + a.dtype, w, types.weight_type, types.group_scale_type, group_size, + types.group_zero_type is not None) + + if not a.dtype.is_floating_point: + aiinfo = torch.iinfo(a.dtype) + w_ref = w_ref.round().clamp(aiinfo.min, aiinfo.max) + + w_ref = w_ref.to(torch.float32) + + w_ch_s = None if types.channel_scale_type is None else\ + rand_data((n,), types.channel_scale_type) + w_tok_s = None if types.token_scale_type is None else\ + rand_data((m,), types.token_scale_type) + + benchmark_tensors.append( + BenchmarkTensors(w_ref=w_ref, + a=a, + w_q=w_q_packed, + wtype=types.weight_type, + w_g_s=w_s, + w_g_zp=w_zp, + group_size=group_size, + w_ch_s=w_ch_s, + w_tok_s=w_tok_s)) + + return benchmark_tensors + + +def torch_matmul_f16_create_bench_fn(bt: BenchmarkTensors) -> Callable: + a = bt.a + w = bt.w_ref.to(bt.a.dtype) # use float reference tensor + if a.dtype not in [torch.float16, torch.bfloat16]: + a = a.to(torch.float16) + w = w.to(torch.float16) + return lambda: torch.matmul(a, w) + + +def cutlass_scaled_mm_create_bench_fn(bt: BenchmarkTensors) -> Callable: + if bt.w_ch_s is not None and bt.w_tok_s is not None: + scale_a = bt.w_tok_s.to(torch.float32) + scale_b = bt.w_ch_s.to(torch.float32) + else: + scale_a = torch.tensor(1.0, dtype=torch.float32, device=bt.a.device) + scale_b = torch.tensor(1.0, dtype=torch.float32, device=bt.a.device) + w_col_major = bt.w_ref.to(bt.a.dtype).t().contiguous().t() + return lambda: ops.cutlass_scaled_mm( + bt.a, w_col_major, scale_a, scale_b, out_dtype=torch.float16) + + +def marlin_create_bench_fn(bt: BenchmarkTensors) -> Callable: + device = bt.a.device + + workspace = MarlinWorkspace(bt.w_ref.shape[1], GPTQ_MARLIN_MIN_THREAD_N, + GPTQ_MARLIN_MAX_PARALLEL) + + if bt.w_g_zp is None: + w_zp = torch.empty(0, dtype=torch.int, device=device) + else: + w_zp = marlin_zero_points(bt.w_g_zp, bt.w_ref.shape[0], + bt.w_ref.shape[1], bt.wtype.size_bits) + + if bt.group_size is None: + w_s = torch.tensor([], device="cuda", dtype=torch.half) + else: + w_s = marlin_permute_scales(bt.w_g_s, bt.w_ref.shape[0], + bt.w_ref.shape[1], bt.group_size) + + sort_indices = torch.empty(0, dtype=torch.int, device=device) + g_idx = torch.empty(0, dtype=torch.int, device=device) + w_q = ops.gptq_marlin_repack(bt.w_q, sort_indices, bt.w_ref.shape[0], + bt.w_ref.shape[1], bt.wtype.size_bits) + + if bt.a.dtype.is_floating_point: + assert bt.w_ch_s is None + assert bt.w_tok_s is None + assert bt.group_size is not None + + fn = lambda: ops.gptq_marlin_gemm(a=bt.a, + b_q_weight=w_q, + b_scales=w_s, + b_zeros=w_zp, + g_idx=g_idx, + perm=sort_indices, + workspace=workspace.scratch, + b_q_type=bt.wtype, + 
size_m=bt.a.shape[0], + size_n=bt.w_ref.shape[1], + size_k=bt.w_ref.shape[0], + is_k_full=True) + else: + assert bt.a.dtype == torch.int8 + assert bt.wtype == scalar_types.uint4b8 + + if bt.w_ch_s is not None: + s_ch = bt.w_ch_s.to(torch.float32) + else: + s_ch = torch.ones(bt.w_ref.shape[1], + dtype=torch.float32, + device=device) + + if bt.w_tok_s is not None: + s_tok = bt.w_tok_s.to(torch.float32) + else: + s_tok = torch.ones(bt.a.shape[0], + dtype=torch.float32, + device=device) + + fn = lambda: ops.marlin_qqq_gemm(a=bt.a, + b_q_weight=w_q, + s_group=w_s, + s_tok=s_tok, + s_ch=s_ch, + workspace=workspace.scratch, + size_m=bt.a.shape[0], + size_n=bt.w_ref.shape[1], + size_k=bt.w_ref.shape[0]) + + return fn + + +def machete_create_bench_fn(bt: BenchmarkTensors, + out_type=torch.dtype, + schedule=None) -> Callable: + w_q = bt.w_q.t().contiguous().t() # make col major + w_q = ops.machete_prepack_B(w_q, bt.a.dtype, bt.wtype, + None if bt.w_g_s is None else bt.w_g_s.dtype) + + w_g_zp = bt.w_g_zp + if w_g_zp is not None: + w_g_zp = -1 * bt.w_g_s * (w_g_zp.to(bt.w_g_s.dtype)) + + return lambda: ops.machete_mm( + a=bt.a, + b_q=bt.w_q, + b_type=bt.wtype, + b_group_scales=bt.w_g_s, + b_group_zeros=w_g_zp, + b_group_size=bt.group_size, + b_channel_scales=bt.w_ch_s, + a_token_scales=bt.w_tok_s, + out_type=out_type, + schedule=schedule, + ) # impl - # bench -def bench_fn(label: str, sub_label: str, description: str, - fn: Callable) -> TMeasurement: - min_run_time = 1 - return TBenchmark.Timer( - stmt="fn()", + +def bench_fns(label: str, sub_label: str, description: str, + fns: List[Callable]): + + min_run_time = 1 if not NVTX_PROFILE else 0.1 + res = TBenchmark.Timer( + stmt=""" + for fn in fns: + fn() + """, globals={ - "fn": fn + "fns": fns }, label=label, sub_label=sub_label, description=description, ).blocked_autorange(min_run_time=min_run_time) + if NVTX_PROFILE: + with nvtx.annotate("mm-bench"), nvtx.annotate( + f"{label}|{sub_label}|{description}"): + fns[0]() -def loop_over_weights( - a: torch.tensor, weights: List[Tuple[torch.tensor, torch.tensor, - torch.tensor, torch.tensor]], - fn: Callable[[torch.tensor, torch.tensor, torch.tensor, torch.tensor], - None]): - for w_ref, w_q, w_s, _ in weights: - fn(a, w_ref, w_q, w_s) + return res _SWEEP_SCHEDULES_RESULTS: Optional[pd.DataFrame] = None _SWEEP_SCHEDULES_RESULTS_CSV: Optional[str] = None -def bench(atype: torch.dtype, - wtype: ScalarType, +def bench(types: TypeConfig, group_size: int, m: int, k: int, n: int, label: str, sub_label: str, - benchmark_marlinv1: bool = True, - sweep_schedules: bool = True) -> Iterable[TMeasurement]: - global _SWEEP_SCHEDULES_RESULTS - - a, weights = make_bench_tensors(atype, wtype, group_size, m, n, k) - sub_label += f", L={len(weights)}" - - weights_machete = [(w_ref, machete_pack_weights(w_q, wtype), w_s, w_zp) - for w_ref, w_q, w_s, w_zp in weights] + sweep_schedules: bool = True) -> List[TMeasurement]: + benchmark_tensors = create_bench_tensors((m, n, k), types, group_size) + sub_label += f", L={len(benchmark_tensors)}" + + name_type_string = f"W{types.weight_type}"+\ + f"-A{terse_type_name(types.act_type)}" + if types.group_scale_type is not None: + name_type_string += f"-GS{terse_type_name(types.group_scale_type)}" + if types.group_zero_type is not None: + name_type_string += f"-GZ{terse_type_name(types.group_zero_type)}" + if group_size is not None: + name_type_string += f"-G{group_size}" + if types.channel_scale_type is not None: + name_type_string += f"-CS{terse_type_name(types.channel_scale_type)}" + 
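
For readers new to this benchmark: quantize_and_pack (earlier in the file) ultimately performs symmetric group-wise 4-bit quantization with a +8 storage bias (the uint4b8 type), followed by packing eight nibbles per int32 word. The sketch below is a simplified, standalone PyTorch version of that transform for the zero-point-free case; it is independent of vLLM's quantize_weights/pack_rows helpers and the exact rounding details are assumptions of the sketch.

import torch


def quantize_u4b8(w: torch.Tensor, group_size: int):
    k, n = w.shape
    wg = w.reshape(k // group_size, group_size, n)
    scales = wg.abs().amax(dim=1, keepdim=True) / 7.0    # one scale per (group, column)
    levels = torch.clamp(torch.round(wg / scales), -8, 7)
    w_ref = (levels * scales).reshape(k, n)              # dequantized reference
    q_u4b8 = (levels + 8).to(torch.int32).reshape(k, n)  # biased storage in [0, 15]
    return w_ref, q_u4b8, scales.reshape(-1, n)


def pack_rows_int32(q: torch.Tensor) -> torch.Tensor:
    # pack 8 consecutive 4-bit values along dim 0 into one int32, low nibble first
    k, n = q.shape
    packed = torch.zeros(k // 8, n, dtype=torch.int32)
    for i in range(8):
        packed |= q[i::8, :] << (4 * i)
    return packed


w = torch.randn(256, 128, dtype=torch.float32)
w_ref, q, s = quantize_u4b8(w, group_size=128)
packed = pack_rows_int32(q)                 # (32, 128) int32 words
assert torch.equal(q & 0xF, q)              # every entry fits in a nibble

The prepack step (ops.machete_prepack_B in the code above) then rearranges these packed words into the layout the Machete kernel expects; that layout is hardware-specific and not reproduced here.
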
if types.token_scale_type is not None: + name_type_string += f"-TS{terse_type_name(types.token_scale_type)}" timers = [] # pytorch impl timers.append( - bench_fn( - label, sub_label, "torch.matmul", lambda: loop_over_weights( - a, - weights, - lambda a, w_ref, w_q, w_s: torch.matmul(a, w_ref), - ))) + bench_fns( + label, sub_label, "torch.matmul (fp16)", + [torch_matmul_f16_create_bench_fn(bt) + for bt in benchmark_tensors])) - if benchmark_marlinv1: - w_ref = weights[0][0] - - w_zp_empty = torch.empty(0, dtype=torch.int, device=w_ref.device) - sort_indices = torch.empty(0, dtype=torch.int, device=w_ref.device) - g_idx = torch.empty(0, dtype=torch.int, device=w_ref.device) - - def marlinv1_pack_weights(w_q: torch.tensor) -> torch.tensor: - w_q_gptq = gptq_pack(w_q, wtype.size_bits, *w_ref.shape) - return ops.gptq_marlin_repack(w_q_gptq, sort_indices, *w_ref.shape, - wtype.size_bits) - - def marlinv1_permute_scales(w_s: torch.tensor) -> torch.tensor: - return marlin_permute_scales(w_s, *w_ref.shape, group_size) - - weights_marlinv1 = [(w_ref, marlinv1_pack_weights(w_q), - marlinv1_permute_scales(w_s), w_zp) - for w_ref, w_q, w_s, w_zp in weights] - - workspace = MarlinWorkspace(w_ref.shape[1], GPTQ_MARLIN_MIN_THREAD_N, - GPTQ_MARLIN_MAX_PARALLEL) - - # marlinv1 + if types.act_type == torch.int8 or types.act_type == torch.float8_e4m3fn: + timers.append( + bench_fns( + label, sub_label, + f"cutlass_scaled_mm ({terse_type_name(types.act_type)})", [ + cutlass_scaled_mm_create_bench_fn(bt) + for bt in benchmark_tensors + ])) + + if types.act_type != torch.float8_e4m3fn: timers.append( - bench_fn( - label, sub_label, "marlin_orig", lambda: loop_over_weights( - a, weights_marlinv1, lambda a, w_ref, w_q, w_s: ops. - gptq_marlin_gemm(a, - w_q, - w_s, - w_zp_empty, - g_idx, - sort_indices, - workspace.scratch, - wtype, - size_m=a.shape[0], - size_n=w_ref.shape[1], - size_k=w_ref.shape[0], - is_k_full=True)))) + bench_fns(label, sub_label, f"marlin ({name_type_string})", + [marlin_create_bench_fn(bt) + for bt in benchmark_tensors])) # machete timers.append( - bench_fn( - label, sub_label, "machete_heuristic", lambda: loop_over_weights( - a, weights_machete, lambda a, _, w_q, w_s: ops.machete_gemm( - a, w_q, wtype, b_scales=w_s, b_group_size=group_size)))) + bench_fns(label, sub_label, f"machete ({name_type_string})", [ + machete_create_bench_fn(bt, out_type=types.output_type) + for bt in benchmark_tensors + ])) if sweep_schedules: + global _SWEEP_SCHEDULES_RESULTS + print("Finding best schedule for machete") best = None best_schedule = None - schedules = ops.machete_supported_schedules(wtype) + schedules = ops.machete_supported_schedules( + a_type=types.act_type, + b_type=types.weight_type, + group_scales_type=types.group_scale_type, + group_zeros_type=types.group_zero_type, + token_scales_type=types.token_scale_type, + channel_scales_type=types.channel_scale_type, + out_type=types.output_type) + + if schedules is None or len(schedules) == 0: + raise ValueError("No schedules found to sweep") + for schedule in reversed(schedules): schedule_M = int(schedule.split("_")[0].split("x")[1]) @@ -177,16 +380,11 @@ def marlinv1_permute_scales(w_s: torch.tensor) -> torch.tensor: if schedule_M >= 2 * max(m, 16) or schedule_M < m // 4: continue - def run(a, _, w_q, w_s, schedule=schedule): - ops.machete_gemm(a, - w_q, - wtype, - w_s, - b_group_size=group_size, - schedule=schedule) - - res = bench_fn(label, sub_label, "machete_best", - lambda: loop_over_weights(a, weights_machete, run)) + res = bench_fns(label, 
sub_label, "machete_best", [ + machete_create_bench_fn( + bt, out_type=types.output_type, schedule=schedule) + for bt in benchmark_tensors + ]) results_row = { "M": m, @@ -213,25 +411,33 @@ def run(a, _, w_q, w_s, schedule=schedule): # runner -def print_timers(timers: Iterable[TMeasurement]): +def print_timers(timers: List[TMeasurement]): compare = TBenchmark.Compare(timers) compare.print() -def run(dtype: torch.dtype, sweep_schedules: bool, - MKNs: Iterable[Tuple[int, int, int]]) -> Iterable[TMeasurement]: +def run(args, MKNs: Iterable[Tuple[int, int, int]]) -> Iterable[TMeasurement]: + types = TypeConfig( + act_type=args.act_type, + weight_type=scalar_types.uint4b8 if args.group_zero_type is None \ + else scalar_types.uint4, + output_type=args.out_type, + group_scale_type=args.group_scale_type, + group_zero_type=args.group_zero_type, + channel_scale_type=args.channel_scale_type, + token_scale_type=args.token_scale_type, + ) - results = [] + results: List[TMeasurement] = [] for m, k, n in MKNs: - timers = bench(dtype, - scalar_types.uint4b8, - 128, + timers = bench(types, + args.group_size, m, k, n, - f"{dtype}-gemm", + f"{args.act_type}-gemm", f"MKN=({m}x{k}x{n})", - sweep_schedules=sweep_schedules) + sweep_schedules=args.sweep_schedules) print_timers(timers) results.extend(timers) @@ -240,7 +446,7 @@ def run(dtype: torch.dtype, sweep_schedules: bool, # output makers def make_output( - data: Iterable[TMeasurement], + data: List[TMeasurement], MKNs: Iterable[Tuple[int, int, int]], base_description: str, timestamp=None, @@ -262,7 +468,6 @@ def run_square_bench(args): dim_sizes = list( range(args.dim_start, args.dim_end + 1, args.dim_increment)) MKNs = list(zip(dim_sizes, dim_sizes, dim_sizes)) - data = run(args.dtype, args.sweep_schedules, MKNs) make_output(data, MKNs, f"square_bench-{args.dtype}") @@ -306,33 +511,49 @@ def model_shapes(model_name: str, tp_size: int) -> List[Tuple[int, int]]: for k, n in KNs: MKNs.append((m, k, n)) - data = run(args.dtype, args.sweep_schedules, MKNs) + data = run(args, MKNs) model_bench_data.append(data) + type_string = f"{args.act_type}" + # Print all results for data, model_tp in zip(model_bench_data, models_tps): model, tp_size = model_tp - print(f"== Results {args.dtype} {model}-TP{tp_size} ====") + print(f"== Results {type_string} {model}-TP{tp_size} ====") print_timers(data) - timestamp = int(time.time()) + timestr = time.strftime("%Y%m%d-%H%M%S") - all_data = [] + all_results = [] for d in model_bench_data: - all_data.extend(d) + all_results.extend(d) + # pickle all data - with open(f"model_bench-{args.dtype}-{timestamp}.pkl", "wb") as f: - pkl.dump(all_data, f) + with open(f"model_bench-{type_string}-{timestr}.pkl", "wb") as f: + args_dict = vars(args) + args_dict.pop("func") + pkl.dump({ + "args": args_dict, + "results": all_results, + }, f) if __name__ == "__main__": def to_torch_dtype(dt): - if dt == "bfloat16": - return torch.bfloat16 - if dt == "float16": - return torch.float16 - raise ValueError("unsupported dtype") + return { + "bfloat16": torch.bfloat16, + "float16": torch.float16, + "int8": torch.int8, + "float8_e4m3fn": torch.float8_e4m3fn, + "int": torch.int, + "float": torch.float, + }[dt] + + class ToTorchDtype(argparse.Action): + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, to_torch_dtype(values)) parser = FlexibleArgumentParser( description=""" @@ -352,12 +573,42 @@ def to_torch_dtype(dt): """, # noqa: E501 formatter_class=argparse.RawTextHelpFormatter, ) - parser.add_argument( 
- "--dtype", - type=to_torch_dtype, + "--act-type", + action=ToTorchDtype, required=True, - help="Available options are ['bfloat16', 'float16']", + choices=['bfloat16', 'float16', 'int8', 'float8_e4m3fn'], + ) + parser.add_argument( + "--group-scale-type", + action=ToTorchDtype, + choices=['bfloat16', 'float16'], + ) + parser.add_argument( + "--group-zero-type", + type=to_torch_dtype, + choices=['bfloat16', 'float16'], + ) + parser.add_argument( + "--channel-scale-type", + action=ToTorchDtype, + choices=['float'], + ) + parser.add_argument( + "--token-scale-type", + action=ToTorchDtype, + choices=['float'], + ) + parser.add_argument( + "--out-type", + action=ToTorchDtype, + choices=['bfloat16', 'float16'], + ) + parser.add_argument( + "--group-size", + type=int, + help="Available options are ['None', '-1', '128'], default=128", + default=128, ) parser.add_argument( "--sweep-schedules", diff --git a/benchmarks/kernels/graph_machete_bench.py b/benchmarks/kernels/graph_machete_bench.py index de608fd05af70..7d0bd84150a27 100644 --- a/benchmarks/kernels/graph_machete_bench.py +++ b/benchmarks/kernels/graph_machete_bench.py @@ -20,10 +20,11 @@ args = parser.parse_args() with open(args.filename, 'rb') as f: - data: List[TMeasurement] = pickle.load(f) + data = pickle.load(f) + raw_results: List[TMeasurement] = data["results"] results = defaultdict(lambda: list()) - for v in data: + for v in raw_results: result = re.search(r"MKN=\(\d+x(\d+x\d+)\)", v.task_spec.sub_label) if result is not None: KN = result.group(1) diff --git a/benchmarks/kernels/weight_shapes.py b/benchmarks/kernels/weight_shapes.py index 25ec9d6028627..51f24f3ba1774 100644 --- a/benchmarks/kernels/weight_shapes.py +++ b/benchmarks/kernels/weight_shapes.py @@ -40,4 +40,10 @@ ([8192, 57344], 1), ([28672, 8192], 0), ], + "meta-llama/Llama-3.1-405b-hf": [ + ([16384, 18432], 1), + ([16384, 16384], 0), + ([16384, 106496], 1), + ([53248, 16384], 0), + ], } diff --git a/csrc/cutlass_extensions/cute_utils.cuh b/csrc/cutlass_extensions/cute_utils.cuh index 1842fab8b2cac..f61fe3ceb978a 100644 --- a/csrc/cutlass_extensions/cute_utils.cuh +++ b/csrc/cutlass_extensions/cute_utils.cuh @@ -20,9 +20,9 @@ CUTE_HOST_DEVICE static constexpr auto permute_layout(Layout l) { // is the layout f(x) = x template CUTE_HOST_DEVICE static constexpr bool is_identity_layout() { - if constexpr (std::is_same_v) + if constexpr (std::is_same_v) { return true; - else { + } else { constexpr auto coalesced_layout = coalesce(Layout{}); if constexpr (rank(coalesced_layout) == 1 && stride<0>(coalesced_layout) == 1) { diff --git a/csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c2x.hpp b/csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp similarity index 99% rename from csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c2x.hpp rename to csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp index d407d66ab2aa6..7aa87feb4cce2 100644 --- a/csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c2x.hpp +++ b/csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp @@ -52,6 +52,7 @@ // clang-format off #include "cutlass/epilogue/threadblock/fusion/visitor_2x.hpp" +#include "cutlass/epilogue/threadblock/fusion/visitors.hpp" #include "cute/tensor.hpp" namespace cutlass::epilogue::threadblock { diff --git a/csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c3x.hpp b/csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c3x.hpp similarity index 100% rename from csrc/quantization/cutlass_w8a8/broadcast_load_epilogue_c3x.hpp 
rename to csrc/cutlass_extensions/epilogue/broadcast_load_epilogue_c3x.hpp diff --git a/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp b/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp new file mode 100644 index 0000000000000..c69e87999ae71 --- /dev/null +++ b/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp @@ -0,0 +1,317 @@ +#include "cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp" + +/* + This file defines custom epilogues for fusing channel scales, token scales, + bias, and activation zero-points onto a GEMM operation using the + CUTLASS 2.x API, for sm80 (Ampere) NVIDIA GPUs. + + Epilogues must contain a public type named EVTCompute of type Sm80EVT, + as well as a static prepare_args function that constructs an + EVTCompute::Arguments struct. +*/ + +namespace vllm::c2x { + +using namespace cute; + +/* + * This class provides the common load descriptors for the + * ScaledEpilogue[...] classes + */ +template +struct ScaledEpilogueBase { + protected: + using Accum = cutlass::epilogue::threadblock::VisitorAccFetch; + + template + using ColOrScalarLoad = + cutlass::epilogue::threadblock::VisitorColOrScalarBroadcast< + OutputTileThreadMap, T, Stride, Int<0>, Int<0>>>; + + template + using RowOrScalarLoad = + cutlass::epilogue::threadblock::VisitorRowOrScalarBroadcast< + OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; + + template + using ColLoad = cutlass::epilogue::threadblock::VisitorColBroadcast< + OutputTileThreadMap, T, Stride, Int<0>, Int<0>>>; + + template + using RowLoad = cutlass::epilogue::threadblock::VisitorRowBroadcast< + OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; + + template + using RowOrZeroLoad = + cutlass::epilogue::threadblock::VisitorRowOrZeroBroadcast< + OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; + + // This utility function constructs the arguments for the load descriptors + // from a tensor. It can handle both row and column, as well as row/column or + // scalar cases. + template + static auto args_from_tensor(torch::Tensor const& tensor) { + using Arguments = typename Descriptor::Arguments; + auto* data_ptr = static_cast(tensor.data_ptr()); + if constexpr (std::is_same_v> || + std::is_same_v>) { + return Arguments{data_ptr, tensor.numel() != 1}; + } else { + // it would technically work but no use case as data_ptr is never nullptr + static_assert(!std::is_same_v>); + return Arguments{data_ptr}; + } + } + + // This overload handles the case where there might not be a tensor, in which + // case a nullptr is passed and a constant (0) is used. + template + static auto args_from_tensor(c10::optional const& tensor) { + static_assert(std::is_same_v>); + using Arguments = typename Descriptor::Arguments; + auto* data_ptr = tensor ? static_cast(tensor->data_ptr()) : nullptr; + return Arguments{data_ptr}; + } +}; + +/* + This epilogue function defines a quantized GEMM operation similar to + torch._scaled_mm. + + A and B may be both either int8 or fp8_e4m3. A can be quantized per-tensor or + per-row. B can be quantized per-tensor or per-column. + Any combination of per-tensor and per-row or column is supported. + A and B must have symmetric quantization (zero point == 0). + + So the GEMM operation is D = (a_scales * A) (b_scales * B), where the + scales are applied elementwise with numpy-style broadcasting. + + ScaleA and ScaleB define the epilogue functions that apply the scales for + the A and B operands respectively. These scales may be either per-tensor or + per row or column. 
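
The claim in the comment above can be checked numerically: because a_scales act per row of A and b_scales per column of B, they commute with the matmul and can equally be applied to the int32 accumulator, which is what the epilogue does. A small PyTorch sketch of that identity (illustrative shapes only, not the CUTLASS code):

import torch

m, n, k = 4, 8, 16
a_q = torch.randint(-128, 128, (m, k), dtype=torch.int32)   # int8-range activations
b_q = torch.randint(-128, 128, (k, n), dtype=torch.int32)   # int8-range weights
a_s = torch.rand(m, 1)            # per-row (per-token) scales; a scalar also works
b_s = torch.rand(1, n)            # per-column (per-channel) scales; a scalar also works

reference = (a_s * a_q.float()) @ (b_s * b_q.float())

acc = a_q.float() @ b_q.float()   # the accumulator; exact here, all sums stay below 2**24
epilogue = a_s * (b_s * acc)      # ScaleB applied first, then ScaleA, as in EVTCompute
assert torch.allclose(reference, epilogue, rtol=1e-4, atol=1e-2)
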
+*/ +template +struct ScaledEpilogue + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + + using Compute0 = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTCompute0 = + cutlass::epilogue::threadblock::Sm80EVT; + + using Compute1 = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::threadblock::Sm80EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + + typename EVTCompute0::Arguments evt0_args{b_args}; + return ArgumentType{a_args, evt0_args}; + } +}; + +/* + * This epilogue performs the same operation as ScaledEpilogue, but adds a bias. + * This bias can also be used in the per-tensor azp case, where the activation + * zero point (azp) is used to compute an azp correction term, + * which is folded into the bias. + * + * The bias tensor must be per-output channel. + * ScaleA and ScaleB can be per-tensor or per-token/per-channel. + */ +template +struct ScaledEpilogueBias + : protected ScaledEpilogueBase { + protected: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowLoad; + using Compute0 = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTCompute0 = + cutlass::epilogue::threadblock::Sm80EVT; + + using Compute1 = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = cutlass::epilogue::threadblock::Sm80EVT; + using ArgumentType = typename EVTCompute::Arguments; + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + + typename EVTCompute0::Arguments evt0_args{b_args}; + return ArgumentType{a_args, evt0_args, bias_args}; + } +}; + +/* + * This epilogue directly supports per-tensor azp in int32 form. + * As opposed to the per-token epilogue below, this epilogue only has an azp_adj + * term, which should already be multiplied with the scalar azp. + * The azp_adj term is a 1D tensor of shape (1,n), computed as azp * J @ B. + * + * This epilogue also supports bias, which remains per-channel. 
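
The per-tensor azp correction described above follows from (A_q - azp) @ B_q = A_q @ B_q - azp * (J @ B_q), so the kernel only needs the precomputed row vector azp_adj. A PyTorch check of that identity; shapes, dtypes and the scalar a_s are illustrative assumptions, not the CUTLASS code:

import torch

m, n, k = 4, 8, 16
azp = 7                                                  # per-tensor activation zero point
a_q = torch.randint(0, 256, (m, k), dtype=torch.int32)   # asymmetric uint8-range activations
b_q = torch.randint(-128, 128, (k, n), dtype=torch.int32)
a_s, b_s = 0.02, torch.rand(1, n)

# the real-valued activations being represented: A = a_s * (A_q - azp)
reference = (a_s * (a_q.float() - azp)) @ (b_s * b_q.float())

acc = a_q.float() @ b_q.float()                     # int32 accumulator (exact in fp32 here)
azp_adj = azp * (torch.ones(1, k) @ b_q.float())    # azp * J @ B, shape (1, n)
epilogue = a_s * (b_s * (acc - azp_adj))
assert torch.allclose(reference, epilogue, rtol=1e-4, atol=1e-2)
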
+ */ +template +struct ScaledEpilogueBiasAzp + : protected ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowOrZeroLoad; + + // This is the full AZP term, azp * J @ B, shape (1,n) + using AzpWithAdj = typename SUPER::template RowLoad; + + // Compute float(accum - azp_adj), both operands are int32_t + using ComputeAzp = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::minus, float, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAzp = + cutlass::epilogue::threadblock::Sm80EVT; + + using ComputeScaleB = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeScaleB = + cutlass::epilogue::threadblock::Sm80EVT; + + using ComputeScaleBiasA = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::threadblock::Sm80EVT; + + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& azp_adj, + c10::optional const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + auto azp_adj_args = + SUPER::template args_from_tensor(azp_adj); + + typename EVTComputeAzp::Arguments evt_azp_args{{}, azp_adj_args}; + typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_azp_args}; + return ArgumentType{a_args, evt_scale_b_args, bias_args}; + } +}; + +/* + * This epilogue supports per-token azp by computing and applying + * the correction term using a rank-1 update. If the term were materialized, + * it would require O(m*n) space, and this way it only requires O(m+n) space. + * The azp term is a 1D tensor of shape (m,1), and represents the unscaled zero + * point for each row of A. + * The azp_adj term is a 1D tensor of shape (1,n), computed as J @ B. + * + * This epilogue also supports bias, which remains per-channel. 
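
Likewise for the per-token case: subtracting a per-row zero point before the matmul is equivalent to a rank-1 correction of the accumulator, which is why only azp (m,1) and azp_adj (1,n) have to be stored. A PyTorch check with assumed shapes; the sketch forms the (m,n) outer product only to verify the math, the epilogue itself never materializes it:

import torch

m, n, k = 4, 8, 16
azp = torch.randint(0, 256, (m, 1), dtype=torch.int32)   # one zero point per token (row of A)
a_q = torch.randint(0, 256, (m, k), dtype=torch.int32)
b_q = torch.randint(-128, 128, (k, n), dtype=torch.int32)
a_s, b_s = torch.rand(m, 1), torch.rand(1, n)

reference = (a_s * (a_q.float() - azp.float())) @ (b_s * b_q.float())

acc = a_q.float() @ b_q.float()
azp_adj = torch.ones(1, k) @ b_q.float()    # J @ B, shape (1, n), independent of azp
epilogue = a_s * (b_s * (acc - azp.float() * azp_adj))
assert torch.allclose(reference, epilogue, rtol=1e-4, atol=1e-2)
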
+ */ +template +struct ScaledEpilogueBiasAzpToken + : protected ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowOrZeroLoad; + + // Per-token azp term, shape (m,1) + using Azp = typename SUPER::template ColLoad; + + // This is the AZP adjustment term, J @ B, shape (1,n) + using AzpAdj = typename SUPER::template RowLoad; + + // Compute azp * azp_adj + using ComputeAzp = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, int32_t, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAzp = + cutlass::epilogue::threadblock::Sm80EVT; + + // Compute float(accum - azp*azp_adj), all operands are int32_t + using ComputeAcc = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::minus, float, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAcc = + cutlass::epilogue::threadblock::Sm80EVT; + + using ComputeScaleB = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeScaleB = + cutlass::epilogue::threadblock::Sm80EVT; + + using ComputeScaleBiasA = cutlass::epilogue::threadblock::VisitorCompute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::threadblock::Sm80EVT; + + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& azp_adj, + torch::Tensor const& azp, + c10::optional const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + auto azp_args = SUPER::template args_from_tensor(azp); + auto azp_adj_args = + SUPER::template args_from_tensor(azp_adj); + + typename EVTComputeAzp::Arguments evt_azp_args{azp_args, azp_adj_args}; + typename EVTComputeAcc::Arguments evt_acc_args{{}, evt_azp_args}; + typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_acc_args}; + return ArgumentType{a_args, evt_scale_b_args, bias_args}; + } +}; + +}; // namespace vllm::c2x \ No newline at end of file diff --git a/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp b/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp new file mode 100644 index 0000000000000..95764ecddc79f --- /dev/null +++ b/csrc/cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp @@ -0,0 +1,315 @@ +#include "cutlass_extensions/epilogue/broadcast_load_epilogue_c3x.hpp" + +/* + This file defines custom epilogues for fusing channel scales, token scales, + bias, and activation zero-points onto a GEMM operation using the + CUTLASS 3.x API, for NVIDIA GPUs with sm90a (Hopper) or later. + + Epilogues must contain a public type named EVTCompute of type Sm90EVT, + as well as a static prepare_args function that constructs an + EVTCompute::Arguments struct. +*/ + +namespace vllm::c3x { + +using namespace cute; + +/* + * This class provides the common load descriptors for the + * ScaledEpilogue[...] 
classes + */ +template +struct ScaledEpilogueBase { + protected: + using Accum = cutlass::epilogue::fusion::Sm90AccFetch; + + template + using ColOrScalarLoad = cutlass::epilogue::fusion::Sm90ColOrScalarBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, + Stride, Int<0>, Int<0>>>; + + template + using RowOrScalarLoad = cutlass::epilogue::fusion::Sm90RowOrScalarBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, + Stride, Int<1>, Int<0>>>; + + // Don't want to support nullptr by default + template + using ColLoad = cutlass::epilogue::fusion::Sm90ColBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, + Stride, Int<0>, Int<0>>, 128 / sizeof_bits_v, EnableNullPtr>; + + // Don't want to support nullptr by default + template + using RowLoad = cutlass::epilogue::fusion::Sm90RowBroadcast< + 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, + Stride, Int<1>, Int<0>>, 128 / sizeof_bits_v, EnableNullPtr>; + + // This utility function constructs the arguments for the load descriptors + // from a tensor. It can handle both row and column, as well as row/column or + // scalar cases. + template + static auto args_from_tensor(torch::Tensor const& tensor) { + using Arguments = typename Descriptor::Arguments; + auto* data_ptr = static_cast(tensor.data_ptr()); + if constexpr (std::is_same_v> || + std::is_same_v>) { + return Arguments{data_ptr, tensor.numel() != 1}; + } else { + static_assert(!std::is_same_v> && + !std::is_same_v>); + return Arguments{data_ptr}; + } + } + + // This overload handles the case where there might not be a tensor, in which + // case a nullptr is passed and a constant (0) is used. + template + static auto args_from_tensor(c10::optional const& tensor) { + using Arguments = typename Descriptor::Arguments; + auto* data_ptr = tensor ? static_cast(tensor->data_ptr()) : nullptr; + static_assert(std::is_same_v> || + std::is_same_v>); + return Arguments{data_ptr}; + } +}; + +/* + This epilogue function defines a quantized GEMM operation similar to + torch.scaled_mm_. + + A and B may be both either int8 or fp8_e4m3. A can be + quantized per-tensor or per-row. B can be quantized per-tensor or per-column. + Any combination of per-tensor and per-row or column is supported. + A and B must have symmetric quantization (zero point == 0). + + So the GEMM operation is D = (a_scales * A) (b_scales * B), where the + scales are applied elementwise with numpy-style broadcasting. + + ScaleA and ScaleB define the epilogue functions that apply the scales for + the A and B operands respectively. These scales may be either per-tensor or + per row or column. 
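
The ScaledEpilogueBias variants in both the c2x and c3x headers extend this scaling with a per-output-channel bias through a final multiply_add node, i.e. D = a_scales * b_scales * acc + bias. A quick PyTorch check with illustrative shapes (again not the CUTLASS code):

import torch

m, n, k = 4, 8, 16
a_q = torch.randint(-128, 128, (m, k), dtype=torch.int32)
b_q = torch.randint(-128, 128, (k, n), dtype=torch.int32)
a_s, b_s = torch.rand(m, 1), torch.rand(1, n)
bias = torch.randn(n)                 # one bias value per output channel

reference = (a_s * a_q.float()) @ (b_s * b_q.float()) + bias

acc = a_q.float() @ b_q.float()
epilogue = a_s * (b_s * acc) + bias   # the final multiply_add node: a_s * x + bias
assert torch.allclose(reference, epilogue, rtol=1e-4, atol=1e-2)
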
+*/ +template +struct ScaledEpilogue + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + + using Compute0 = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTCompute0 = + cutlass::epilogue::fusion::Sm90EVT; + + using Compute1 = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::fusion::Sm90EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + + typename EVTCompute0::Arguments evt0_args{b_args}; + return ArgumentType{a_args, evt0_args}; + } +}; + +/* + * This epilogue performs the same operation as ScaledEpilogue, but adds a bias. + * This bias can also be used in the per-tensor azp case, where the activation + * zero point (azp) is used to compute an azp correction term, + * which is folded into the bias. + * + * The bias tensor must be per-output channel. + * ScaleA and ScaleB can be per-tensor or per-token/per-channel. + */ +template +struct ScaledEpilogueBias + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowLoad; + + using Compute0 = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTCompute0 = + cutlass::epilogue::fusion::Sm90EVT; + + using Compute1 = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::fusion::Sm90EVT; + + using ArgumentType = typename EVTCompute::Arguments; + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + + typename EVTCompute0::Arguments evt0_args{b_args}; + return ArgumentType{a_args, evt0_args, bias_args}; + } +}; + +/* + * This epilogue directly supports per-tensor azp in int32 form. + * As opposed to the per-token epilogue below, this epilogue only has an azp_adj + * term, which should already be multiplied with the scalar azp. + * The azp_adj term is a 1D tensor of shape (1,n), computed as azp * J @ B. + * + * This epilogue also supports bias, which remains per-channel. 
+ */ +template +struct ScaledEpilogueBiasAzp + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowLoad; + + // This is the full AZP term, azp * J @ B, shape (1,n) + using AzpWithAdj = typename SUPER::template RowLoad; + + // Compute float(accum - azp_adj), both operands are int32_t + using ComputeAzp = cutlass::epilogue::fusion::Sm90Compute< + cutlass::minus, float, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAzp = + cutlass::epilogue::fusion::Sm90EVT; + + using ComputeScaleB = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeScaleB = + cutlass::epilogue::fusion::Sm90EVT; + + using ComputeScaleBiasA = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::fusion::Sm90EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& azp_adj, + c10::optional const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + auto azp_adj_args = + SUPER::template args_from_tensor(azp_adj); + + typename EVTComputeAzp::Arguments evt_azp_args{{}, azp_adj_args}; + typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_azp_args}; + return ArgumentType{a_args, evt_scale_b_args, bias_args}; + } +}; + +/* + * This epilogue supports per-token azp by computing and applying + * the correction term using a rank-1 update. If the term were materialized, + * it would require O(m*n) space, and this way it only requires O(m+n) space. + * The azp term is a 1D tensor of shape (m,1), and represents the unscaled zero + * point for each row of A. + * The azp_adj term is a 1D tensor of shape (1,n), computed as J @ B. + * + * This epilogue also supports bias, which remains per-channel. 
+ */ +template +struct ScaledEpilogueBiasAzpToken + : private ScaledEpilogueBase { + private: + using SUPER = ScaledEpilogueBase; + using Accum = typename SUPER::Accum; + using ScaleA = typename SUPER::template ColOrScalarLoad; + using ScaleB = typename SUPER::template RowOrScalarLoad; + using Bias = typename SUPER::template RowLoad; + + // Per-token azp term, shape (m,1) + using Azp = typename SUPER::template ColLoad; + + // This is the AZP adjustment term, J @ B, shape (1,n) + using AzpAdj = typename SUPER::template RowLoad; + + // Compute azp * azp_adj + using ComputeAzp = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, int32_t, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAzp = + cutlass::epilogue::fusion::Sm90EVT; + + // Compute float(accum - azp*azp_adj), all operands are int32_t + using ComputeAcc = cutlass::epilogue::fusion::Sm90Compute< + cutlass::minus, float, int32_t, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeAcc = + cutlass::epilogue::fusion::Sm90EVT; + + using ComputeScaleB = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiplies, float, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + using EVTComputeScaleB = + cutlass::epilogue::fusion::Sm90EVT; + + using ComputeScaleBiasA = cutlass::epilogue::fusion::Sm90Compute< + cutlass::multiply_add, ElementD, float, + cutlass::FloatRoundStyle::round_to_nearest>; + + public: + using EVTCompute = + cutlass::epilogue::fusion::Sm90EVT; + using ArgumentType = typename EVTCompute::Arguments; + + static ArgumentType prepare_args(torch::Tensor const& a_scales, + torch::Tensor const& b_scales, + torch::Tensor const& azp_adj, + torch::Tensor const& azp, + c10::optional const& bias) { + auto a_args = SUPER::template args_from_tensor(a_scales); + auto b_args = SUPER::template args_from_tensor(b_scales); + auto bias_args = SUPER::template args_from_tensor(bias); + auto azp_args = SUPER::template args_from_tensor(azp); + auto azp_adj_args = + SUPER::template args_from_tensor(azp_adj); + + typename EVTComputeAzp::Arguments evt_azp_args{azp_args, azp_adj_args}; + typename EVTComputeAcc::Arguments evt_acc_args{{}, evt_azp_args}; + typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_acc_args}; + return ArgumentType{a_args, evt_scale_b_args, bias_args}; + } +}; + +}; // namespace vllm::c3x \ No newline at end of file diff --git a/csrc/cutlass_extensions/vllm_cutlass_library_extension.py b/csrc/cutlass_extensions/vllm_cutlass_library_extension.py index 4fcfcd311aa91..a5beea1a35e49 100644 --- a/csrc/cutlass_extensions/vllm_cutlass_library_extension.py +++ b/csrc/cutlass_extensions/vllm_cutlass_library_extension.py @@ -35,6 +35,35 @@ class MixedInputKernelScheduleType(enum.Enum): } } +VLLMDataTypeSize: Dict[Union[VLLMDataType, DataType], int] = { + **DataTypeSize, # type: ignore + **{ + VLLMDataType.u4b8: 4, + VLLMDataType.u8b128: 8, + } +} + +VLLMDataTypeVLLMScalarTypeTag: Dict[Union[VLLMDataType, DataType], str] = { + VLLMDataType.u4b8: "vllm::kU4B8", + VLLMDataType.u8b128: "vllm::kU8B128", + DataType.u4: "vllm::kU4", + DataType.u8: "vllm::kU8", + DataType.s4: "vllm::kS4", + DataType.s8: "vllm::kS8", + DataType.f16: "vllm::kFloat16", + DataType.bf16: "vllm::kBfloat16", +} + +VLLMDataTypeTorchDataTypeTag: Dict[Union[VLLMDataType, DataType], str] = { + DataType.u8: "at::ScalarType::Byte", + DataType.s8: "at::ScalarType::Char", + DataType.e4m3: "at::ScalarType::Float8_e4m3fn", + DataType.s32: "at::ScalarType::Int", + DataType.f16: "at::ScalarType::Half", + 
DataType.bf16: "at::ScalarType::BFloat16", + DataType.f32: "at::ScalarType::Float", +} + VLLMKernelScheduleTag: Dict[Union[ MixedInputKernelScheduleType, KernelScheduleType], str] = { **KernelScheduleTag, # type: ignore diff --git a/csrc/cutlass_extensions/vllm_numeric_conversion.cuh b/csrc/cutlass_extensions/vllm_numeric_conversion.cuh index 2ad914f8e9868..90f226cf64c0a 100644 --- a/csrc/cutlass_extensions/vllm_numeric_conversion.cuh +++ b/csrc/cutlass_extensions/vllm_numeric_conversion.cuh @@ -3,6 +3,7 @@ #include "cutlass/numeric_conversion.h" #include "cutlass_extensions/vllm_custom_types.cuh" #include "cutlass_extensions/cute_utils.cuh" +#include "cutlass_extensions/vllm_type_utils.cuh" // this file extends: // https://github.com/NVIDIA/cutlass/blob/cutlass-3.5.0/include/cutlass/numeric_conversion.h @@ -28,8 +29,19 @@ struct InterleavedNumericArrayConverter { CUTLASS_DEVICE static result_type convert(source_type const& source) { - CUTE_INVALID_CONTROL_PATH( - "InterleavedNumericArrayConverter not implemented\n"); + if (cute::elect_one_sync()) { + if constexpr (std::is_same_v) { + printf( + "Convert %s <= %s (N = %d, IlvBlkLayout = void), not implemented\n", + nameof_v, nameof_v, N); + } else { + printf( + "Convert %s <= %s (N = %d, size(IlvBlkLayout{}) = %d), not " + "implemented\n", + nameof_v, nameof_v, N, size(IlvBlkLayout{})); + } + __brkpt(); + } return {}; } @@ -56,11 +68,6 @@ struct InterleavedNumericArrayConverter< result_type operator()(source_type const& s) const { return convert(s); } }; -// TODO (LucasWilkinson): Implement -// for Array <= Array - -// .... - template struct ArrayConverterPacked32Bit { using result_type = Array; @@ -86,14 +93,16 @@ struct ArrayConverterPacked32Bit { using ScalarConverter = NumericConverter; template - CUTLASS_DEVICE static uint32_t to_reg(PackedSrc const& source) { + CUTLASS_DEVICE static auto to_regs(PackedSrc const& src) { if constexpr (sizeof(PackedSrc) == 1) { - return static_cast(reinterpret_cast(source)); + return Array{reinterpret_cast(src)}; } else if constexpr (sizeof(PackedSrc) == 2) { - return static_cast(reinterpret_cast(source)); + return Array{reinterpret_cast(src)}; + } else if constexpr (sizeof(PackedSrc) == 4) { + return Array{reinterpret_cast(src)}; } else { - static_assert(sizeof(PackedSrc) == 4); - return reinterpret_cast(source); + static_assert(sizeof(PackedSrc) == 8); + return reinterpret_cast const&>(src); } } @@ -110,7 +119,7 @@ struct ArrayConverterPacked32Bit { static_assert(std::is_same_v); static_assert(std::is_same_v); - return RegConvert32bit::template convert(to_reg(source)); + return RegConvert32bit::template convert(to_regs(source)); } friend class detail::VectorizedConverter; @@ -140,6 +149,131 @@ struct ArrayConverterPacked32Bit { } }; +// Convert 8 4bit values packed into a 32bit register to 8 8bit values packed +// into 2 32bit register. +template +CUTLASS_DEVICE cutlass::AlignedArray lut_4bit_to_8bit_convert( + uint32_t src) { + cutlass::AlignedArray r; + // Determines if the value is in the top half of the LUT if set or + // (i.e. LUT[8:15]) in the bottom half (i.e. LUT[0:7]) if not set. Then move + // into bit position 0x4 of each nibble so when or'd with final_prmt_base it + // selects the correct candidate. When elements in final_prmt_base + // are >= 0x4, the high candidate is selected (i.e. LUT[8:15]), when elements + // are < 0x4, the low candidate is selected (i.e. 
LUT[0:7]) + uint32_t high_bit = (src & 0x88888888) >> 1; + + // `high_bit` is OR'd with 0x31203120 to find the correct value in the LUT + // (selects correct high or low candidate) + const uint32_t final_prmt_base = 0x32103210; + + // Ignore the high bit when indexing into LUT, for each 4bit value + // we index into both the high and low candidates then use + // high_bit | final_prmt_base to select the correct candidate + uint32_t lut_idx = (src & 0x77777777); + + auto pack = [](uint8_t a, uint8_t b, uint8_t c, uint8_t d) { + return uint32_t(a) | (uint32_t(b) << 8) | (uint32_t(c) << 16) | + (uint32_t(d) << 24); + }; + + static constexpr uint32_t LOW_0 = pack(LUT0, LUT1, LUT2, LUT3); + static constexpr uint32_t LOW_1 = pack(LUT4, LUT5, LUT6, LUT7); + static constexpr uint32_t HIGH_0 = pack(LUT8, LUT9, LUT10, LUT11); + static constexpr uint32_t HIGH_1 = pack(LUT12, LUT13, LUT14, LUT15); + + CUTLASS_PRAGMA_UNROLL + for (int ii = 0; ii < 2; ++ii, lut_idx >>= 16, high_bit >>= 16) { + uint32_t final_prmt_idx = final_prmt_base | high_bit; + + // This uses a look up table to convert packed int4s to packed int8s, + // using the int4 value as the index to prmt. It first select both the + // high and low candidates, then uses the high bit (i.e. `high_bit`) to + // select the correct candidate. + asm volatile( + "{\n" + " .reg .b32 low, high;\n" + " prmt.b32 low, %1, %2, %5;\n" + " prmt.b32 high, %3, %4, %5;\n" + " prmt.b32 %0, low, high, %6;\n" + "}\n" + : "=r"(r[ii]) + : "n"(LOW_0), "n"(LOW_1), "n"(HIGH_0), "n"(HIGH_1), "r"(lut_idx), + "r"(final_prmt_idx)); + } + + return r; +}; + +// for Array <= Array +template +struct NumericArrayConverter { + using result_type = Array; + using source_type = Array; + + static FloatRoundStyle const round_style = Round; + + private: + struct RegConvert { + template + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + // [-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7] as int8s + auto r = lut_4bit_to_8bit_convert<0xF8, 0xF9, 0xFA, 0xFB, // + 0xFC, 0xFD, 0xFE, 0xFF, // + 0x00, 0x01, 0x02, 0x03, // + 0x04, 0x05, 0x06, 0x07>(src_[0]); + return reinterpret_cast(r); + }; + }; + + public: + CUTLASS_DEVICE + static result_type convert(source_type const& source) { + return ArrayConverterPacked32Bit::convert(source); + } + + CUTLASS_DEVICE + result_type operator()(source_type const& s) const { return convert(s); } +}; + +// for Array <= Array +template +struct NumericArrayConverter { + using result_type = Array; + using source_type = Array; + + static FloatRoundStyle const round_style = Round; + + private: + struct RegConvert { + template + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + // [-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7] as fp8s + auto r = lut_4bit_to_8bit_convert<0xD0, 0xCE, 0xCC, 0xCA, // + 0xC8, 0xC4, 0xC0, 0xB8, // + 0x00, 0x38, 0x40, 0x44, // + 0x48, 0x4A, 0x4C, 0x4E>(src_[0]); + return reinterpret_cast(r); + }; + }; + + public: + CUTLASS_DEVICE + static result_type convert(source_type const& source) { + return ArrayConverterPacked32Bit::convert(source); + } + + CUTLASS_DEVICE + result_type operator()(source_type const& s) const { return convert(s); } +}; + // for Array <= Array template struct NumericArrayConverter { @@ -148,7 +282,8 @@ struct NumericArrayConverter { struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -249,7 +384,8 @@ 
struct InterleavedNumericArrayConverter, Stride<_4, _1>>, private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -338,7 +474,8 @@ struct InterleavedNumericArrayConverter, Stride<_4, _1>>, private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -417,7 +554,8 @@ struct NumericArrayConverter { struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; // Hold output FP16s in reg. We need 1 reg for every 2 elements using RegArray = cutlass::AlignedArray { private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; PackedResultType r; // __byte_perm simulates the add.u32 0x4B000000 to every u8 element of @@ -513,7 +652,8 @@ struct NumericArrayConverter { private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src_reg) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src_reg = src_[0]; // Hold output BF16s in reg. We need 1 reg for every 2 elements using RegArray = cutlass::AlignedArray, Stride<_4, _1>>, private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -671,7 +812,8 @@ struct InterleavedNumericArrayConverter, Stride<_4, _1>>, private: struct RegConvert { template - CUTLASS_DEVICE static PackedResultType convert(uint32_t src) { + CUTLASS_DEVICE static PackedResultType convert(Array src_) { + uint32_t src = src_[0]; using RegArray = cutlass::AlignedArray; @@ -788,6 +930,61 @@ struct NumericArrayConverter { #endif +// for Array <= Array +// FastFP16toINT8 from https://arxiv.org/pdf/2406.09904 +template +struct NumericArrayConverter { + using result_type = Array; + using source_type = Array; + + struct RegConvert { + // FastFP16toINT8 from https://arxiv.org/pdf/2406.09904 + template + CUTLASS_DEVICE static PackedResultType convert( + Array src) { + // Hold output int8s in reg. We need 1 reg for every 4 elements + using RegArray = cutlass::AlignedArray< + uint32_t, std::max(PackedResultType::kElements / 4, size_t(1))>; + RegArray r; + + static constexpr uint32_t MAGIC_BIAS_ = 0x64806480; + auto MAGIC_BIAS = *reinterpret_cast(&MAGIC_BIAS_); + + *reinterpret_cast(&src[0]) = + __hadd2(*reinterpret_cast(&src[0]), MAGIC_BIAS); + + if constexpr (src_regs > 1) { + *reinterpret_cast(&src[1]) = + __hadd2(*reinterpret_cast(&src[1]), MAGIC_BIAS); + } + + static_assert(PackedResultType::kElements <= 4); + uint32_t uint8s; + static constexpr uint32_t MASK_0246 = 0x6420; + static constexpr uint32_t UINT8s_TO_INT8s_MASK = 0x80808080; + asm volatile("prmt.b32 %0,%1,%2,%3;\n" + : "=r"(uint8s) + : "r"(src[0]), "r"((src_regs > 1) ? 
src[1] : src[0]), + "n"(MASK_0246)); + + uint32_t int8s = (uint8s ^ UINT8s_TO_INT8s_MASK); + + return reinterpret_cast(int8s); + }; + }; + + public: + CUTLASS_DEVICE + static result_type convert(source_type const& source) { + return ArrayConverterPacked32Bit::convert(source); + } + + CUTLASS_DEVICE + result_type operator()(source_type const& s) const { return convert(s); } +}; + ///////////////////////////////////////////////////////////////////////////////////////////////// } // namespace cutlass diff --git a/csrc/cutlass_extensions/vllm_type_utils.cuh b/csrc/cutlass_extensions/vllm_type_utils.cuh new file mode 100644 index 0000000000000..500ed508c8303 --- /dev/null +++ b/csrc/cutlass_extensions/vllm_type_utils.cuh @@ -0,0 +1,42 @@ +#include "cutlass/bfloat16.h" +#include "cutlass/half.h" +#include "cuda_bf16.h" + +#include "cutlass_extensions/vllm_custom_types.cuh" + +namespace cutlass { + +template +struct nameof { + static constexpr char const* value = "unknown"; +}; + +template +inline constexpr auto nameof_v = nameof::value; + +#define NAMEOF_TYPE(T) \ + template <> \ + struct nameof { \ + static constexpr char const* value = #T; \ + }; + +NAMEOF_TYPE(float_e4m3_t) +NAMEOF_TYPE(float_e5m2_t) +NAMEOF_TYPE(half_t) +NAMEOF_TYPE(nv_bfloat16) +NAMEOF_TYPE(bfloat16_t) +NAMEOF_TYPE(float) + +NAMEOF_TYPE(int4b_t) +NAMEOF_TYPE(int8_t) +NAMEOF_TYPE(int32_t) +NAMEOF_TYPE(int64_t) + +NAMEOF_TYPE(vllm_uint4b8_t) +NAMEOF_TYPE(uint4b_t) +NAMEOF_TYPE(uint8_t) +NAMEOF_TYPE(vllm_uint8b128_t) +NAMEOF_TYPE(uint32_t) +NAMEOF_TYPE(uint64_t) + +}; // namespace cutlass \ No newline at end of file diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu index ee801e16573d4..dbb72e8bbd3f5 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cu @@ -8,6 +8,10 @@ #include "scaled_mm_c2x_sm89_fp8_dispatch.cuh" #include "scaled_mm_c2x_sm89_int8_dispatch.cuh" +#include "cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp" + +using namespace vllm; + /* This file defines quantized GEMM operations using the CUTLASS 2.x API, for NVIDIA GPUs with SM versions prior to sm90 (Hopper). 
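For reference, the FastFP16toINT8 conversion added above hinges on a magic-bias trick: adding fp16 1152.0 (bit pattern 0x6480) to a value already in int8 range leaves the biased integer x + 128 in the low byte of the fp16 encoding, which the prmt instruction then gathers before the sign-flip xor. A minimal per-element NumPy sketch of that idea (illustrative only; the actual kernel works on packed half2 registers and four elements at a time):

import numpy as np

def fp16_to_int8_magic_bias(x: np.ndarray) -> np.ndarray:
    # fp16 has a spacing of 1.0 in [1024, 2048), so fp16(x + 1152.0) encodes the
    # biased integer x + 128 directly in its low byte for x in [-128, 127].
    bits = (x.astype(np.float16) + np.float16(1152.0)).view(np.uint16)
    low_byte = (bits & 0xFF).astype(np.uint8)   # == x + 128
    # The xor mirrors the UINT8s_TO_INT8s_MASK step in the kernel: it undoes the bias.
    return (low_byte ^ 0x80).view(np.int8)

vals = np.array([-128.0, -1.0, 0.0, 1.0, 127.0], dtype=np.float16)
assert fp16_to_int8_magic_bias(vals).tolist() == [-128, -1, 0, 1, 127]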
@@ -22,12 +26,11 @@ void cutlass_scaled_mm_sm75_epilogue(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kInt8); if (out.dtype() == torch::kBFloat16) { - return vllm::cutlass_gemm_sm75_dispatch( + return cutlass_gemm_sm75_dispatch( out, a, b, std::forward(epilogue_args)...); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return vllm::cutlass_gemm_sm75_dispatch( + return cutlass_gemm_sm75_dispatch( out, a, b, std::forward(epilogue_args)...); } } @@ -42,10 +45,10 @@ void cutlass_scaled_mm_sm75(torch::Tensor& out, torch::Tensor const& a, if (bias) { TORCH_CHECK(bias->dtype() == out.dtype(), "currently bias dtype must match output dtype ", out.dtype()); - return cutlass_scaled_mm_sm75_epilogue( + return cutlass_scaled_mm_sm75_epilogue( out, a, b, a_scales, b_scales, *bias); } else { - return cutlass_scaled_mm_sm75_epilogue( + return cutlass_scaled_mm_sm75_epilogue( out, a, b, a_scales, b_scales); } } @@ -61,10 +64,10 @@ void cutlass_scaled_mm_azp_sm75(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b_scales.dtype() == torch::kFloat32); if (azp) { - return cutlass_scaled_mm_sm75_epilogue( + return cutlass_scaled_mm_sm75_epilogue( out, a, b, a_scales, b_scales, azp_adj, *azp, bias); } else { - return cutlass_scaled_mm_sm75_epilogue( + return cutlass_scaled_mm_sm75_epilogue( out, a, b, a_scales, b_scales, azp_adj, bias); } } @@ -78,12 +81,11 @@ void cutlass_scaled_mm_sm80_epilogue(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kInt8); if (out.dtype() == torch::kBFloat16) { - return vllm::cutlass_gemm_sm80_dispatch( + return cutlass_gemm_sm80_dispatch( out, a, b, std::forward(epilogue_args)...); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return vllm::cutlass_gemm_sm80_dispatch( + return cutlass_gemm_sm80_dispatch( out, a, b, std::forward(epilogue_args)...); } } @@ -98,10 +100,10 @@ void cutlass_scaled_mm_sm80(torch::Tensor& out, torch::Tensor const& a, if (bias) { TORCH_CHECK(bias->dtype() == out.dtype(), "currently bias dtype must match output dtype ", out.dtype()); - return cutlass_scaled_mm_sm80_epilogue( + return cutlass_scaled_mm_sm80_epilogue( out, a, b, a_scales, b_scales, *bias); } else { - return cutlass_scaled_mm_sm80_epilogue( + return cutlass_scaled_mm_sm80_epilogue( out, a, b, a_scales, b_scales); } } @@ -117,10 +119,10 @@ void cutlass_scaled_mm_azp_sm80(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b_scales.dtype() == torch::kFloat32); if (azp) { - return cutlass_scaled_mm_sm80_epilogue( + return cutlass_scaled_mm_sm80_epilogue( out, a, b, a_scales, b_scales, azp_adj, *azp, bias); } else { - return cutlass_scaled_mm_sm80_epilogue( + return cutlass_scaled_mm_sm80_epilogue( out, a, b, a_scales, b_scales, azp_adj, bias); } } @@ -134,13 +136,12 @@ void cutlass_scaled_mm_sm89_epilogue(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kInt8); if (out.dtype() == torch::kBFloat16) { - return vllm::cutlass_gemm_sm89_int8_dispatch( + return cutlass_gemm_sm89_int8_dispatch( out, a, b, std::forward(epilogue_args)...); } else { assert(out.dtype() == torch::kFloat16); - return vllm::cutlass_gemm_sm89_int8_dispatch( + return cutlass_gemm_sm89_int8_dispatch( out, a, b, std::forward(epilogue_args)...); } } else { @@ -148,13 +149,13 @@ void cutlass_scaled_mm_sm89_epilogue(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b.dtype() == torch::kFloat8_e4m3fn); if (out.dtype() == torch::kBFloat16) { - return vllm::cutlass_gemm_sm89_fp8_dispatch< - cutlass::float_e4m3_t, 
cutlass::bfloat16_t, Epilogue>( + return cutlass_gemm_sm89_fp8_dispatch( out, a, b, std::forward(epilogue_args)...); } else { TORCH_CHECK(out.dtype() == torch::kFloat16); - return vllm::cutlass_gemm_sm89_fp8_dispatch( + return cutlass_gemm_sm89_fp8_dispatch( out, a, b, std::forward(epilogue_args)...); } } @@ -170,10 +171,10 @@ void cutlass_scaled_mm_sm89(torch::Tensor& out, torch::Tensor const& a, if (bias) { TORCH_CHECK(bias->dtype() == out.dtype(), "currently bias dtype must match output dtype ", out.dtype()); - return cutlass_scaled_mm_sm89_epilogue( + return cutlass_scaled_mm_sm89_epilogue( out, a, b, a_scales, b_scales, *bias); } else { - return cutlass_scaled_mm_sm89_epilogue( + return cutlass_scaled_mm_sm89_epilogue( out, a, b, a_scales, b_scales); } } @@ -189,10 +190,10 @@ void cutlass_scaled_mm_azp_sm89(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b_scales.dtype() == torch::kFloat32); if (azp) { - return cutlass_scaled_mm_sm89_epilogue( + return cutlass_scaled_mm_sm89_epilogue( out, a, b, a_scales, b_scales, azp_adj, *azp, bias); } else { - return cutlass_scaled_mm_sm89_epilogue( + return cutlass_scaled_mm_sm89_epilogue( out, a, b, a_scales, b_scales, azp_adj, bias); } } diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cuh b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cuh index 6329ff63623e2..d03242f44ab1d 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cuh +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_c2x.cuh @@ -21,7 +21,6 @@ #include "cutlass/epilogue/threadblock/fusion/visitors.hpp" #include "cutlass/gemm/kernel/default_gemm_universal_with_visitor.h" -#include "broadcast_load_epilogue_c2x.hpp" #include "common.hpp" // clang-format on @@ -71,307 +70,6 @@ struct enable_sm89_to_sm90 : Kernel { #endif } }; - -/* - * This class provides the common load descriptors for the - * ScaledEpilogue[...] classes - */ -template -struct ScaledEpilogueBase { - protected: - using Accum = cutlass::epilogue::threadblock::VisitorAccFetch; - - template - using ColOrScalarLoad = - cutlass::epilogue::threadblock::VisitorColOrScalarBroadcast< - OutputTileThreadMap, T, Stride, Int<0>, Int<0>>>; - - template - using RowOrScalarLoad = - cutlass::epilogue::threadblock::VisitorRowOrScalarBroadcast< - OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; - - template - using ColLoad = cutlass::epilogue::threadblock::VisitorColBroadcast< - OutputTileThreadMap, T, Stride, Int<0>, Int<0>>>; - - template - using RowLoad = cutlass::epilogue::threadblock::VisitorRowBroadcast< - OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; - - template - using RowOrZeroLoad = - cutlass::epilogue::threadblock::VisitorRowOrZeroBroadcast< - OutputTileThreadMap, T, Stride, Int<1>, Int<0>>>; - - // This utility function constructs the arguments for the load descriptors - // from a tensor. It can handle both row and column, as well as row/column or - // scalar cases. - template - static auto args_from_tensor(torch::Tensor const& tensor) { - using Arguments = typename Descriptor::Arguments; - auto* data_ptr = static_cast(tensor.data_ptr()); - if constexpr (std::is_same_v> || - std::is_same_v>) { - return Arguments{data_ptr, tensor.numel() != 1}; - } else { - // it would technically work but no use case as data_ptr is never nullptr - static_assert(!std::is_same_v>); - return Arguments{data_ptr}; - } - } - - // This overload handles the case where there might not be a tensor, in which - // case a nullptr is passed and a constant (0) is used. 
- template - static auto args_from_tensor(c10::optional const& tensor) { - static_assert(std::is_same_v>); - using Arguments = typename Descriptor::Arguments; - auto* data_ptr = tensor ? static_cast(tensor->data_ptr()) : nullptr; - return Arguments{data_ptr}; - } -}; - -/* - This epilogue function defines a quantized GEMM operation similar to - torch._scaled_mm. - - A and B may be both either int8 or fp8_e4m3. A can be quantized per-tensor or - per-row. B can be quantized per-tensor or per-column. - Any combination of per-tensor and per-row or column is supported. - A and B must have symmetric quantization (zero point == 0). - - So the GEMM operation is D = (a_scales * A) (b_scales * B), where the - scales are applied elementwise with numpy-style broadcasting. - - ScaleA and ScaleB define the epilogue functions that apply the scales for - the A and B operands respectively. These scales may be either per-tensor or - per row or column. -*/ -template -struct ScaledEpilogue - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - - using Compute0 = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTCompute0 = - cutlass::epilogue::threadblock::Sm80EVT; - - using Compute1 = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::threadblock::Sm80EVT; - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - - typename EVTCompute0::Arguments evt0_args{b_args}; - return ArgumentType{a_args, evt0_args}; - } -}; - -/* - * This epilogue performs the same operation as ScaledEpilogue, but adds a bias. - * This bias can also be used in the per-tensor azp case, where the activation - * zero point (azp) is used to compute an azp correction term, - * which is folded into the bias. - * - * The bias tensor must be per-output channel. - * ScaleA and ScaleB can be per-tensor or per-token/per-channel. 
- */ -template -struct ScaledEpilogueBias - : protected ScaledEpilogueBase { - protected: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowLoad; - using Compute0 = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTCompute0 = - cutlass::epilogue::threadblock::Sm80EVT; - - using Compute1 = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = cutlass::epilogue::threadblock::Sm80EVT; - using ArgumentType = typename EVTCompute::Arguments; - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - - typename EVTCompute0::Arguments evt0_args{b_args}; - return ArgumentType{a_args, evt0_args, bias_args}; - } -}; - -/* - * This epilogue directly supports per-tensor azp in int32 form. - * As opposed to the per-token epilogue below, this epilogue only has an azp_adj - * term, which should already be multiplied with the scalar azp. - * The azp_adj term is a 1D tensor of shape (1,n), computed as azp * J @ B. - * - * This epilogue also supports bias, which remains per-channel. - */ -template -struct ScaledEpilogueBiasAzp - : protected ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowOrZeroLoad; - - // This is the full AZP term, azp * J @ B, shape (1,n) - using AzpWithAdj = typename SUPER::template RowLoad; - - // Compute float(accum - azp_adj), both operands are int32_t - using ComputeAzp = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::minus, float, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAzp = - cutlass::epilogue::threadblock::Sm80EVT; - - using ComputeScaleB = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeScaleB = - cutlass::epilogue::threadblock::Sm80EVT; - - using ComputeScaleBiasA = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::threadblock::Sm80EVT; - - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& azp_adj, - c10::optional const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - auto azp_adj_args = - SUPER::template args_from_tensor(azp_adj); - - typename EVTComputeAzp::Arguments evt_azp_args{{}, azp_adj_args}; - typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_azp_args}; - return ArgumentType{a_args, evt_scale_b_args, bias_args}; - } -}; - -/* - * This epilogue supports 
per-token azp by computing and applying - * the correction term using a rank-1 update. If the term were materialized, - * it would require O(m*n) space, and this way it only requires O(m+n) space. - * The azp term is a 1D tensor of shape (m,1), and represents the unscaled zero - * point for each row of A. - * The azp_adj term is a 1D tensor of shape (1,n), computed as J @ B. - * - * This epilogue also supports bias, which remains per-channel. - */ -template -struct ScaledEpilogueBiasAzpToken - : protected ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowOrZeroLoad; - - // Per-token azp term, shape (m,1) - using Azp = typename SUPER::template ColLoad; - - // This is the AZP adjustment term, J @ B, shape (1,n) - using AzpAdj = typename SUPER::template RowLoad; - - // Compute azp * azp_adj - using ComputeAzp = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, int32_t, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAzp = - cutlass::epilogue::threadblock::Sm80EVT; - - // Compute float(accum - azp*azp_adj), all operands are int32_t - using ComputeAcc = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::minus, float, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAcc = - cutlass::epilogue::threadblock::Sm80EVT; - - using ComputeScaleB = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeScaleB = - cutlass::epilogue::threadblock::Sm80EVT; - - using ComputeScaleBiasA = cutlass::epilogue::threadblock::VisitorCompute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::threadblock::Sm80EVT; - - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& azp_adj, - torch::Tensor const& azp, - c10::optional const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - auto azp_args = SUPER::template args_from_tensor(azp); - auto azp_adj_args = - SUPER::template args_from_tensor(azp_adj); - - typename EVTComputeAzp::Arguments evt_azp_args{azp_args, azp_adj_args}; - typename EVTComputeAcc::Arguments evt_acc_args{{}, evt_azp_args}; - typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_acc_args}; - return ArgumentType{a_args, evt_scale_b_args, bias_args}; - } -}; - template typename ArchGuard, typename ElementAB_, typename ElementD_, template typename Epilogue_, typename TileShape, diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu b/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu index 292c9e4b34e1c..33581a63d4c3d 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_c3x.cu @@ -23,11 +23,12 @@ #include "cutlass/epilogue/collective/collective_builder.hpp" #include "cutlass/gemm/collective/collective_builder.hpp" -#include "broadcast_load_epilogue_c3x.hpp" +#include "cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp" #include "common.hpp" // clang-format on using namespace cute; +using 
namespace vllm; /* This file defines quantized GEMM operations using the CUTLASS 3.x API, for @@ -56,305 +57,6 @@ struct enable_sm90_or_later : Kernel { #endif } }; - -/* - * This class provides the common load descriptors for the - * ScaledEpilogue[...] classes - */ -template -struct ScaledEpilogueBase { - protected: - using Accum = cutlass::epilogue::fusion::Sm90AccFetch; - - template - using ColOrScalarLoad = cutlass::epilogue::fusion::Sm90ColOrScalarBroadcast< - 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, - Stride, Int<0>, Int<0>>>; - - template - using RowOrScalarLoad = cutlass::epilogue::fusion::Sm90RowOrScalarBroadcast< - 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, - Stride, Int<1>, Int<0>>>; - - // Don't want to support nullptr by default - template - using ColLoad = cutlass::epilogue::fusion::Sm90ColBroadcast< - 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, - Stride, Int<0>, Int<0>>, 128 / sizeof_bits_v, EnableNullPtr>; - - // Don't want to support nullptr by default - template - using RowLoad = cutlass::epilogue::fusion::Sm90RowBroadcast< - 0 /*Stages*/, typename EpilogueDescriptor::TileShape, T, - Stride, Int<1>, Int<0>>, 128 / sizeof_bits_v, EnableNullPtr>; - - // This utility function constructs the arguments for the load descriptors - // from a tensor. It can handle both row and column, as well as row/column or - // scalar cases. - template - static auto args_from_tensor(torch::Tensor const& tensor) { - using Arguments = typename Descriptor::Arguments; - auto* data_ptr = static_cast(tensor.data_ptr()); - if constexpr (std::is_same_v> || - std::is_same_v>) { - return Arguments{data_ptr, tensor.numel() != 1}; - } else { - static_assert(!std::is_same_v> && - !std::is_same_v>); - return Arguments{data_ptr}; - } - } - - // This overload handles the case where there might not be a tensor, in which - // case a nullptr is passed and a constant (0) is used. - template - static auto args_from_tensor(c10::optional const& tensor) { - using Arguments = typename Descriptor::Arguments; - auto* data_ptr = tensor ? static_cast(tensor->data_ptr()) : nullptr; - static_assert(std::is_same_v> || - std::is_same_v>); - return Arguments{data_ptr}; - } -}; - -/* - This epilogue function defines a quantized GEMM operation similar to - torch.scaled_mm_. - - A and B may be both either int8 or fp8_e4m3. A can be - quantized per-tensor or per-row. B can be quantized per-tensor or per-column. - Any combination of per-tensor and per-row or column is supported. - A and B must have symmetric quantization (zero point == 0). - - So the GEMM operation is D = (a_scales * A) (b_scales * B), where the - scales are applied elementwise with numpy-style broadcasting. - - ScaleA and ScaleB define the epilogue functions that apply the scales for - the A and B operands respectively. These scales may be either per-tensor or - per row or column. 
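The comment above (now shared via cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp) defines D = (a_scales * A) (b_scales * B) with numpy-style broadcasting; since each scale is constant along the reduction dimension, it can equivalently be applied to the int32 accumulator, which is what the epilogue does. A rough NumPy rendering of that reference math, with hypothetical shapes:

import numpy as np

m, n, k = 4, 8, 16
rng = np.random.default_rng(0)
A = rng.integers(-128, 128, (m, k), dtype=np.int32)   # int8-range activations
B = rng.integers(-128, 128, (k, n), dtype=np.int32)   # int8-range weights
a_scales = rng.random((m, 1), dtype=np.float32)       # per-row, or (1, 1) for per-tensor
b_scales = rng.random((1, n), dtype=np.float32)       # per-column, or (1, 1) for per-tensor

# Scaling the operands and scaling the accumulator agree because the scales
# broadcast along k: (a_scales*A) @ (b_scales*B) == a_scales * (A@B) * b_scales.
D_ref = (a_scales * A) @ (b_scales * B)
D_epi = a_scales * (A @ B).astype(np.float32) * b_scales
assert np.allclose(D_ref, D_epi)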
-*/ -template -struct ScaledEpilogue - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - - using Compute0 = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTCompute0 = - cutlass::epilogue::fusion::Sm90EVT; - - using Compute1 = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::fusion::Sm90EVT; - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - - typename EVTCompute0::Arguments evt0_args{b_args}; - return ArgumentType{a_args, evt0_args}; - } -}; - -/* - * This epilogue performs the same operation as ScaledEpilogue, but adds a bias. - * This bias can also be used in the per-tensor azp case, where the activation - * zero point (azp) is used to compute an azp correction term, - * which is folded into the bias. - * - * The bias tensor must be per-output channel. - * ScaleA and ScaleB can be per-tensor or per-token/per-channel. - */ -template -struct ScaledEpilogueBias - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowLoad; - - using Compute0 = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTCompute0 = - cutlass::epilogue::fusion::Sm90EVT; - - using Compute1 = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::fusion::Sm90EVT; - - using ArgumentType = typename EVTCompute::Arguments; - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - - typename EVTCompute0::Arguments evt0_args{b_args}; - return ArgumentType{a_args, evt0_args, bias_args}; - } -}; - -/* - * This epilogue directly supports per-tensor azp in int32 form. - * As opposed to the per-token epilogue below, this epilogue only has an azp_adj - * term, which should already be multiplied with the scalar azp. - * The azp_adj term is a 1D tensor of shape (1,n), computed as azp * J @ B. - * - * This epilogue also supports bias, which remains per-channel. 
- */ -template -struct ScaledEpilogueBiasAzp - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowLoad; - - // This is the full AZP term, azp * J @ B, shape (1,n) - using AzpWithAdj = typename SUPER::template RowLoad; - - // Compute float(accum - azp_adj), both operands are int32_t - using ComputeAzp = cutlass::epilogue::fusion::Sm90Compute< - cutlass::minus, float, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAzp = - cutlass::epilogue::fusion::Sm90EVT; - - using ComputeScaleB = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeScaleB = - cutlass::epilogue::fusion::Sm90EVT; - - using ComputeScaleBiasA = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::fusion::Sm90EVT; - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& azp_adj, - c10::optional const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - auto azp_adj_args = - SUPER::template args_from_tensor(azp_adj); - - typename EVTComputeAzp::Arguments evt_azp_args{{}, azp_adj_args}; - typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_azp_args}; - return ArgumentType{a_args, evt_scale_b_args, bias_args}; - } -}; - -/* - * This epilogue supports per-token azp by computing and applying - * the correction term using a rank-1 update. If the term were materialized, - * it would require O(m*n) space, and this way it only requires O(m+n) space. - * The azp term is a 1D tensor of shape (m,1), and represents the unscaled zero - * point for each row of A. - * The azp_adj term is a 1D tensor of shape (1,n), computed as J @ B. - * - * This epilogue also supports bias, which remains per-channel. 
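The comment above spells out why the per-token azp path only needs O(m+n) extra data: subtracting the zero point from A before the GEMM is algebraically the same as subtracting the rank-1 update azp * (J @ B) from the int32 accumulator. A small NumPy check of that identity (hypothetical shapes; output scales and bias omitted for brevity):

import numpy as np

m, n, k = 4, 8, 16
rng = np.random.default_rng(0)
A_q = rng.integers(0, 256, (m, k), dtype=np.int32)     # asymmetric uint8-range activations
B_q = rng.integers(-128, 128, (k, n), dtype=np.int32)  # symmetric int8-range weights
azp = rng.integers(0, 256, (m, 1), dtype=np.int32)     # unscaled zero point per row of A

# Reference: remove the zero point before the GEMM (touches all of A, O(m*k)).
ref = (A_q - azp) @ B_q

# Epilogue form: azp_adj = J @ B is a (1, n) row vector and azp is (m, 1), so the
# correction azp * azp_adj is a rank-1 update applied to the accumulator.
# (In the per-tensor case azp is a scalar and azp * azp_adj is precomputed.)
azp_adj = np.ones((1, k), dtype=np.int32) @ B_q
assert np.array_equal(ref, A_q @ B_q - azp * azp_adj)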
- */ -template -struct ScaledEpilogueBiasAzpToken - : private ScaledEpilogueBase { - private: - using SUPER = ScaledEpilogueBase; - using Accum = typename SUPER::Accum; - using ScaleA = typename SUPER::template ColOrScalarLoad; - using ScaleB = typename SUPER::template RowOrScalarLoad; - using Bias = typename SUPER::template RowLoad; - - // Per-token azp term, shape (m,1) - using Azp = typename SUPER::template ColLoad; - - // This is the AZP adjustment term, J @ B, shape (1,n) - using AzpAdj = typename SUPER::template RowLoad; - - // Compute azp * azp_adj - using ComputeAzp = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, int32_t, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAzp = - cutlass::epilogue::fusion::Sm90EVT; - - // Compute float(accum - azp*azp_adj), all operands are int32_t - using ComputeAcc = cutlass::epilogue::fusion::Sm90Compute< - cutlass::minus, float, int32_t, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeAcc = - cutlass::epilogue::fusion::Sm90EVT; - - using ComputeScaleB = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiplies, float, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - using EVTComputeScaleB = - cutlass::epilogue::fusion::Sm90EVT; - - using ComputeScaleBiasA = cutlass::epilogue::fusion::Sm90Compute< - cutlass::multiply_add, ElementD, float, - cutlass::FloatRoundStyle::round_to_nearest>; - - public: - using EVTCompute = - cutlass::epilogue::fusion::Sm90EVT; - using ArgumentType = typename EVTCompute::Arguments; - - static ArgumentType prepare_args(torch::Tensor const& a_scales, - torch::Tensor const& b_scales, - torch::Tensor const& azp_adj, - torch::Tensor const& azp, - c10::optional const& bias) { - auto a_args = SUPER::template args_from_tensor(a_scales); - auto b_args = SUPER::template args_from_tensor(b_scales); - auto bias_args = SUPER::template args_from_tensor(bias); - auto azp_args = SUPER::template args_from_tensor(azp); - auto azp_adj_args = - SUPER::template args_from_tensor(azp_adj); - - typename EVTComputeAzp::Arguments evt_azp_args{azp_args, azp_adj_args}; - typename EVTComputeAcc::Arguments evt_acc_args{{}, evt_azp_args}; - typename EVTComputeScaleB::Arguments evt_scale_b_args{b_args, evt_acc_args}; - return ArgumentType{a_args, evt_scale_b_args, bias_args}; - } -}; - template typename Epilogue_, typename TileShape, typename ClusterShape, typename KernelSchedule, @@ -721,11 +423,11 @@ void cutlass_scaled_mm_sm90(torch::Tensor& c, torch::Tensor const& a, if (bias) { TORCH_CHECK(bias->dtype() == c.dtype(), "currently bias dtype must match output dtype ", c.dtype()); - return cutlass_scaled_mm_sm90_epilogue( + return cutlass_scaled_mm_sm90_epilogue( c, a, b, a_scales, b_scales, *bias); } else { - return cutlass_scaled_mm_sm90_epilogue(c, a, b, a_scales, - b_scales); + return cutlass_scaled_mm_sm90_epilogue( + c, a, b, a_scales, b_scales); } } @@ -740,10 +442,10 @@ void cutlass_scaled_mm_azp_sm90(torch::Tensor& out, torch::Tensor const& a, TORCH_CHECK(b_scales.dtype() == torch::kFloat32); if (azp) { - return cutlass_scaled_mm_sm90_epilogue( + return cutlass_scaled_mm_sm90_epilogue( out, a, b, a_scales, b_scales, azp_adj, *azp, bias); } else { - return cutlass_scaled_mm_sm90_epilogue( + return cutlass_scaled_mm_sm90_epilogue( out, a, b, a_scales, b_scales, azp_adj, bias); } } diff --git a/csrc/quantization/machete/generate.py b/csrc/quantization/machete/generate.py index d126af1849024..ac63afe79a255 100644 --- a/csrc/quantization/machete/generate.py +++ 
b/csrc/quantization/machete/generate.py @@ -3,8 +3,10 @@ import os import shutil from collections.abc import Iterable -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union +from copy import deepcopy +from dataclasses import dataclass, fields +from functools import reduce +from typing import Dict, List, Optional, Tuple, Union import jinja2 # yapf conflicts with isort for this block @@ -14,7 +16,10 @@ MixedInputKernelScheduleType, TileSchedulerTag, TileSchedulerType, VLLMDataType, - VLLMDataTypeNames, VLLMDataTypeTag, + VLLMDataTypeNames, + VLLMDataTypeSize, VLLMDataTypeTag, + VLLMDataTypeTorchDataTypeTag, + VLLMDataTypeVLLMScalarTypeTag, VLLMKernelScheduleTag) # yapf: enable @@ -27,49 +32,125 @@ #include "../machete_mm_launcher.cuh" namespace machete { -using GemmDispatcher_ = GemmDispatcher< - {{DataTypeTag[type_config.element_a]}}, // ElementA - {{DataTypeTag[type_config.element_b]}}, // ElementB - {{DataTypeTag[type_config.element_d]}}, // ElementD - {{DataTypeTag[type_config.accumulator]}}, // Accumulator - {{DataTypeTag[type_config.element_b_scale]}}, // Scales - {{DataTypeTag[type_config.element_b_zeropoint]}}>; // Zeropoints - -{% for s in schedules %}extern torch::Tensor -impl_{{type_name}}_sch_{{ gen_sch_name(s) }}(PyTorchArguments args); -{% endfor %} -template <> -torch::Tensor GemmDispatcher_::dispatch(PyTorchArguments args) { + +{% for impl_config in impl_configs %} +{% set type_sig = gen_type_sig(impl_config.types) -%} +{% for s in impl_config.schedules %} +extern torch::Tensor impl_{{type_sig}}_sch_{{gen_sch_sig(s)}}(MMArgs); +{%- endfor %} + +torch::Tensor mm_dispatch_{{type_sig}}(MMArgs args) { [[maybe_unused]] auto M = args.A.size(0); [[maybe_unused]] auto N = args.B.size(1); [[maybe_unused]] auto K = args.A.size(1); - if (!args.schedule) { - {%- for cond, s in heuristic %} + if (!args.maybe_schedule) { + {%- for cond, s in impl_config.heuristic %} {%if cond is not none%}if ({{cond}}) {%- else %}else {%- endif %} - return impl_{{ type_name }}_sch_{{ gen_sch_name(s) }}(args);{% endfor %} + return impl_{{type_sig}}_sch_{{ gen_sch_sig(s) }}(args);{% endfor %} } - {% for s in schedules %} - if (*args.schedule == "{{ gen_sch_name(s) }}") { - return impl_{{ type_name }}_sch_{{ gen_sch_name(s) }}(args); - } - {% endfor %} + {%- for s in impl_config.schedules %} + if (*args.maybe_schedule == "{{ gen_sch_sig(s) }}") + return impl_{{type_sig}}_sch_{{ gen_sch_sig(s) }}(args); + {%- endfor %} TORCH_CHECK_NOT_IMPLEMENTED(false, "machete_gemm(..) 
is not implemented for " - "schedule = ", *args.schedule); + "schedule = ", *args.maybe_schedule); } +{%- endfor %} + -template <> -std::vector GemmDispatcher_::supported_schedules() { - return { - {% for s in schedules -%} - "{{ gen_sch_name(s) }}"{{ ", - " if not loop.last }}{%- endfor %} - }; +static inline std::optional maybe_scalartype( + c10::optional const& t) { + if (!t) { + return std::nullopt; + } else { + return t->scalar_type(); + }; +} + +torch::Tensor mm_dispatch(MMArgs args) { + auto out_type = args.maybe_out_type.value_or(args.A.scalar_type()); + auto a_type = args.A.scalar_type(); + auto maybe_g_scales_type = maybe_scalartype(args.maybe_group_scales); + auto maybe_g_zeros_type = maybe_scalartype(args.maybe_group_zeros); + auto maybe_ch_scales_type = maybe_scalartype(args.maybe_channel_scales); + auto maybe_tok_scales_type = maybe_scalartype(args.maybe_token_scales); + + {% for impl_config in impl_configs %} + {% set t = impl_config.types -%} + {% set type_sig = gen_type_sig(t) -%} + if (args.b_type == {{VLLMScalarTypeTag[t.b]}} + && a_type == {{TorchTypeTag[t.a]}} + && out_type == {{TorchTypeTag[t.out]}} + && {%if t.b_group_scale != void -%} + maybe_g_scales_type == {{TorchTypeTag[t.b_group_scale]}} + {%- else %}!maybe_g_scales_type{%endif%} + && {%if t.b_group_zeropoint != void -%} + maybe_g_zeros_type == {{TorchTypeTag[t.b_group_zeropoint]}} + {%- else %}!maybe_g_zeros_type{%endif%} + && {%if t.b_channel_scale != void -%} + maybe_ch_scales_type == {{TorchTypeTag[t.b_channel_scale]}} + {%- else %}!maybe_ch_scales_type{%endif%} + && {%if t.a_token_scale != void -%} + maybe_tok_scales_type == {{TorchTypeTag[t.a_token_scale]}} + {%- else %}!maybe_tok_scales_type{%endif%} + ) { + return mm_dispatch_{{type_sig}}(args); + } + {%- endfor %} + + TORCH_CHECK_NOT_IMPLEMENTED( + false, "machete_mm(..) is not implemented for " + "a_type=", args.A.scalar_type(), + ", b_type=", args.b_type.str(), + ", out_type=", out_type, + ", with_group_scale_type=", maybe_g_scales_type + ? toString(*maybe_g_scales_type) : "None", + ", with_group_zeropoint_type=", maybe_g_zeros_type + ? toString(*maybe_g_zeros_type) : "None", + ", with_channel_scale_type=", maybe_ch_scales_type + ? toString(*maybe_ch_scales_type) : "None", + ", with_token_scale_type=", maybe_tok_scales_type + ? 
toString(*maybe_tok_scales_type) : "None", + "; implemented types are: \\n", + {%- for impl_config in impl_configs %} + {% set t = impl_config.types -%} + "\\t{{gen_type_option_name(t)}}\\n", + {%- endfor %} + ""); } +std::vector supported_schedules_dispatch( + SupportedSchedulesArgs args) { + auto out_type = args.maybe_out_type.value_or(args.a_type); + + {% for impl_config in impl_configs %} + {% set t = impl_config.types -%} + {% set schs = impl_config.schedules -%} + if (args.b_type == {{VLLMScalarTypeTag[t.b]}} + && args.a_type == {{TorchTypeTag[t.a]}} + && out_type == {{TorchTypeTag[t.out]}} + && {%if t.b_group_scale != void -%} + args.maybe_group_scales_type == {{TorchTypeTag[t.b_group_scale]}} + {%- else %}!args.maybe_group_scales_type{%endif%} + && {%if t.b_group_zeropoint != void-%} + args.maybe_group_zeros_type == {{TorchTypeTag[t.b_group_zeropoint]}} + {%- else %}!args.maybe_group_zeros_type{%endif%} + ) { + return { + {%- for s in impl_config.schedules %} + "{{gen_sch_sig(s)}}"{% if not loop.last %},{% endif %} + {%- endfor %} + }; + } + {%- endfor %} + + return {}; +}; + }; // namespace machete """ @@ -77,20 +158,10 @@ #include "../machete_mm_launcher.cuh" namespace machete { -template -using Kernel = MacheteKernelTemplate< - {{DataTypeTag[type_config.element_a]}}, // ElementA - {{DataTypeTag[type_config.element_b]}}, // ElementB - {{DataTypeTag[type_config.element_d]}}, // ElementD - {{DataTypeTag[type_config.accumulator]}}, // Accumulator - {{DataTypeTag[type_config.element_b_scale]}}, // Scales - {{DataTypeTag[type_config.element_b_zeropoint]}}, // Zeropoints - cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput, - Config, with_C, with_scales, with_zeropoints>; - -{% for sch in schedules %} -{% set schedule_name = gen_sch_name(sch) -%} -struct sch_{{schedule_name}} { + +{% for sch in unique_schedules(impl_configs) %} +{% set sch_sig = gen_sch_sig(sch) -%} +struct sch_{{sch_sig}} { using TileShapeNM = Shape<{{ to_cute_constant(sch.tile_shape_mn)|join(', ')}}>; using ClusterShape = Shape<{{ @@ -101,27 +172,34 @@ using TileScheduler = {{TileSchedulerTag[sch.tile_scheduler]}}; using EpilogueTileType = cutlass::epilogue::collective::EpilogueTileAuto; }; - +{% endfor %} + +{% for impl_config in impl_configs %} +{% set t = impl_config.types -%} +{% set schs = impl_config.schedules -%} +{% set type_sig = gen_type_sig(t) -%} + +template +using Kernel_{{type_sig}} = MacheteKernelTemplate< + {{DataTypeTag[t.a]}}, // ElementA + {{DataTypeTag[t.b]}}, // ElementB + {{DataTypeTag[t.out]}}, // ElementD + {{DataTypeTag[t.accumulator]}}, // Accumulator + {{DataTypeTag[t.b_group_scale]}}, // GroupScaleT + {{DataTypeTag[t.b_group_zeropoint]}}, // GroupZeroT + {{DataTypeTag[t.b_channel_scale]}}, // ChannelScaleT + {{DataTypeTag[t.a_token_scale]}}, // TokenScaleT + cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput, + Sch>; + +{% for sch in schs %} +{% set sch_sig = gen_sch_sig(sch) -%} torch::Tensor -impl_{{type_name}}_sch_{{schedule_name}}(PyTorchArguments args) { - bool with_C = args.C.has_value(), with_scales = args.scales.has_value(), - with_zeropoints = args.zeros.has_value(); - - {% for s in specializations %} - if (with_C == {{s.with_C|lower}} - && with_zeropoints == {{s.with_zeropoints|lower}} - && with_scales == {{s.with_scales|lower}}) { - return run_impl>(args); - }{% endfor %} - - TORCH_CHECK_NOT_IMPLEMENTED( - false, "for the sake of compile times and binary size machete_mm(..) 
is " - " not implemented for with_C=", with_C, ", with_scales=", with_scales, - ", with_zeropoints=", with_zeropoints, - " (for {{type_name}}_sch_{{schedule_name}})"); +impl_{{type_sig}}_sch_{{sch_sig}}(MMArgs args) { + return run_impl>(args); } -{% endfor %} +{%- endfor %} +{%- endfor %} }; // namespace machete """ @@ -130,26 +208,34 @@ #include "../machete_prepack_launcher.cuh" namespace machete { -using PrepackBDispatcher_ = PrepackBDispatcher< - {{DataTypeTag[type_config.element_a]}}, // ElementA - {{DataTypeTag[type_config.element_b]}}, // ElementB - {{DataTypeTag[type_config.element_d]}}, // ElementD - {{DataTypeTag[type_config.accumulator]}}, // Accumulator - {{DataTypeTag[type_config.element_b_scale]}}, // Scales - {{DataTypeTag[type_config.element_b_zeropoint]}}>; // Zeropoints - -using PrepackedLayoutB = PrepackedLayoutBTemplate< - {{DataTypeTag[type_config.element_a]}}, // ElementA - {{DataTypeTag[type_config.element_b]}}, // ElementB - {{DataTypeTag[type_config.element_d]}}, // ElementD - {{DataTypeTag[type_config.accumulator]}}, // Accumulator - cutlass::layout::ColumnMajor, - cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput>; - -template <> -torch::Tensor PrepackBDispatcher_::dispatch(torch::Tensor B) { - return prepack_impl(B); + +torch::Tensor prepack_B_dispatch(PrepackBArgs args) { + auto convert_type = args.maybe_group_scales_type.value_or(args.a_type); + {%- for t in types %} + {% set b_type = unsigned_type_with_bitwidth(t.b_num_bits) %} + if (args.a_type == {{TorchTypeTag[t.a]}} + && args.b_type.size_bits() == {{t.b_num_bits}} + && convert_type == {{TorchTypeTag[t.convert]}}) { + return prepack_impl< + PrepackedLayoutBTemplate< + {{DataTypeTag[t.a]}}, // ElementA + {{DataTypeTag[b_type]}}, // ElementB + {{DataTypeTag[t.convert]}}, // ElementConvert + {{DataTypeTag[t.accumulator]}}, // Accumulator + cutlass::layout::ColumnMajor, + cutlass::gemm::KernelTmaWarpSpecializedCooperativeMixedInput> + >(args.B); + } + {%- endfor %} + + TORCH_CHECK_NOT_IMPLEMENTED(false, + "prepack_B_dispatch(..) is not implemented for " + "atype = ", args.a_type, + ", b_type = ", args.b_type.str(), + ", with_group_scales_type= ", args.maybe_group_scales_type ? 
+ toString(*args.maybe_group_scales_type) : "None"); } + }; // namespace machete """ @@ -166,32 +252,34 @@ class ScheduleConfig: tile_scheduler: TileSchedulerType -@dataclass +@dataclass(frozen=True) class TypeConfig: - element_a: DataType - element_b: Union[DataType, VLLMDataType] - element_b_scale: DataType - element_b_zeropoint: DataType - element_d: DataType + a: DataType + b: Union[DataType, VLLMDataType] + b_group_scale: DataType + b_group_zeropoint: DataType + b_channel_scale: DataType + a_token_scale: DataType + out: DataType accumulator: DataType -@dataclass -class Specialization: - with_C: bool - with_zeropoints: bool - with_scales: bool +@dataclass(frozen=True) +class PrepackTypeConfig: + a: DataType + b_num_bits: int + convert: DataType + accumulator: DataType @dataclass class ImplConfig: - type_config: TypeConfig - schedule_configs: List[ScheduleConfig] - specializations: List[Specialization] + types: TypeConfig + schedules: List[ScheduleConfig] heuristic: List[Tuple[Optional[str], ScheduleConfig]] -def generate_schedule_name(schedule_config: ScheduleConfig) -> str: +def generate_sch_sig(schedule_config: ScheduleConfig) -> str: tile_shape = ( f"{schedule_config.tile_shape_mn[0]}x{schedule_config.tile_shape_mn[1]}" ) @@ -209,40 +297,34 @@ def generate_schedule_name(schedule_config: ScheduleConfig) -> str: f"_{epilogue_schedule}_{tile_scheduler}") -# mostly unique shorter schedule_name -def generate_terse_schedule_name(schedule_config: ScheduleConfig) -> str: +# mostly unique shorter sch_sig +def generate_terse_sch_sig(schedule_config: ScheduleConfig) -> str: kernel_terse_names_replace = { "KernelTmaWarpSpecializedCooperativeMixedInput_": "TmaMI_", "TmaWarpSpecializedCooperative_": "TmaCoop_", "StreamKScheduler": "streamK", } - schedule_name = generate_schedule_name(schedule_config) + sch_sig = generate_sch_sig(schedule_config) for orig, terse in kernel_terse_names_replace.items(): - schedule_name = schedule_name.replace(orig, terse) - return schedule_name + sch_sig = sch_sig.replace(orig, terse) + return sch_sig # unique type_name -def generate_type_signature(kernel_type_config: TypeConfig): - element_a = VLLMDataTypeNames[kernel_type_config.element_a] - element_b = VLLMDataTypeNames[kernel_type_config.element_b] - element_d = VLLMDataTypeNames[kernel_type_config.element_d] - accumulator = VLLMDataTypeNames[kernel_type_config.accumulator] - element_scale = VLLMDataTypeNames[kernel_type_config.element_b_scale] - element_zeropoint = VLLMDataTypeNames[ - kernel_type_config.element_b_zeropoint] - - return (f"{element_a}{element_b}{element_d}" - f"{accumulator}{element_scale}{element_zeropoint}") - +def generate_type_signature(kernel_types: TypeConfig): + return str("".join([ + VLLMDataTypeNames[getattr(kernel_types, field.name)] + for field in fields(TypeConfig) + ])) -# non-unique shorter type_name -def generate_terse_type_signature(kernel_type_config: TypeConfig): - element_a = VLLMDataTypeNames[kernel_type_config.element_a] - element_b = VLLMDataTypeNames[kernel_type_config.element_b] - return f"{element_a}{element_b}" +def generate_type_option_name(kernel_types: TypeConfig): + return ", ".join([ + f"{field.name.replace('b_', 'with_')+'_type'}=" + + VLLMDataTypeNames[getattr(kernel_types, field.name)] + for field in fields(TypeConfig) + ]) def is_power_of_two(n): @@ -263,13 +345,36 @@ def _to_cute_constant(value: int): return _to_cute_constant(value) +def unique_schedules(impl_configs: List[ImplConfig]): + return list( + set(sch for impl_config in impl_configs + for sch in 
impl_config.schedules)) + + +def unsigned_type_with_bitwidth(num_bits): + return { + 4: DataType.u4, + 8: DataType.u8, + 16: DataType.u16, + 32: DataType.u32, + 64: DataType.u64, + }[num_bits] + + template_globals = { + "void": DataType.void, "DataTypeTag": VLLMDataTypeTag, + "VLLMScalarTypeTag": VLLMDataTypeVLLMScalarTypeTag, + "TorchTypeTag": VLLMDataTypeTorchDataTypeTag, "KernelScheduleTag": VLLMKernelScheduleTag, "EpilogueScheduleTag": EpilogueScheduleTag, "TileSchedulerTag": TileSchedulerTag, "to_cute_constant": to_cute_constant, - "gen_sch_name": generate_terse_schedule_name, + "gen_sch_sig": generate_terse_sch_sig, + "gen_type_sig": generate_type_signature, + "unique_schedules": unique_schedules, + "unsigned_type_with_bitwidth": unsigned_type_with_bitwidth, + "gen_type_option_name": generate_type_option_name } @@ -284,42 +389,82 @@ def create_template(template_str): prepack_dispatch_template = create_template(PREPACK_TEMPLATE) -def create_sources(impl_config: ImplConfig, num_impl_files=1): +def create_sources(impl_configs: List[ImplConfig], num_impl_files=8): sources = [] - type_name = generate_type_signature(impl_config.type_config) - terse_type_name = generate_terse_type_signature(impl_config.type_config) - sources.append(( - f"machete_mm_{terse_type_name}", - mm_dispatch_template.render(type_name=type_name, - type_config=impl_config.type_config, - schedules=impl_config.schedule_configs, - heuristic=impl_config.heuristic), + "machete_mm_dispatch", + mm_dispatch_template.render(impl_configs=impl_configs), )) + prepack_types = [] + for impl_config in impl_configs: + convert_type = impl_config.types.a \ + if impl_config.types.b_group_scale == DataType.void \ + else impl_config.types.b_group_scale + prepack_types.append( + PrepackTypeConfig( + a=impl_config.types.a, + b_num_bits=VLLMDataTypeSize[impl_config.types.b], + convert=convert_type, + accumulator=impl_config.types.accumulator, + )) + + def prepacked_type_key(prepack_type: PrepackTypeConfig): + # For now we we can just use the first accumulator type seen since + # the tensor core shapes/layouts don't vary based on accumulator + # type so we can generate less code this way + return (prepack_type.a, prepack_type.b_num_bits, prepack_type.convert) + + unique_prepack_types = [] + prepack_types_seen = set() + for prepack_type in prepack_types: + key = prepacked_type_key(prepack_type) + if key not in prepack_types_seen: + unique_prepack_types.append(prepack_type) + prepack_types_seen.add(key) + sources.append(( - f"machete_prepack_{terse_type_name}", - prepack_dispatch_template.render( - type_name=type_name, - type_config=impl_config.type_config, - ), + "machete_prepack", + prepack_dispatch_template.render(types=unique_prepack_types, ), )) - num_schedules = len(impl_config.schedule_configs) - schedules_per_file = math.ceil(num_schedules / num_impl_files) - for part, i in enumerate(range(0, num_schedules, schedules_per_file)): - file_schedules = impl_config.schedule_configs[i:i + schedules_per_file] + # Split up impls across files + num_impls = reduce(lambda x, y: x + len(y.schedules), impl_configs, 0) + num_impls_per_file = math.ceil(num_impls / num_impl_files) + + files_impls: List[List[ImplConfig]] = [[]] + + curr_num_impls_assigned = 0 + curr_impl_in_file = 0 + curr_impl_configs = deepcopy(list(reversed(impl_configs))) + + while curr_num_impls_assigned < num_impls: + room_left_in_file = num_impls_per_file - curr_impl_in_file + if room_left_in_file == 0: + files_impls.append([]) + room_left_in_file = num_impls_per_file + 
curr_impl_in_file = 0 + + curr_ic = curr_impl_configs[-1] + if len(curr_ic.schedules) >= room_left_in_file: + # Break apart the current impl config + tmp_ic = deepcopy(curr_ic) + tmp_ic.schedules = curr_ic.schedules[:room_left_in_file] + curr_ic.schedules = curr_ic.schedules[room_left_in_file:] + files_impls[-1].append(tmp_ic) + else: + files_impls[-1].append(curr_ic) + curr_impl_configs.pop() + curr_num_impls_assigned += len(files_impls[-1][-1].schedules) + curr_impl_in_file += len(files_impls[-1][-1].schedules) + for part, file_impls in enumerate(files_impls): sources.append(( - f"machete_mm_{terse_type_name}_impl_part{part}", - mm_impl_template.render( - type_name=type_name, - type_config=impl_config.type_config, - schedules=file_schedules, - specializations=impl_config.specializations, - ), + f"machete_mm_impl_part{part+1}", + mm_impl_template.render(impl_configs=file_impls), )) + return sources @@ -328,187 +473,169 @@ def generate(): # about how this works SCRIPT_DIR = os.path.dirname(__file__) - schedule_common_params = dict( + sch_common_params = dict( kernel_schedule=TmaMI, epilogue_schedule=TmaCoop, tile_scheduler=TileSchedulerType.StreamK, ) - # For now we use the same heuristic for all types - # Heuristic is currently tuned for H100s - default_heuristic = [ + # Stored as "condition": ((tile_shape_mn), (cluster_shape_mnk)) + default_tile_heuristic_config = { #### M = 257+ - ( - "M > 256 && K <= 16384 && N <= 4096", - ScheduleConfig( - tile_shape_mn=(128, 128), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 256", - ScheduleConfig( - tile_shape_mn=(128, 256), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 256 && K <= 16384 && N <= 4096": ((128, 128), (2, 1, 1)), + "M > 256": ((128, 256), (2, 1, 1)), #### M = 129-256 - ( - "M > 128 && K <= 4096 && N <= 4096", - ScheduleConfig( - tile_shape_mn=(128, 64), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 128 && K <= 8192 && N <= 8192", - ScheduleConfig( - tile_shape_mn=(128, 128), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 128", - ScheduleConfig( - tile_shape_mn=(128, 256), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 128 && K <= 4096 && N <= 4096": ((128, 64), (2, 1, 1)), + "M > 128 && K <= 8192 && N <= 8192": ((128, 128), (2, 1, 1)), + "M > 128": ((128, 256), (2, 1, 1)), #### M = 65-128 - ( - "M > 64 && K <= 4069 && N <= 4069", - ScheduleConfig( - tile_shape_mn=(128, 32), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 64 && K <= 4069 && N <= 8192", - ScheduleConfig( - tile_shape_mn=(128, 64), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 64 && K >= 8192 && N >= 12288", - ScheduleConfig( - tile_shape_mn=(256, 128), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 64", - ScheduleConfig( - tile_shape_mn=(128, 128), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 64 && K <= 4069 && N <= 4069": ((128, 32), (2, 1, 1)), + "M > 64 && K <= 4069 && N <= 8192": ((128, 64), (2, 1, 1)), + "M > 64 && K >= 8192 && N >= 12288": ((256, 128), (2, 1, 1)), + "M > 64": ((128, 128), (2, 1, 1)), #### M = 33-64 - ( - "M > 32 && K <= 6144 && N <= 6144", - ScheduleConfig( - tile_shape_mn=(128, 16), - cluster_shape_mnk=(1, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 32 
&& K >= 16384 && N >= 12288", - ScheduleConfig( - tile_shape_mn=(256, 64), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 32", - ScheduleConfig( - tile_shape_mn=(128, 64), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 32 && K <= 6144 && N <= 6144": ((128, 16), (1, 1, 1)), + "M > 32 && K >= 16384 && N >= 12288": ((256, 64), (2, 1, 1)), + "M > 32": ((128, 64), (2, 1, 1)), #### M = 17-32 - ( - "M > 16 && K <= 12288 && N <= 8192", - ScheduleConfig( - tile_shape_mn=(128, 32), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), - ( - "M > 16", - ScheduleConfig( - tile_shape_mn=(256, 32), - cluster_shape_mnk=(2, 1, 1), - **schedule_common_params # type: ignore - )), + "M > 16 && K <= 12288 && N <= 8192": ((128, 32), (2, 1, 1)), + "M > 16": ((256, 32), (2, 1, 1)), #### M = 1-16 - ( - "N >= 26624", - ScheduleConfig( - tile_shape_mn=(256, 16), - cluster_shape_mnk=(1, 1, 1), - **schedule_common_params # type: ignore - )), - ( - None, - ScheduleConfig( - tile_shape_mn=(128, 16), - cluster_shape_mnk=(1, 1, 1), - **schedule_common_params # type: ignore - )), + "N >= 26624": ((256, 16), (1, 1, 1)), + None: ((128, 16), (1, 1, 1)), + } + + # For now we use the same heuristic for all types + # Heuristic is currently tuned for H100s + default_heuristic = [ + (cond, ScheduleConfig(*tile_config, + **sch_common_params)) # type: ignore + for cond, tile_config in default_tile_heuristic_config.items() ] - # Do not use schedules = list(set(...)) because we need to make sure - # the output list is deterministic; otherwise the generated kernel file - # will be non-deterministic and causes ccache miss. - schedules = [] - for _, schedule_config in default_heuristic: - if schedule_config not in schedules: - schedules.append(schedule_config) + def get_unique_schedules(heuristic: Dict[str, ScheduleConfig]): + # Do not use schedules = list(set(...)) because we need to make sure + # the output list is deterministic; otherwise the generated kernel file + # will be non-deterministic and causes ccache miss. 
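+        # (Illustrative alternative only, assuming ScheduleConfig were
+        #  hashable: an order-preserving dedup could equivalently be written
+        #      schedules = list(dict.fromkeys(sch for _, sch in heuristic))
+        #  since Python dicts preserve insertion order; the explicit
+        #  membership check below avoids relying on hashability.)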
+ schedules = [] + for _, schedule_config in heuristic: + if schedule_config not in schedules: + schedules.append(schedule_config) + return schedules impl_configs = [] GPTQ_kernel_type_configs = list( TypeConfig( - element_a=element_a, - element_b=element_b, - element_b_scale=element_a, - element_b_zeropoint=element_a, - element_d=element_a, + a=a, + b=b, + b_group_scale=a, + b_group_zeropoint=DataType.void, + b_channel_scale=DataType.void, + a_token_scale=DataType.void, + out=a, accumulator=DataType.f32, - ) for element_b in (VLLMDataType.u4b8, VLLMDataType.u8b128) - for element_a in (DataType.f16, DataType.bf16)) - - GPTQ_kernel_specializations = [ - Specialization(with_C=False, with_zeropoints=False, with_scales=True) - ] + ) for b in (VLLMDataType.u4b8, VLLMDataType.u8b128) + for a in (DataType.f16, DataType.bf16)) impl_configs += [ - ImplConfig(x[0], x[1], x[2], x[3]) - for x in zip(GPTQ_kernel_type_configs, itertools.repeat(schedules), - itertools.repeat(GPTQ_kernel_specializations), + ImplConfig(x[0], x[1], x[2]) + for x in zip(GPTQ_kernel_type_configs, + itertools.repeat(get_unique_schedules(default_heuristic)), itertools.repeat(default_heuristic)) ] AWQ_kernel_type_configs = list( TypeConfig( - element_a=element_a, - element_b=element_b, - element_b_scale=element_a, - element_b_zeropoint=element_a, - element_d=element_a, + a=a, + b=b, + b_group_scale=a, + b_group_zeropoint=a, + b_channel_scale=DataType.void, + a_token_scale=DataType.void, + out=a, accumulator=DataType.f32, - ) for element_b in (DataType.u4, DataType.u8) - for element_a in (DataType.f16, DataType.bf16)) + ) for b in (DataType.u4, DataType.u8) + for a in (DataType.f16, DataType.bf16)) + + impl_configs += [ + ImplConfig(x[0], x[1], x[2]) + for x in zip(AWQ_kernel_type_configs, + itertools.repeat(get_unique_schedules(default_heuristic)), + itertools.repeat(default_heuristic)) + ] - AWQ_kernel_specializations = [ - Specialization(with_C=False, with_zeropoints=True, with_scales=True) + # Stored as "condition": ((tile_shape_mn), (cluster_shape_mnk)) + # TODO (LucasWilkinson): Further tuning required + qqq_tile_heuristic_config = { + #### M = 257+ + # ((128, 256), (2, 1, 1)) Broken for QQQ types + # TODO (LucasWilkinson): Investigate further + # "M > 256 && K <= 16384 && N <= 4096": ((128, 128), (2, 1, 1)), + # "M > 256": ((128, 256), (2, 1, 1)), + "M > 256": ((128, 128), (2, 1, 1)), + #### M = 129-256 + "M > 128 && K <= 4096 && N <= 4096": ((128, 64), (2, 1, 1)), + "M > 128 && K <= 8192 && N <= 8192": ((128, 128), (2, 1, 1)), + # ((128, 256), (2, 1, 1)) Broken for QQQ types + # TODO (LucasWilkinson): Investigate further + # "M > 128": ((128, 256), (2, 1, 1)), + "M > 128": ((128, 128), (2, 1, 1)), + #### M = 65-128 + "M > 64 && K <= 4069 && N <= 4069": ((128, 32), (2, 1, 1)), + "M > 64 && K <= 4069 && N <= 8192": ((128, 64), (2, 1, 1)), + "M > 64 && K >= 8192 && N >= 12288": ((256, 128), (2, 1, 1)), + "M > 64": ((128, 128), (2, 1, 1)), + #### M = 33-64 + "M > 32 && K <= 6144 && N <= 6144": ((128, 16), (1, 1, 1)), + # Broken for QQQ types + # TODO (LucasWilkinson): Investigate further + #"M > 32 && K >= 16384 && N >= 12288": ((256, 64), (2, 1, 1)), + "M > 32": ((128, 64), (2, 1, 1)), + #### M = 17-32 + "M > 16 && K <= 12288 && N <= 8192": ((128, 32), (2, 1, 1)), + "M > 16": ((256, 32), (2, 1, 1)), + #### M = 1-16 + "N >= 26624": ((256, 16), (1, 1, 1)), + None: ((128, 16), (1, 1, 1)), + } + + # For now we use the same heuristic for all types + # Heuristic is currently tuned for H100s + qqq_heuristic = [ + (cond, 
ScheduleConfig(*tile_config, + **sch_common_params)) # type: ignore + for cond, tile_config in qqq_tile_heuristic_config.items() + ] + + QQQ_kernel_types = [ + *(TypeConfig( + a=DataType.s8, + b=VLLMDataType.u4b8, + b_group_scale=b_group_scale, + b_group_zeropoint=DataType.void, + b_channel_scale=DataType.f32, + a_token_scale=DataType.f32, + out=DataType.f16, + accumulator=DataType.s32, + ) for b_group_scale in (DataType.f16, DataType.void)), + *(TypeConfig( + a=DataType.e4m3, + b=VLLMDataType.u4b8, + b_group_scale=b_group_scale, + b_group_zeropoint=DataType.void, + b_channel_scale=DataType.f32, + a_token_scale=DataType.f32, + out=DataType.f16, + accumulator=DataType.f32, + ) for b_group_scale in (DataType.f16, DataType.void)), ] impl_configs += [ - ImplConfig(x[0], x[1], x[2], x[3]) - for x in zip(AWQ_kernel_type_configs, itertools.repeat(schedules), - itertools.repeat(AWQ_kernel_specializations), - itertools.repeat(default_heuristic)) + ImplConfig(x[0], x[1], x[2]) + for x in zip(QQQ_kernel_types, + itertools.repeat(get_unique_schedules(qqq_heuristic)), + itertools.repeat(qqq_heuristic)) ] output_dir = os.path.join(SCRIPT_DIR, "generated") @@ -521,12 +648,11 @@ def generate(): os.makedirs(output_dir) # Render each group of configurations into separate files - for impl_config in impl_configs: - for filename, code in create_sources(impl_config): - filepath = os.path.join(output_dir, f"{filename}.cu") - with open(filepath, "w") as output_file: - output_file.write(code) - print(f"Rendered template to {filepath}") + for filename, code in create_sources(impl_configs): + filepath = os.path.join(output_dir, f"{filename}.cu") + with open(filepath, "w") as output_file: + output_file.write(code) + print(f"Rendered template to {filepath}") if __name__ == "__main__": diff --git a/csrc/quantization/machete/machete_mainloop.cuh b/csrc/quantization/machete/machete_mainloop.cuh index e8e7b14de0da1..816f33a1078e5 100644 --- a/csrc/quantization/machete/machete_mainloop.cuh +++ b/csrc/quantization/machete/machete_mainloop.cuh @@ -171,6 +171,10 @@ struct MacheteCollectiveMma { make_shape(size<0>(TileShape_MNK{}), size<2>(TileShape_MNK{}), Int{}))); + using SmemLayoutACopy = decltype(GmemLayoutA::TVbNbKL_to_offset_copy( + make_shape(size<0>(TileShape_MNK{}), size<2>(TileShape_MNK{}), + Int{}))); + using SmemLayoutAtomARowMajor = decltype(rs_smem_selector(TileShape_MNK{})), @@ -288,14 +292,7 @@ struct MacheteCollectiveMma { static_assert((size<2>(TileShape{}) % size<1>(SmemLayoutAtomScale{})) == 0, "SmemLayoutAtomScale must evenly divide tile k shape."); - // Tile along modes in a way that maximizes the TMA box size. 
- using SmemLayoutACopy = decltype(tile_to_shape( - SmemLayoutAtomARowMajor{}, - make_shape(shape<0>(TileShape{}), shape<2>(TileShape{}), - Int{}), - conditional_t<::cutlass::gemm::detail::is_major<0, StrideA>(), - Step<_2, _1, _3>, Step<_1, _2, _3>>{})); - + // Tile along modes in a way that maximizes the TMA box size using SmemLayoutB = decltype(tile_to_shape( SmemLayoutAtomB{}, make_shape(shape<1>(TileShape{}), shape<2>(TileShape{}), @@ -428,12 +425,12 @@ struct MacheteCollectiveMma { // clang-format on // ((athrid, val), (BlocksM, BlockK), L) -> (storage_idx) - using PrepackedStrideA = decltype(stride(GmemLayoutA::TVbNbKL_to_offset( + using PrepackedStrideA = decltype(stride(GmemLayoutA::TVbNbKL_to_offset_copy( make_shape(int32_t(0), int32_t(0), int32_t(0))))); using ATensor = decltype(make_tensor( get_logical_ptr(static_cast(nullptr)), - shape(GmemLayoutA::TVbNbKL_to_offset( + shape(GmemLayoutA::TVbNbKL_to_offset_copy( make_shape(int32_t(0), int32_t(0), int32_t(0)))), PrepackedStrideA{})); @@ -450,8 +447,8 @@ struct MacheteCollectiveMma { static constexpr auto make_tma_copy_A(ATensor tensor_a = ATensor{}) { return make_tma_copy( - GmemTiledCopyA{}, tensor_a, SmemLayoutA{}(_, _, cute::Int<0>{}), - shape(SmemLayoutA{}(_, _, cute::Int<0>{})), + GmemTiledCopyA{}, tensor_a, SmemLayoutACopy{}(_, _, cute::Int<0>{}), + shape(SmemLayoutACopy{}(_, _, cute::Int<0>{})), size<1>(ClusterShape{})); // mcast along N mode for this M load, if any } @@ -584,7 +581,7 @@ struct MacheteCollectiveMma { typename Params::TMA_Scale tma_load_scale; typename Params::TMA_Zero tma_load_zero; - auto layout = GmemLayoutA::TVbNbKL_to_offset(make_shape(M, K, L)); + auto layout = GmemLayoutA::TVbNbKL_to_offset_copy(make_shape(M, K, L)); tma_load_a = make_tma_copy_A( make_logical_tensor(ptr_A, shape(layout), stride(layout))); @@ -722,7 +719,7 @@ struct MacheteCollectiveMma { // (TILE_V,TILE_B,m,k,l) auto make_gA_mkl = [&]() { // ((athrid, val), (BlocksM, BlockK), L) -> (storage_idx) - auto layout = GmemLayoutA::TVbNbKL_to_offset(make_shape(M, K, L)); + auto layout = GmemLayoutA::TVbNbKL_to_offset_copy(make_shape(M, K, L)); Tensor mA_mkl = mainloop_params.tma_load_a.get_tma_tensor(shape(layout)); return local_tile(mA_mkl, make_shape(size<0>(layout), PPBlocksPerTile_MK{}), diff --git a/csrc/quantization/machete/machete_mm_kernel.cuh b/csrc/quantization/machete/machete_mm_kernel.cuh index 4d41b8d291484..d4d19ae5deec7 100644 --- a/csrc/quantization/machete/machete_mm_kernel.cuh +++ b/csrc/quantization/machete/machete_mm_kernel.cuh @@ -21,6 +21,8 @@ #include "cutlass_extensions/cute_utils.cuh" #include "cutlass_extensions/vllm_numeric_conversion.cuh" +#include "cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp" +#include "cutlass_extensions/torch_utils.hpp" #include "machete_collective_builder.cuh" #include "machete_prepacked_layout.cuh" #include "machete_interleaving_utils.cuh" @@ -37,27 +39,42 @@ using namespace cute; // W is quantized, in this situation or right-hand operand is quantized so // we compute the transpose to move it to the left-hand side. 
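// (Clarifying sketch of the transposition mentioned above: for the logical
//  problem D = A @ B, with A the MxK activations and B the KxN quantized
//  weights, the kernel below is effectively instantiated on the transposed
//  problem D^T = B^T @ A^T, so the quantized operand becomes the left-hand
//  "A" operand of the underlying CUTLASS mainloop. This is why the mainloop
//  arguments later pass B_ptr before A_ptr and why the epilogue is built
//  with LayoutC_Transpose / LayoutD_Transpose, using strides taken from
//  permute_layout<1, 0, 2> of the output layout.)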
template + typename AccumulatorT, typename GroupScaleT, typename GroupZeroT, + typename ChannelScaleT, typename TokenScaleT, class KernelSchedule, + typename ScheduleConfig> struct MacheteKernelTemplate { + static constexpr bool with_C = false; // not ever used + static constexpr bool with_group_scales = !std::is_same_v; + static constexpr bool with_group_zeropoints = + !std::is_same_v; + static constexpr bool with_channel_scales = + !std::is_same_v; + static constexpr bool with_token_scales = !std::is_same_v; + using MmaType = ElementA_; using ElementA = ElementA_; using ElementB = ElementB_; using ElementD = ElementD_; using ElementC = cute::conditional_t; - using ElementZ = ZeroT; - using ElementS = ScaleT; - - using ElementAccumulator = - AccumulatorT; // Element type for internal accumulation + using ElementAccumulator = AccumulatorT; using ElementCompute = AccumulatorT; // For Epilogue + // Use dummy values when we don't have scales or zeropoints + using ElementZGroup = + cute::conditional_t; + using ElementSGroup = + cute::conditional_t; + using ElementConvertGroup = + cute::conditional_t; + using ElementSChannel = + cute::conditional_t; + using ElementSToken = + cute::conditional_t; using BTypeTuple = cute::conditional_t< - with_scales, - cute::conditional_t, - cute::tuple>, + with_group_scales, + cute::conditional_t, + cute::tuple>, ElementB>; using LayoutA = cutlass::layout::RowMajor; @@ -71,8 +88,8 @@ struct MacheteKernelTemplate { using StrideA = cutlass::detail::TagToStrideA_t; using StrideC = cutlass::detail::TagToStrideA_t; using StrideD = cutlass::detail::TagToStrideA_t; - using StrideS = cutlass::detail::TagToStrideA_t; - using StrideZ = StrideS; + using StrideSGroup = cutlass::detail::TagToStrideA_t; + using StrideZGroup = StrideSGroup; using LayoutA_Transpose = typename cutlass::layout::LayoutTranspose::type; @@ -85,8 +102,8 @@ struct MacheteKernelTemplate { using OperatorClass = cutlass::arch::OpClassTensorOp; using PrepackedLayoutB = - PrepackedLayoutBTemplate; + PrepackedLayoutBTemplate; static int constexpr TileShapeK = 128 * 8 / cutlass::sizeof_bits::value; @@ -103,12 +120,42 @@ struct MacheteKernelTemplate { using EpilogueTileType = typename ScheduleConfig::EpilogueTileType; using TileScheduler = typename ScheduleConfig::TileScheduler; + static_assert( + (!with_channel_scales && !with_token_scales) || + ((with_channel_scales && with_token_scales) && + std::is_same_v), + "Currently token and channel scales (if present) must be the same type"); + + using EpilogueDescriptor = + cutlass::epilogue::collective::detail::EpilogueDescriptor< + TileShape, cutlass::epilogue::collective::EpilogueTileAuto, ElementD, + ElementD, EpilogueSchedule>; + + // Currently only supports float scales + using ChTokScalesEpilogue = + typename vllm::c3x::ScaledEpilogue; + static_assert((with_channel_scales || with_token_scales) || + (std::is_same_v && + std::is_same_v), + "Currently token and channel scales (if present) must be float " + "(and if one is present the other must be too)"); + + using StoreEpilogueCompute = typename cutlass::epilogue::fusion::Sm90EVT< + cutlass::epilogue::fusion::Sm90AccFetch>; + + using EVTCompute = + std::conditional_t; + + // EVTCompute using CollectiveEpilogue = typename cutlass::epilogue::collective::CollectiveBuilder< ArchTag, OperatorClass, TileShape, ClusterShape, EpilogueTileType, - ElementAccumulator, ElementAccumulator, ElementC, LayoutC_Transpose, - AlignmentC, ElementD, LayoutD_Transpose, AlignmentD, - EpilogueSchedule>::CollectiveOp; + 
ElementAccumulator, ElementSChannel, ElementC, LayoutC_Transpose, + AlignmentC, ElementD, LayoutD_Transpose, AlignmentD, EpilogueSchedule, + EVTCompute>::CollectiveOp; using CollectiveMainloop = typename cutlass::gemm::collective::VLLMCollectiveBuilder< @@ -131,26 +178,44 @@ struct MacheteKernelTemplate { using MainloopArguments = typename GemmKernel::MainloopArguments; using EpilogueArguments = typename GemmKernel::EpilogueArguments; - template static Arguments create_arguments( cudaStream_t stream, - ElementA const* A_ptr, // A is an MxK matrix - Layout const& layout_A, - ElementB const* B_ptr, // B is an KxN prepacked matrix - ElementD* D_ptr, // D is an MxN matrix - Layout const& layout_D, - ElementC const* C_ptr, // C is an MxN matrix - std::optional> const& layout_C, - ElementS const* S_ptr, // S is an scale_KxN matrix - std::optional> const& layout_S, - ElementZ const* Z_ptr, // Z is an scale_KxN matrix - std::optional> const& layout_Z, - ElementCompute alpha, ElementCompute beta, - std::optional maybe_group_size) { - static_assert(!with_zeropoints || with_scales); - - int M = size<0>(layout_A), N = size<1>(layout_D), K = size<1>(layout_A); + torch::Tensor const& A, // MxK matrix + torch::Tensor const& B, // KxN prepacked matrix + torch::Tensor& D, // MxN matrix + c10::optional const& maybe_g_scales, // scale_KxN matrix + c10::optional const& maybe_g_zeros, // scale_KxN matrix + c10::optional maybe_group_size, + c10::optional const& maybe_ch_scales, // len N vector + c10::optional const& maybe_tok_scales) // len M vector + { + static_assert(!with_group_zeropoints || with_group_scales); + + int M = A.size(0), N = B.size(1), K = A.size(1); + TORCH_CHECK(D.size(0) == M && D.size(1) == N); + + auto layout_A = make_cute_layout(A, "A"); + auto layout_D = make_cute_layout(D, "D"); + auto layout_S_group = + maybe_make_cute_layout(maybe_g_scales, "group_scales"); + auto layout_Z_group = + maybe_make_cute_layout(maybe_g_zeros, "group_zeros"); + int64_t numel_S_channel = maybe_ch_scales ? maybe_ch_scales->numel() : 0; + int64_t numel_S_token = maybe_tok_scales ? maybe_tok_scales->numel() : 0; + + auto unwrap = [](auto const& t) { + return t ? t->const_data_ptr() : nullptr; + }; + auto A_ptr = static_cast(A.const_data_ptr()); + auto B_ptr = static_cast(B.const_data_ptr()); + auto D_ptr = static_cast(D.mutable_data_ptr()); + auto S_group_ptr = + static_cast(unwrap(maybe_g_scales)); + auto Z_group_ptr = static_cast(unwrap(maybe_g_zeros)); + auto S_channel_ptr = + static_cast(unwrap(maybe_ch_scales)); + auto S_token_ptr = + static_cast(unwrap(maybe_tok_scales)); int const group_size = maybe_group_size == -1 ? 
K : maybe_group_size.value_or(K); @@ -159,26 +224,28 @@ struct MacheteKernelTemplate { TORCH_CHECK(size<0>(layout_A) == M && size<1>(layout_A) == K); TORCH_CHECK(size<0>(layout_D) == M && size<1>(layout_D) == N); - if constexpr (with_C) { - TORCH_CHECK(C_ptr && layout_C); + if constexpr (with_group_scales) { + TORCH_CHECK(S_group_ptr && layout_S_group); + TORCH_CHECK((size<0>(*layout_S_group) == scale_k && + size<1>(*layout_S_group) == N)); } else { - TORCH_CHECK(!C_ptr, "C not supported"); + TORCH_CHECK(!S_group_ptr, "Scales not supported"); } - if constexpr (with_scales) { - TORCH_CHECK(S_ptr && layout_S); - TORCH_CHECK((size<0>(*layout_S) == scale_k && size<1>(*layout_S) == N)); + if constexpr (with_group_zeropoints) { + TORCH_CHECK(Z_group_ptr && layout_Z_group); + TORCH_CHECK((size<0>(*layout_Z_group) == scale_k && + size<1>(*layout_Z_group) == N)); + TORCH_CHECK(layout_S_group && *layout_Z_group == *layout_S_group, + "Scales and zeros must have the same layout"); } else { - TORCH_CHECK(!S_ptr, "Scales not supported"); + TORCH_CHECK(!Z_group_ptr, "Zeropoints not supported"); } - if constexpr (with_zeropoints) { - TORCH_CHECK(Z_ptr && layout_Z); - TORCH_CHECK((size<0>(*layout_Z) == scale_k && size<1>(*layout_Z) == N)); - TORCH_CHECK(layout_S && *layout_Z == *layout_S, - "Scales and zeros must have the same layout"); - } else { - TORCH_CHECK(!Z_ptr, "Zeropoints not supported"); + if constexpr (with_channel_scales || with_token_scales) { + TORCH_CHECK( + (maybe_ch_scales->numel() == N || maybe_ch_scales->numel() == 1) && + (maybe_tok_scales->numel() == M || maybe_tok_scales->numel() == 1)); } // Transpose A and D @@ -186,24 +253,33 @@ struct MacheteKernelTemplate { // for B (which is At) auto stride_At = layout_A.stride(); auto stride_Dt = permute_layout<1, 0, 2>(layout_D).stride(); - auto stride_Ct = stride_Dt; - if (layout_C) { - stride_Ct = permute_layout<1, 0, 2>(*layout_C).stride(); - } MainloopArguments mainloop_arguments{}; - EpilogueArguments epilogue_arguments{ - {alpha, beta}, C_ptr, stride_Ct, D_ptr, stride_Dt}; + // {Accum, C, C_layout, D, D} + EpilogueArguments epilogue_arguments{}; + + if constexpr (with_channel_scales || with_token_scales) { + epilogue_arguments = + EpilogueArguments{ChTokScalesEpilogue::prepare_args( + *maybe_ch_scales, *maybe_tok_scales), + nullptr, + {}, + D_ptr, + stride_Dt}; + } else { + epilogue_arguments = EpilogueArguments{{}, nullptr, {}, D_ptr, stride_Dt}; + } - if constexpr (with_scales && with_zeropoints) { - auto stride_S = permute_layout<1, 0, 2>(*layout_S).stride(); - mainloop_arguments = - MainloopArguments{B_ptr, _StrideB{}, A_ptr, stride_At, - S_ptr, stride_S, group_size, Z_ptr}; - } else if constexpr (with_scales) { - auto stride_S = permute_layout<1, 0, 2>(*layout_S).stride(); + if constexpr (with_group_scales && with_group_zeropoints) { + auto stride_S_group = permute_layout<1, 0, 2>(*layout_S_group).stride(); mainloop_arguments = MainloopArguments{ - B_ptr, _StrideB{}, A_ptr, stride_At, S_ptr, stride_S, group_size}; + B_ptr, _StrideB{}, A_ptr, stride_At, + S_group_ptr, stride_S_group, group_size, Z_group_ptr}; + } else if constexpr (with_group_scales) { + auto stride_S_group = permute_layout<1, 0, 2>(*layout_S_group).stride(); + mainloop_arguments = + MainloopArguments{B_ptr, _StrideB{}, A_ptr, stride_At, + S_group_ptr, stride_S_group, group_size}; } else { mainloop_arguments = MainloopArguments{B_ptr, _StrideB{}, A_ptr, stride_At}; diff --git a/csrc/quantization/machete/machete_mm_launcher.cuh 
b/csrc/quantization/machete/machete_mm_launcher.cuh index 60a4ed60535b7..4b0da5b303e0c 100644 --- a/csrc/quantization/machete/machete_mm_launcher.cuh +++ b/csrc/quantization/machete/machete_mm_launcher.cuh @@ -5,73 +5,61 @@ #include "machete_mm_kernel.cuh" #include "cutlass_extensions/torch_utils.hpp" +#include "core/scalar_type.hpp" namespace machete { -struct PyTorchArguments { +struct MMArgs { torch::Tensor const& A; torch::Tensor const& B; - c10::optional const& scales; - c10::optional const& zeros; - c10::optional group_size; - c10::optional const& C; - c10::optional alpha; - c10::optional beta; - c10::optional schedule; + vllm::ScalarType const& b_type; + c10::optional const& maybe_out_type; + c10::optional const& maybe_group_scales; + c10::optional const& maybe_group_zeros; + c10::optional maybe_group_size; + c10::optional const& maybe_channel_scales; + c10::optional const& maybe_token_scales; + c10::optional maybe_schedule; }; +struct SupportedSchedulesArgs { + at::ScalarType a_type; + vllm::ScalarType b_type; + c10::optional maybe_group_scales_type; + c10::optional maybe_group_zeros_type; + c10::optional maybe_channel_scales_type; + c10::optional maybe_token_scales_type; + c10::optional maybe_out_type; +}; + +torch::Tensor mm_dispatch(MMArgs args); + +std::vector supported_schedules_dispatch( + SupportedSchedulesArgs args); + template -torch::Tensor run_impl(PyTorchArguments args) { +torch::Tensor run_impl(MMArgs args) { const at::cuda::OptionalCUDAGuard device_guard(device_of(args.A)); auto device = args.A.device(); auto stream = at::cuda::getCurrentCUDAStream(device.index()); - using EleA = typename MacheteKernel::ElementA; - using EleB = typename MacheteKernel::ElementB; - using EleC = typename MacheteKernel::ElementC; - using EleD = typename MacheteKernel::ElementD; - using EleScale = typename MacheteKernel::ElementS; - using EleZero = typename MacheteKernel::ElementZ; - - using StrideA = typename MacheteKernel::StrideA; - using StrideC = typename MacheteKernel::StrideC; - using StrideD = typename MacheteKernel::StrideD; - using StrideS = typename MacheteKernel::StrideS; - using StrideZ = typename MacheteKernel::StrideZ; - int M = args.A.size(0); int N = args.B.size(1); int K = args.A.size(1); // Allocate output - torch::Tensor D = - torch::empty({M, N}, torch::TensorOptions() - .dtype(equivalent_scalar_type_v) - .device(device)); - - auto const &A = args.A, &B = args.B; - auto const &C = args.C, &scales = args.scales, &zeros = args.zeros; - - auto layout_A = make_cute_layout(A, "A"); - auto layout_D = make_cute_layout(D, "D"); - auto layout_C = maybe_make_cute_layout(C, "C"); - auto layout_S = maybe_make_cute_layout(scales, "scales"); - auto layout_Z = maybe_make_cute_layout(zeros, "zeros"); - - auto A_ptr = static_cast(A.const_data_ptr()); - auto B_ptr = static_cast(B.const_data_ptr()); - auto D_ptr = static_cast(D.mutable_data_ptr()); - auto C_ptr = static_cast(C ? C->const_data_ptr() : nullptr); - auto S_ptr = - static_cast(scales ? scales->const_data_ptr() : nullptr); - auto Z_ptr = - static_cast(zeros ? 
zeros->const_data_ptr() : nullptr); + torch::Tensor D = torch::empty( + {M, N}, + torch::TensorOptions() + .dtype(equivalent_scalar_type_v) + .device(device)); auto arguments = MacheteKernel::create_arguments( - stream, A_ptr, layout_A, B_ptr, D_ptr, layout_D, C_ptr, layout_C, S_ptr, - layout_S, Z_ptr, layout_Z, args.alpha.value_or(1), args.beta.value_or(0), - args.group_size); + stream, // + args.A, args.B, D, args.maybe_group_scales, args.maybe_group_zeros, + args.maybe_group_size, args.maybe_channel_scales, + args.maybe_token_scales); TORCH_CHECK(MacheteKernel::can_implement(arguments), "Machete kernel cannot be run with these arguments"); @@ -84,12 +72,4 @@ torch::Tensor run_impl(PyTorchArguments args) { return D; }; -template -struct GemmDispatcher { - static torch::Tensor dispatch(PyTorchArguments args); - static std::vector supported_schedules(); -}; - }; // namespace machete \ No newline at end of file diff --git a/csrc/quantization/machete/machete_prepack_kernel.cuh b/csrc/quantization/machete/machete_prepack_kernel.cuh index f23483f928b47..d002355ca49d6 100644 --- a/csrc/quantization/machete/machete_prepack_kernel.cuh +++ b/csrc/quantization/machete/machete_prepack_kernel.cuh @@ -6,31 +6,49 @@ namespace machete { -template -static __global__ void prepack_B_kernel(BInTensor B_in, - BTiledOutTensor B_tiled_out) { - auto tB_in = local_tile(B_in, TileShapeNKL{}, - make_coord(blockIdx.x, blockIdx.y, blockIdx.z)); - auto tB_out = B_tiled_out(make_coord(_, _), - make_coord(blockIdx.x, blockIdx.y), blockIdx.z); +template +static __global__ void prepack_B_kernel(BInTensor B_in, ElementB* B_out_ptr) { + auto constexpr block_size = + Int{}; + auto constexpr eles_per_thread = Int{}; + static_assert(block_size % threads == 0, + "block_size must be divisible by the number of threads"); - auto tiled_copy = make_tiled_copy(Copy_Atom{}, - Layout, Stride<_32, _1>>{}, - Layout>{}); + // Which pre-packed are we responsible for + auto blk_coord = make_coord(blockIdx.x, blockIdx.y, blockIdx.z); + auto tB_in = local_tile( + B_in, append(typename PrepackedLayoutB::PPBlockShape_NK{}, _1{}), + blk_coord); - auto thr_copy = tiled_copy.get_thread_slice(threadIdx.x); + // Find the start offset in the output for this pre-packed block + auto bNbKL_to_offset = PrepackedLayoutB::bNbKL_to_offset(shape(B_in)); - Tensor thr_tile_S = thr_copy.partition_S(tB_in); - Tensor thr_tile_D = thr_copy.partition_D(tB_out); + // Tensor representing a 1:1 mapping to the output space in 1D + auto tB_out_linear = + make_tensor(get_logical_ptr(B_out_ptr) + bNbKL_to_offset(blk_coord), + make_layout(make_shape(block_size))); + // Mapping from output space (1D) to input space + auto tB_in_linear = make_tensor( + tB_in.data(), + tB_in.layout() + .compose(right_inverse(PrepackedLayoutB::ppblock_ilvd_NK_to_offset())) + .with_shape(make_shape(block_size))); + + // Tile for this specific thread (could have used a TiledCopy but these work + // best with 2d layouts, this is a simple 1d layout so local_tile is enough, + // we are also not that concerned with performance for this kernel) + auto thr_tB_in_linear = + local_tile(tB_in_linear, make_shape(eles_per_thread), threadIdx.x); + auto thr_tB_out_linear = + local_tile(tB_out_linear, make_shape(eles_per_thread), threadIdx.x); // Construct a register-backed Tensor with the same shape as each thread's // partition - auto fragment = make_tensor(shape(thr_tile_D)); + auto fragment = make_tensor(shape(thr_tB_in_linear)); - // Copy from GMEM to RMEM and from RMEM to GMEM - copy(tiled_copy, 
thr_tile_S, fragment); - copy(Copy_Atom{}, fragment, thr_tile_D); + copy(thr_tB_in_linear, fragment); + copy(Copy_Atom{}, fragment, thr_tB_out_linear); } template @@ -44,18 +62,15 @@ static void prepack_B_template( TORCH_CHECK(size<0>(B_layout) % size<0>(TileShapeNKL{}) == 0); TORCH_CHECK(size<1>(B_layout) % size<1>(TileShapeNKL{}) == 0); - TORCH_CHECK(size<2>(B_layout) % size<2>(TileShapeNKL{}) == 0); auto N_tiles = size<0>(B_layout) / size<0>(TileShapeNKL{}); auto K_tiles = size<1>(B_layout) / size<1>(TileShapeNKL{}); - auto L_tiles = size<2>(B_layout) / size<2>(TileShapeNKL{}); + auto L_tiles = size<2>(B_layout); auto B_in = make_tensor(get_logical_ptr(B_in_ptr), B_layout); - auto B_tiled_out = - make_tensor(get_logical_ptr(B_out_ptr), ilvd_NKbNbKL_to_offset); - prepack_B_kernel - <<>>(B_in, B_tiled_out); + prepack_B_kernel<128, PrepackedLayoutB> + <<>>(B_in, B_out_ptr); } }; // namespace machete \ No newline at end of file diff --git a/csrc/quantization/machete/machete_prepack_launcher.cuh b/csrc/quantization/machete/machete_prepack_launcher.cuh index a33d8f9484cfe..3486d28be2126 100644 --- a/csrc/quantization/machete/machete_prepack_launcher.cuh +++ b/csrc/quantization/machete/machete_prepack_launcher.cuh @@ -2,9 +2,17 @@ #include "machete_prepack_kernel.cuh" #include "cutlass_extensions/torch_utils.hpp" +#include "core/scalar_type.hpp" namespace machete { +struct PrepackBArgs { + torch::Tensor const& B; + at::ScalarType a_type; + vllm::ScalarType b_type; + c10::optional maybe_group_scales_type; +}; + template torch::Tensor prepack_impl(torch::Tensor const B) { const at::cuda::OptionalCUDAGuard device_guard(device_of(B)); @@ -61,11 +69,6 @@ torch::Tensor prepack_impl(torch::Tensor const B) { return D; }; -template -struct PrepackBDispatcher { - static torch::Tensor dispatch(torch::Tensor B); -}; +torch::Tensor prepack_B_dispatch(PrepackBArgs args); }; // namespace machete \ No newline at end of file diff --git a/csrc/quantization/machete/machete_prepacked_layout.cuh b/csrc/quantization/machete/machete_prepacked_layout.cuh index 78e2cc5eec7d8..680a858a893c1 100644 --- a/csrc/quantization/machete/machete_prepacked_layout.cuh +++ b/csrc/quantization/machete/machete_prepacked_layout.cuh @@ -41,7 +41,7 @@ struct IlvBlkLayoutAuto {}; // The contract here is that the `TiledMma` determined below matches the one // ultimately used in the kernel. 
(this is also why the other element types are // required along with the kernel schedule) -template // clang-format on @@ -49,20 +49,27 @@ struct PrepackedLayoutBTemplate { using MmaType = ElementA_; using ElementA = ElementA_; using ElementB = ElementB_; - using ElementD = ElementD_; - using ElementAccumulator = - AccumulatorT; // Element type for internal accumulation + using ElementAccumulator = AccumulatorT; using ElementMma = MmaType; - // Only use interleaved layouts for subbyte weights, prmt instructions makes - // non-interleaved layouts for 8bit+ weights efficient enough we don't need - // iterleaved layouts + // Interleave for 4bit bit types when we are not upconverting to fp8 or int8, + // in those cases case we use a LUT using prmt instructions to upconvert and + // is more efficient if the data is not interleaved For 8bit+ prmt + // instructions makes non-interleaved layouts efficient enough we don't need + // iterleaved layouts (and can reuse more of the existing cutlass converts) + static constexpr bool should_interleave = + sizeof_bits_v <= 4 && + !std::is_same_v && + !std::is_same_v; + + // Only use interleaved layouts for subbyte weights, using IlvdBlkLayout = std::conditional_t< std::is_same_v, - std::conditional_t <= 4, - decltype(get_interleaved_blk_layout< - ElementB, sizeof_bits_v, 32>()), - void>, + std::conditional_t< + should_interleave, + decltype(get_interleaved_blk_layout< + ElementB, sizeof_bits_v, 32>()), + void>, IlvBlkLayout_>; // TODO (LucasWilkinson): compare the performance for other sizes @@ -135,7 +142,8 @@ struct PrepackedLayoutBTemplate { // then ((IlvBlk), FrgB) is {A, C, B, D, C, G, D, H} auto frgV = get<1, 0>(layout_no_interleave); auto ilvdBlk = IlvdBlkLayout{}; - static_assert(size(frgV) % 4 == 0, "FrgV must be divisible by 4"); + static_assert(size(frgV) % size(ilvdBlk) == 0, + "FrgV must be divisible by size(ilvdBlk)"); auto ilvd_FrgV = make_layout( make_shape(shape(ilvdBlk), Int{}), make_stride(stride(ilvdBlk), size(ilvdBlk))); @@ -175,6 +183,15 @@ struct PrepackedLayoutBTemplate { return group<1, 3>(result(_, repeat(result)>(_))); } + // ((athrid_val), (BlocksN, BlocksK, L)) -> (N, K, L) + template + CUTE_HOST_DEVICE static constexpr auto TVbNbKL_to_offset_copy( + Shape_NKL shape_mkl) { + auto layout = TVbNbKL_to_offset(shape_mkl); + return make_layout(coalesce(get<0>(layout)), get<1>(layout), + get<2>(layout)); + } + // ((BlockN, BlockK), (BlocksN, BlocksK), L) -> (storage_idx) template CUTE_HOST_DEVICE static constexpr auto ilvd_NKbNbKL_to_offset( @@ -197,6 +214,19 @@ struct PrepackedLayoutBTemplate { return group<1, 3>(result(_, repeat(result)>(_))); } + // (BlocksN, BlocksK, L) -> (storage_idx) + template + CUTE_HOST_DEVICE static constexpr auto bNbKL_to_offset(Shape_NKL shape_mkl) { + // (BlocksN, BlocksK, L) + auto blocks_shape = + cute::transform(shape_mkl, append(PPBlockShape_NK{}, _1{}), + [](auto x, auto y) { return x / y; }); + auto stride = size(PPBlockShape_NK{}); + + // (BlocksN, BlocksK, L) -> (storage_idx) + return make_layout(blocks_shape, compact_col_major(blocks_shape, stride)); + } + // ((athrid, val), (BlocksN, BlocksK, L)) -> (N, K, L) template CUTE_HOST_DEVICE static auto TVbNbK_to_NKL(Shape_NKL shape_mkl) { diff --git a/csrc/quantization/machete/machete_pytorch.cu b/csrc/quantization/machete/machete_pytorch.cu index 9f9073ded6191..da2c2fb0d3e77 100644 --- a/csrc/quantization/machete/machete_pytorch.cu +++ b/csrc/quantization/machete/machete_pytorch.cu @@ -8,89 +8,61 @@ namespace machete { using namespace vllm; -// -// 
Utils (type dispatching) -// - -template -static auto scalar_type_dispatch(ScalarType const& type, Fn fn) { - if (type == vllm::kU4) { - return fn(cutlass::uint4b_t{}); - } else if (type == vllm::kU8) { - return fn(cutlass::uint8_t{}); - } else if (type == vllm::kU4B8) { - return fn(cutlass::vllm_uint4b8_t{}); - } else if (type == vllm::kU8B128) { - return fn(cutlass::vllm_uint8b128_t{}); - } else { - TORCH_CHECK(false, "Unsupported type ", type.str()); - } -} - -#define AT_DISPATCH_CASE_SUPPORTED_COMPUTE_TYPES(...) \ - AT_DISPATCH_CASE_REDUCED_FLOATING_TYPES(__VA_ARGS__) - -#define AT_DISPATCH_SUPPORTED_COMPUTE_TYPES(TYPE, NAME, ...) \ - AT_DISPATCH_SWITCH(TYPE, NAME, \ - AT_DISPATCH_CASE_SUPPORTED_COMPUTE_TYPES(__VA_ARGS__)) - -// -// Interface -// - -std::vector supported_schedules(ScalarTypeId const btype_id) { -#if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 12 - vllm::ScalarType b_type = ScalarType::from_id(btype_id); - return scalar_type_dispatch(b_type, [&](auto BType) { - return GemmDispatcher::supported_schedules(); +std::vector supported_schedules( + at::ScalarType a_type, int64_t b_type_id, + c10::optional maybe_group_scales_type, + c10::optional maybe_group_zeros_type, + c10::optional maybe_channel_scales_type, + c10::optional maybe_token_scales_type, + c10::optional maybe_out_type) { + ScalarType const b_type = ScalarType::from_id(b_type_id); + return supported_schedules_dispatch({ + .a_type = a_type, + .b_type = b_type, + .maybe_group_scales_type = maybe_group_scales_type, + .maybe_group_zeros_type = maybe_group_zeros_type, + .maybe_channel_scales_type = maybe_channel_scales_type, + .maybe_token_scales_type = maybe_token_scales_type, + .maybe_out_type = maybe_out_type, }); -#else - TORCH_CHECK(false, "Machete requires CUDA 12.0 or later"); -#endif } -torch::Tensor gemm(torch::Tensor const& A, torch::Tensor const& B, - ScalarTypeId const btype_id, - c10::optional const& scales, - c10::optional const& zeros, - c10::optional group_size, - c10::optional const& C, - c10::optional alpha, c10::optional beta, - c10::optional schedule) { -#if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 12 - ScalarType const btype = ScalarType::from_id(btype_id); - auto args = PyTorchArguments{.A = A, - .B = B, - .scales = scales, - .zeros = zeros, - .group_size = group_size, - .C = C, - .alpha = alpha, - .beta = beta, - .schedule = schedule}; - - return scalar_type_dispatch(btype, [&](auto BType) { - return AT_DISPATCH_SUPPORTED_COMPUTE_TYPES( - A.scalar_type(), "machete_gemm", [&] { - using ComputeType = equivalent_cutlass_type_t; - return GemmDispatcher::dispatch(args); - }); - }); -#else - TORCH_CHECK(false, "Machete requires CUDA 12.0 or later"); -#endif +torch::Tensor mm(torch::Tensor const& A, torch::Tensor const& B, + int64_t b_type_id, + c10::optional const& maybe_out_type, + c10::optional const& maybe_group_scales, + c10::optional const& maybe_group_zeros, + c10::optional maybe_group_size, + c10::optional const& maybe_channel_scales, + c10::optional const& maybe_token_scales, + c10::optional maybe_schedule) { + ScalarType const b_type = ScalarType::from_id(b_type_id); + return mm_dispatch({.A = A, + .B = B, + .b_type = b_type, + .maybe_out_type = maybe_out_type, + .maybe_group_scales = maybe_group_scales, + .maybe_group_zeros = maybe_group_zeros, + .maybe_group_size = maybe_group_size, + .maybe_channel_scales = maybe_channel_scales, + .maybe_token_scales = maybe_token_scales, + .maybe_schedule = maybe_schedule}); } -torch::Tensor prepack_B(torch::Tensor const& B, 
ScalarTypeId const btype_id) { - ScalarType const btype = ScalarType::from_id(btype_id); - return scalar_type_dispatch(btype, [&](auto BType) { - return PrepackBDispatcher::dispatch(B); - }); +torch::Tensor prepack_B( + torch::Tensor const& B, at::ScalarType const& a_type, int64_t b_type_id, + c10::optional const& maybe_group_scales_type) { + ScalarType const b_type = ScalarType::from_id(b_type_id); + return prepack_B_dispatch( + {.B = B, + .a_type = a_type, + .b_type = b_type, + .maybe_group_scales_type = maybe_group_scales_type}); } TORCH_LIBRARY_IMPL_EXPAND(TORCH_EXTENSION_NAME, CUDA, m) { m.impl("machete_prepack_B", &prepack_B); - m.impl("machete_gemm", &gemm); + m.impl("machete_mm", &mm); } // use CatchAll since supported_schedules has no tensor arguments diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index 229fd554d3eee..e4cc7ec951848 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -203,13 +203,36 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { // conditionally compiled so impl in source file // Machete (Dense) Optimized Mixed Precision GEMM for Hopper. - ops.def("machete_supported_schedules(int btype) -> str[]"); ops.def( - "machete_gemm(Tensor A, Tensor B, int btype, " - " Tensor? scales, Tensor? zeros, int? group_size, " - " Tensor? C, float? alpha, float? beta, str? schedule)" - "-> Tensor"); - ops.def("machete_prepack_B(Tensor B, int btype) -> Tensor"); + "machete_supported_schedules(" + " ScalarType a_type," + " int b_type," + " ScalarType? maybe_group_scales_type," + " ScalarType? maybe_group_zeros_type," + " ScalarType? maybe_channel_scales_type," + " ScalarType? maybe_token_scales_type," + " ScalarType? maybe_out_type" + ") -> str[]"); + ops.def( + "machete_mm(" + " Tensor A," + " Tensor B," + " int b_type," + " ScalarType? out_type," + " Tensor? group_scales," + " Tensor? group_zeros," + " int? group_size," + " Tensor? channel_scales," + " Tensor? token_scales," + " str? schedule" + ") -> Tensor"); + ops.def( + "machete_prepack_B(" + " Tensor B," + " ScalarType a_type," + " int b_type," + " ScalarType? group_scales_type" + ") -> Tensor"); // conditionally compiled so impl registration is in source file ops.def("permute_cols(Tensor A, Tensor perm) -> Tensor"); diff --git a/tests/kernels/test_machete_gemm.py b/tests/kernels/test_machete_gemm.py deleted file mode 100644 index 59c0a24753c3b..0000000000000 --- a/tests/kernels/test_machete_gemm.py +++ /dev/null @@ -1,284 +0,0 @@ -"""Tests for the machete kernel. - -Run `pytest tests/kernels/test_machete_gemm.py`. 
-""" - -import math -from typing import Optional, Tuple - -import pytest -import torch - -from tests.kernels.utils import opcheck -from vllm import _custom_ops as ops -from vllm.model_executor.layers.quantization.utils.quant_utils import ( - pack_rows, quantize_weights) -from vllm.platforms import current_platform -from vllm.scalar_type import ScalarType, scalar_types - -CUDA_DEVICES = [ - f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) -] - -MNK_SHAPES = [ - (1, 128, 128), - (1, 512, 1024), - (1, 4096, 4096), - (1, 8192, 28672), - (13, 8192, 4096), - (26, 4096, 8192), - (64, 4096, 4096), - (64, 8192, 28672), - (257, 128, 4096), - (257, 4224, 4160), - (257, 4096, 4096), - (1024, 4096, 8192), - (1024, 8192, 4096), -] - -ACT_TYPES = [torch.float16, torch.bfloat16] -WTYPE_ZEROPOINTS = [ - # GPTQ style - (scalar_types.uint4b8, False), - (scalar_types.uint8b128, False), - # AWQ style - (scalar_types.uint4, True), - (scalar_types.uint8, True), -] - -# TODO: in future PR refactor this and `is_quant_method_supported` in the kernel -# unit tests to a common utility function. Currently the use of -# `is_quant_method_supported` conflates kernels with quantization methods -# an assumption which is breaking down as quantizations methods can have -# have kernels and some kernels support multiple quantization methods. -IS_SUPPORTED_BY_GPU = current_platform.has_device_capability(90) - - -def rand_data(shape, dtype=torch.float16): - return 10 * (torch.rand(shape, dtype=dtype, device="cuda") - 0.3) - - -def maybe_convert_zeropoints(zps: Optional[torch.Tensor], s: torch.Tensor): - return zps if zps is None else -1 * s * (zps.to(s.dtype)) - - -def machete_quantize_and_pack(w: torch.Tensor, - wtype: ScalarType, - group_size: int, - zero_points: bool = False): - assert wtype.is_integer(), "TODO: support floating point weights" - - w_ref, w_q, w_s, w_zp = quantize_weights( - w, - wtype, - group_size, - zero_points=zero_points, - # to match how the kernel applies zps - ref_zero_points_after_scales=True) - - w_q = pack_rows(w_q, wtype.size_bits, *w_q.shape) - w_q = w_q.t().contiguous().t() # convert to col major - w_q_machete = ops.machete_prepack_B(w_q, wtype) - - opcheck(torch.ops._C.machete_prepack_B, (w_q, wtype.id)) - - return w_ref, w_q_machete, w_s, w_zp - - -def machete_gemm_test_helper(a: torch.Tensor, b: torch.Tensor, - wtype: ScalarType, group_size: int, - zero_points: bool): - w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( - b, wtype, group_size, zero_points) - - output_ref = torch.matmul(a, w_ref) - - output = ops.machete_gemm( - a=a, - b_q=w_q_packed, - b_type=wtype, - b_scales=w_s, - b_zeros=maybe_convert_zeropoints(w_zp, w_s), - b_group_size=group_size, - ) - - # Relax atol as our reduction dim becomes larger (more rounding error) - # Relax atol when we have zeropoints since the way machete applies - # zeropoints (after scales) causes noise around 0 - atol = 1 if zero_points else min(5e-2 * math.sqrt(a.shape[1]), 1) - torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol) - - -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU type.") -@pytest.mark.parametrize("shape", - MNK_SHAPES, - ids=lambda x: "x".join(str(v) for v in x)) -@pytest.mark.parametrize("atype", ACT_TYPES, ids=lambda x: str(x)) -@pytest.mark.parametrize("wtype_zeropoints", WTYPE_ZEROPOINTS) -@pytest.mark.parametrize("group_size", [128, None]) -def test_machete_all_schedules(shape, atype: torch.dtype, - wtype_zeropoints: Tuple[ScalarType, bool], 
- group_size: Optional[int]): - m, n, k = shape - wtype, zero_points = wtype_zeropoints - - if group_size is not None and k % group_size != 0: - return - - print(f"MNK = {m} {n} {k}") - - # Normalize group_size - if group_size is None: - group_size = k - assert group_size <= k - - a = rand_data((m, k), atype) - w = rand_data((k, n), atype) - - w_ref, w_q_machete, w_s, w_zp = machete_quantize_and_pack( - w, wtype, group_size, zero_points) - - output_ref = torch.matmul(a, w_ref) - - for schedule in ops.machete_supported_schedules(wtype): - print(f"Testing schedule {schedule}") - output = ops.machete_gemm( - a, - b_q=w_q_machete, - b_type=wtype, - b_scales=w_s, - b_zeros=maybe_convert_zeropoints(w_zp, w_s), - b_group_size=group_size, - schedule=schedule, - ) - - opcheck( - torch.ops._C.machete_gemm, - (a, w_q_machete, wtype.id, w_s, maybe_convert_zeropoints( - w_zp, w_s), group_size, None, None, None, schedule)) - - # Relax atol as our reduction dim becomes larger (more rounding error) - # Relax atol when we have zeropoints since the way machete applies - # zeropoints (after scales) causes noise around 0 - atol = 1 if zero_points else min(5e-2 * math.sqrt(k), 1) - torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol),\ - f"Schedule failed {schedule}" - - -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU type.") -@pytest.mark.parametrize("shape", - MNK_SHAPES, - ids=lambda x: "x".join(str(v) for v in x)) -@pytest.mark.parametrize("atype", ACT_TYPES, ids=lambda x: str(x)) -@pytest.mark.parametrize("wtype_zeropoints", WTYPE_ZEROPOINTS) -@pytest.mark.parametrize("group_size", [128, None]) -def test_machete_heuristic(shape, atype: torch.dtype, - wtype_zeropoints: Tuple[ScalarType, bool], - group_size: Optional[int]): - m, n, k = shape - wtype, zero_points = wtype_zeropoints - - if group_size is not None and k % group_size != 0: - return - - # Normalize group_size - if group_size is None: - group_size = k - assert group_size <= k - - a = rand_data((m, k), atype) - b = rand_data((k, n), atype) - - machete_gemm_test_helper(a, b, wtype, group_size, zero_points) - - -# Test working on other devices -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU type.") -@pytest.mark.parametrize("device", CUDA_DEVICES) -def test_machete_devices(device: str): - m, n, k = 512, 4096, 4096 - wtype = scalar_types.uint4b8 - group_size = 128 - zero_points = False - - print(f"MNK = {m} {n} {k}, device = {device}") - - a = rand_data((m, k), torch.float16).to(device) - b = rand_data((k, n), torch.float16).to(device) - - machete_gemm_test_helper(a, b, wtype, group_size, zero_points) - - -# Test working with a subset of A and B -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU type.") -def test_machete_subset(): - big_m, big_n, big_k = 1024, 1024, 1024 - m, n, k = 512, 512, 512 - wtype = scalar_types.uint4b8 - group_size = 128 - zero_points = False - - whole_a = rand_data((big_m, big_k), torch.float16) - whole_b = rand_data((big_k, big_n), torch.float16) - - a = whole_a[0:m, 0:k] - b = whole_b[0:k, 0:n] - - machete_gemm_test_helper(a, b, wtype, group_size, zero_points) - - -# Test to make sure cuda graphs work -class MacheteLayer(torch.nn.Module): - - def __init__(self, **kwargs): - super().__init__() - self.kwargs = kwargs - - def forward(self, a): - return ops.machete_gemm(**self.kwargs) - - -@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, - reason="Machete is not supported on this GPU 
type.") -def test_machete_cuda_graph(): - m, n, k = 512, 4096, 4096 - - a = rand_data((m, k), torch.float16) - b = rand_data((k, n), torch.float16) - wtype = scalar_types.uint4b8 - group_size = 128 - zero_points = False - - w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( - b, wtype, group_size, zero_points) - - # Construct a trivial model with a single layer that calls a machete kernel - model = MacheteLayer( - a=a, - b_q=w_q_packed, - b_type=wtype, - b_scales=w_s, - b_zeros=maybe_convert_zeropoints(w_zp, w_s), - b_group_size=group_size, - ) - - output_ref = torch.matmul(a, w_ref) - - # Run the model with a cuda graph - stream = torch.cuda.Stream() - with torch.cuda.stream(stream): - g = torch.cuda.CUDAGraph() - with torch.cuda.graph(g): - output = model(a) - output.zero_() - g.replay() - - # Relax atol as our reduction dim becomes larger (more rounding error) - # Relax atol when we have zeropoints since the way machete applies - # zeropoints (after scales) causes noise around 0 - atol = 1 if zero_points else min(5e-2 * math.sqrt(k), 1) - torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol) diff --git a/tests/kernels/test_machete_mm.py b/tests/kernels/test_machete_mm.py new file mode 100644 index 0000000000000..1c6eb2dd9a228 --- /dev/null +++ b/tests/kernels/test_machete_mm.py @@ -0,0 +1,406 @@ +"""Tests for the machete kernel. + +Run `pytest tests/kernels/test_machete_mm.py`. +""" + +import math +from dataclasses import dataclass, fields +from typing import List, Optional, Tuple + +import pytest +import torch + +from tests.kernels.utils import opcheck +from vllm import _custom_ops as ops +from vllm.model_executor.layers.quantization.utils.quant_utils import ( + pack_rows, quantize_weights) +from vllm.platforms import current_platform +from vllm.scalar_type import ScalarType, scalar_types + +CUDA_DEVICES = [ + f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) +] + +# TODO: in future PR refactor this and `is_quant_method_supported` in the kernel +# unit tests to a common utility function. Currently the use of +# `is_quant_method_supported` conflates kernels with quantization methods +# an assumption which is breaking down as quantizations methods can have +# have kernels and some kernels support multiple quantization methods. 
+IS_SUPPORTED_BY_GPU = current_platform.get_device_capability()[0] >= 9 + +MNK_SHAPES = [ + (1, 128, 128), + (1, 512, 1024), + (1, 4096, 4096), + (1, 8192, 28672), + (13, 8192, 4096), + (26, 4096, 8192), + (64, 4096, 4096), + (64, 8192, 28672), + (257, 128, 4096), + (257, 4224, 4160), + (257, 4096, 4096), + (1024, 4096, 8192), + (1024, 8192, 4096), +] + +GROUP_SIZES_TO_TEST: List[Optional[int]] = [128, -1] + + +@dataclass +class TypeConfig: + act_type: torch.dtype + weight_type: ScalarType + output_type: Optional[torch.dtype] + group_scale_type: Optional[torch.dtype] + group_zero_type: Optional[torch.dtype] + channel_scale_type: Optional[torch.dtype] + token_scale_type: Optional[torch.dtype] + + +@dataclass +class Tensors: + w_ref: torch.Tensor + a_ref: torch.Tensor + a: torch.Tensor + w_q: torch.Tensor + w_g_s: Optional[torch.Tensor] + w_g_zp: Optional[torch.Tensor] + w_ch_s: Optional[torch.Tensor] + w_tok_s: Optional[torch.Tensor] + + +# (Act Type, Weight Type, Output Type, Scale Type, ZeroPoints, +# Ch Scales Type, Tok Scales Type) +# NOTE: None "Scale Type" means the act type is floating point +# None "Output Type" means the output type is the same as the act type +TestTypeTuple = Tuple[List[torch.dtype], ScalarType, Optional[torch.dtype], + Optional[torch.dtype], bool] +TEST_TYPES = [ + # GPTQ style + *(TypeConfig(act_type=a_type, + weight_type=w_type, + output_type=None, + group_scale_type=a_type, + group_zero_type=None, + channel_scale_type=None, + token_scale_type=None) + for w_type in [scalar_types.uint4b8, scalar_types.uint8b128] + for a_type in [torch.float16, torch.bfloat16]), + # AWQ style + *(TypeConfig(act_type=a_type, + weight_type=w_type, + output_type=None, + group_scale_type=a_type, + group_zero_type=a_type, + channel_scale_type=None, + token_scale_type=None) + for w_type in [scalar_types.uint4, scalar_types.uint8] + for a_type in [torch.float16, torch.bfloat16]), + # QQQ style + *(TypeConfig(act_type=torch.int8, + weight_type=scalar_types.uint4b8, + output_type=torch.float16, + group_scale_type=group_scale_type, + group_zero_type=None, + channel_scale_type=torch.float, + token_scale_type=torch.float) + for group_scale_type in [None, torch.float16]), + *(TypeConfig(act_type=torch.float8_e4m3fn, + weight_type=scalar_types.uint4b8, + output_type=torch.float16, + group_scale_type=group_scale_type, + group_zero_type=None, + channel_scale_type=torch.float, + token_scale_type=torch.float) + for group_scale_type in [None, torch.float16]), +] + +# TODO: in future PR refactor this and `is_quant_method_supported` in the kernel +# unit tests to a common utility function. Currently the use of +# `is_quant_method_supported` conflates kernels with quantization methods +# an assumption which is breaking down as quantizations methods can have +# have kernels and some kernels support multiple quantization methods. 
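+# Machete kernels target Hopper-class GPUs, so the flag below requires
+# compute capability 9.0+; on other devices the tests are skipped via the
+# `pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, ...)` decorators.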
+IS_SUPPORTED_BY_GPU = current_platform.has_device_capability(90) + + +def rand_data(shape, dtype=torch.float16, scale=1, offset=0): + if dtype.is_floating_point: + return (scale * torch.rand(shape, device="cuda") - offset).to(dtype) + else: + return torch.randint(-8, 7, shape, dtype=dtype, device="cuda") + + +def maybe_convert_zeropoints(zps: Optional[torch.Tensor], s: torch.Tensor): + return zps if zps is None else -1 * s * (zps.to(s.dtype)) + + +def group_size_valid(shape: Tuple[int, int, int], + group_size: Optional[int]) -> bool: + return group_size is None or group_size == -1 or group_size % shape[2] == 0 + + +def machete_quantize_and_pack(atype: torch.dtype, + w: torch.Tensor, + wtype: ScalarType, + stype: Optional[torch.dtype], + group_size: Optional[int], + zero_points: bool = False): + assert wtype.is_integer(), "TODO: support floating point weights" + + w_ref, w_q, w_s, w_zp = quantize_weights( + w, + wtype, + group_size=group_size, + zero_points=zero_points, + # to match how the kernel applies zps + ref_zero_points_after_scales=True) + + w_q = pack_rows(w_q, wtype.size_bits, *w_q.shape) + w_q = w_q.t().contiguous().t() # convert to col major + + w_q_machete = ops.machete_prepack_B(w_q, atype, wtype, stype) + opcheck(torch.ops._C.machete_prepack_B, (w_q, atype, wtype.id, stype)) + + return w_ref, w_q_machete, w_s, w_zp + + +def create_test_tensors(shape: Tuple[int, int, int], + types: TypeConfig, + group_size: Optional[int], + subset_stride_factor: Optional[int] = None) -> Tensors: + m, n, k = shape + factor = subset_stride_factor or 1 + + print("create_test_tensors, shape:", shape, "types:", types, "group_size:", + group_size) + + a = rand_data((m * factor, k * factor), types.act_type, scale=3, offset=2) + w = rand_data((k * factor, n * factor), types.act_type, scale=3, offset=1) + + if factor > 1: + a = a[0:m, 0:k] + w = w[0:k, 0:n] + + if types.group_scale_type is not None: + w = w.to(types.group_scale_type) + if w.dtype.itemsize == 1: + w = w.to(torch.float16) + + w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( + a.dtype, w, types.weight_type, types.group_scale_type, group_size, + types.group_zero_type is not None) + + if not a.dtype.is_floating_point: + aiinfo = torch.iinfo(a.dtype) + w_ref = w_ref.round().clamp(aiinfo.min, aiinfo.max) + + a_ref = a.to(torch.float32) + w_ref = w_ref.to(torch.float32) + + w_ch_s = None if types.channel_scale_type is None else\ + rand_data((n,), types.channel_scale_type) + w_tok_s = None if types.token_scale_type is None else\ + rand_data((m,), types.token_scale_type) + + return Tensors(w_ref=w_ref, + a_ref=a_ref, + a=a, + w_q=w_q_packed, + w_g_s=w_s, + w_g_zp=maybe_convert_zeropoints(w_zp, w_s), + w_ch_s=w_ch_s, + w_tok_s=w_tok_s) + + +# None stype means scales use the same dtype as a +def machete_mm_test_helper(types: TypeConfig, + tensors: Tensors, + group_size: Optional[int] = None, + schedule: Optional[str] = None): + output_ref = torch.matmul(tensors.a_ref, tensors.w_ref) + output_ref_type = output_ref.dtype + + if tensors.w_ch_s is not None: + output_ref = (output_ref.to(tensors.w_ch_s.dtype) * + tensors.w_ch_s.unsqueeze(0)).to(output_ref_type) + if tensors.w_tok_s is not None: + output_ref = (output_ref.to(tensors.w_tok_s.dtype) * + tensors.w_tok_s.unsqueeze(1)).to(output_ref_type) + + output = ops.machete_mm( + a=tensors.a, + b_q=tensors.w_q, + b_type=types.weight_type, + b_group_scales=tensors.w_g_s, + b_group_zeros=tensors.w_g_zp, + b_group_size=group_size, + b_channel_scales=tensors.w_ch_s, + 
a_token_scales=tensors.w_tok_s, + out_type=types.output_type, + schedule=schedule, + ) + + print(output) + print(output_ref) + + # Relax atol as our reduction dim becomes larger (more rounding error) + # Relax atol when we have zeropoints since the way machete applies + # zeropoints (after scales) causes noise around 0 + atol = 1 if tensors.w_g_zp is not None\ + else min(5e-2 * math.sqrt(tensors.a.shape[1]), 1) + rtol = 1e-1 if tensors.a.element_size() >= 2 else 2e-1 + torch.testing.assert_close(output, + output_ref.to(output.dtype), + rtol=rtol, + atol=atol) + + +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +@pytest.mark.parametrize("shape", + MNK_SHAPES, + ids=lambda x: "x".join(str(v) for v in x)) +@pytest.mark.parametrize("types", TEST_TYPES) +def test_machete_all_schedules(shape, types: TypeConfig): + + group_sizes: List[Optional[int]] = [] + if types.group_scale_type is None: + group_sizes = [None] + else: + group_sizes = GROUP_SIZES_TO_TEST + + for group_size in group_sizes: + if not group_size_valid(shape, group_size): + continue + + tensors = create_test_tensors(shape, types, group_size) + print(f"MNK = {shape}") + for schedule in ops.machete_supported_schedules( + types.act_type, + types.weight_type, + group_scales_type=types.group_scale_type, + group_zeros_type=types.group_scale_type, + out_type=types.output_type): + print(f"Testing schedule {schedule}") + machete_mm_test_helper(types, tensors, group_size, schedule) + + +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +@pytest.mark.parametrize("shape", + MNK_SHAPES, + ids=lambda x: "x".join(str(v) for v in x)) +@pytest.mark.parametrize("types", TEST_TYPES) +def test_machete_heuristic(shape, types: TypeConfig): + group_sizes: List[Optional[int]] = [] + if types.group_scale_type is None: + group_sizes = [None] + else: + group_sizes = GROUP_SIZES_TO_TEST + + for group_size in group_sizes: + if not group_size_valid(shape, group_size): + continue + + tensors = create_test_tensors(shape, types, group_size) + machete_mm_test_helper(types, tensors, group_size) + + +# Test working on other devices +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +@pytest.mark.parametrize("device", CUDA_DEVICES) +def test_machete_devices(device: str): + group_size = 128 + + type_config = TypeConfig(act_type=torch.float16, + weight_type=scalar_types.uint4b8, + output_type=None, + group_scale_type=torch.float16, + group_zero_type=None, + channel_scale_type=None, + token_scale_type=None) + + tensors = create_test_tensors((512, 4096, 4096), type_config, group_size) + + for field in fields(Tensors): + tensor = getattr(tensors, field.name) + if isinstance(tensor, torch.Tensor): + setattr(tensors, field.name, tensor.to(device)) + + machete_mm_test_helper(type_config, tensors, group_size) + + +# Test working with a subset of A and B +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +def test_machete_subset(): + group_size = 128 + + type_config = TypeConfig(act_type=torch.float16, + weight_type=scalar_types.uint4b8, + output_type=None, + group_scale_type=torch.float16, + group_zero_type=None, + channel_scale_type=None, + token_scale_type=None) + + tensors = create_test_tensors((512, 4096, 4096), + type_config, + group_size, + subset_stride_factor=2) + machete_mm_test_helper(type_config, tensors, group_size) + + +# Test to make sure cuda graphs work +class 
MacheteLayer(torch.nn.Module): + + def __init__(self, **kwargs): + super().__init__() + self.kwargs = kwargs + + def forward(self, a): + return ops.machete_mm(a=a, **self.kwargs) + + +@pytest.mark.skipif(not IS_SUPPORTED_BY_GPU, + reason="Machete is not supported on this GPU type.") +def test_machete_cuda_graph(): + m, n, k = 512, 4096, 4096 + + a = rand_data((m, k), torch.float16) + b = rand_data((k, n), torch.float16) + wtype = scalar_types.uint4b8 + stype = torch.float16 + group_size = 128 + zero_points = False + + w_ref, w_q_packed, w_s, w_zp = machete_quantize_and_pack( + a.dtype, b, wtype, stype, group_size, zero_points) + + # Construct a trivial model with a single layer that calls a machete kernel + model = MacheteLayer( + b_q=w_q_packed, + b_type=wtype, + b_group_scales=w_s, + b_group_zeros=maybe_convert_zeropoints(w_zp, w_s), + b_group_size=group_size, + ) + + output_ref = torch.matmul(a, w_ref) + + # Run the model with a cuda graph + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + g = torch.cuda.CUDAGraph() + with torch.cuda.graph(g): + output = model(a) + output.zero_() + g.replay() + + # Relax atol as our reduction dim becomes larger (more rounding error) + # Relax atol when we have zeropoints since the way machete applies + # zeropoints (after scales) causes noise around 0 + atol = 1 if zero_points else min(5e-2 * math.sqrt(k), 1) + torch.testing.assert_close(output, output_ref, rtol=1e-1, atol=atol) diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index b276b8fc25473..aa89010ca8ecd 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -444,18 +444,18 @@ def _fp8_marlin_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor, size_k: torch.SymInt) -> torch.Tensor: return torch.empty((size_m, size_n), dtype=a.dtype, device=a.device) - @register_fake("_C::machete_gemm") - def machete_gemm_fake( + @register_fake("_C::machete_mm") + def machete_mm_fake( a: torch.Tensor, - # Should be the tensor returned by machete_prepack_B + # b_q Should be the tensor returned by machete_prepack_B b_q: torch.Tensor, b_type: ScalarType, - b_scales: Optional[torch.Tensor] = None, - b_zeros: Optional[torch.Tensor] = None, + out_type: Optional[torch.dtype] = None, + b_group_scales: Optional[torch.Tensor] = None, + b_group_zeros: Optional[torch.Tensor] = None, b_group_size: Optional[int] = None, - c: Optional[torch.Tensor] = None, - alpha: Optional[float] = None, - beta: Optional[float] = None, + b_channel_scales: Optional[torch.Tensor] = None, + a_token_scales: Optional[torch.Tensor] = None, schedule: Optional[str] = None, ) -> torch.Tensor: m = a.size(0) @@ -463,8 +463,9 @@ def machete_gemm_fake( return torch.empty((m, n), device=a.device, dtype=a.dtype) @register_fake("_C::machete_prepack_B") - def machete_prepack_B_fake(b_q_weight: torch.Tensor, - b_type: ScalarType) -> torch.Tensor: + def machete_prepack_B_fake( + b_q_weight: torch.Tensor, a_type: torch.dtype, b_type: ScalarType, + group_scales_type: Optional[torch.dtype]) -> torch.Tensor: return torch.empty_like(b_q_weight, memory_format=torch.contiguous_format) @@ -617,29 +618,41 @@ def fp8_marlin_gemm(a: torch.Tensor, b_q_weight: torch.Tensor, # machete -def machete_supported_schedules(b_type: ScalarType) -> List[str]: - return torch.ops._C.machete_supported_schedules(b_type.id) - - -def machete_gemm( - a: torch.Tensor, - b_q: torch.Tensor, # Should be the tensor returned by machete_prepack_B - b_type: ScalarType, - b_scales: Optional[torch.Tensor] = None, - b_zeros: Optional[torch.Tensor] = None, - b_group_size: 
Optional[int] = None, - c: Optional[torch.Tensor] = None, - alpha: Optional[float] = None, - beta: Optional[float] = None, - schedule: Optional[str] = None, -) -> torch.Tensor: - return torch.ops._C.machete_gemm(a, b_q, b_type.id, b_scales, b_zeros, - b_group_size, c, alpha, beta, schedule) +def machete_supported_schedules( + a_type: torch.dtype, + b_type: ScalarType, + group_scales_type: Optional[torch.dtype], + group_zeros_type: Optional[torch.dtype] = None, + channel_scales_type: Optional[torch.dtype] = None, + token_scales_type: Optional[torch.dtype] = None, + out_type: Optional[torch.dtype] = None) -> List[str]: + return torch.ops._C.machete_supported_schedules( + a_type, b_type.id, group_scales_type, group_zeros_type, + channel_scales_type, token_scales_type, out_type) -def machete_prepack_B(b_q_weight: torch.Tensor, - b_type: ScalarType) -> torch.Tensor: - return torch.ops._C.machete_prepack_B(b_q_weight, b_type.id) +def machete_mm( + a: torch.Tensor, + # b_q Should be the tensor returned by machete_prepack_B + b_q: torch.Tensor, + b_type: ScalarType, + out_type: Optional[torch.dtype] = None, + b_group_scales: Optional[torch.Tensor] = None, + b_group_zeros: Optional[torch.Tensor] = None, + b_group_size: Optional[int] = None, + b_channel_scales: Optional[torch.Tensor] = None, + a_token_scales: Optional[torch.Tensor] = None, + schedule: Optional[str] = None) -> torch.Tensor: + return torch.ops._C.machete_mm(a, b_q, b_type.id, out_type, b_group_scales, + b_group_zeros, b_group_size, + b_channel_scales, a_token_scales, schedule) + + +def machete_prepack_B( + b_q_weight: torch.Tensor, a_type: torch.dtype, b_type: ScalarType, + group_scales_type: Optional[torch.dtype]) -> torch.Tensor: + return torch.ops._C.machete_prepack_B(b_q_weight, a_type, b_type.id, + group_scales_type) if hasattr(torch.ops._C, "permute_cols"): diff --git a/vllm/model_executor/layers/quantization/kernels/machete.py b/vllm/model_executor/layers/quantization/kernels/machete.py index e5696d08f30f5..15df0200f30b5 100644 --- a/vllm/model_executor/layers/quantization/kernels/machete.py +++ b/vllm/model_executor/layers/quantization/kernels/machete.py @@ -79,7 +79,9 @@ def transform_w_q(x): c.weight_type, packed_dim=0) x.data = ops.machete_prepack_B(x.data.t().contiguous().t(), - self.config.weight_type) + a_type=c.act_type, + b_type=c.weight_type, + group_scales_type=c.act_type) return x def transform_w_s(x): @@ -105,12 +107,12 @@ def apply_weights(self, if c.has_g_idx: x_2d = self.act_perm(x_2d) - output = ops.machete_gemm(a=x_2d, - b_q=w_q, - b_type=c.weight_type, - b_zeros=None, - b_scales=w_s, - b_group_size=c.group_size) + output = ops.machete_mm(a=x_2d, + b_q=w_q, + b_type=c.weight_type, + b_group_zeros=None, + b_group_scales=w_s, + b_group_size=c.group_size) if bias is not None: output.add_(bias) # In-place add diff --git a/vllm/model_executor/layers/quantization/utils/quant_utils.py b/vllm/model_executor/layers/quantization/utils/quant_utils.py index c217f5ca620a1..83055d6000d83 100644 --- a/vllm/model_executor/layers/quantization/utils/quant_utils.py +++ b/vllm/model_executor/layers/quantization/utils/quant_utils.py @@ -126,11 +126,14 @@ def permute_rows(q_w: torch.Tensor, def quantize_weights(w: torch.Tensor, quant_type: ScalarType, - group_size: int, + group_size: Optional[int], zero_points: bool = False, ref_zero_points_after_scales: bool = False): assert quant_type.is_integer(), \ "Floating point quantization may work but has not been tested" + assert not zero_points or group_size is not None, \ + "to have 
group zero points, group_size must be provided "\ + "(-1 group_size is channelwise)" orig_device = w.device orig_type = w.dtype @@ -140,10 +143,9 @@ def quantize_weights(w: torch.Tensor, if group_size == -1: group_size = size_k - assert group_size <= size_k # Reshape to [groupsize, -1] - if group_size < size_k: + if group_size is not None and group_size < size_k: w = w.reshape((-1, group_size, size_n)) w = w.permute(1, 0, 2) w = w.reshape((group_size, -1)) @@ -155,18 +157,20 @@ def quantize_weights(w: torch.Tensor, max_q_val = quant_type.max() min_q_val = quant_type.min() - if zero_points: - assert not quant_type.is_signed() and quant_type.max() > 0 - w_s = (max_val - min_val).clamp(min=1e-5) / quant_type.max() - maybe_w_zp = torch.round(torch.abs(min_val / w_s)) \ - .clamp(min_q_val, max_q_val).int() - else: - # If the bias is such that there are no possible negative/positive - # values, set the max value to inf to avoid divide by 0 - w_s = torch.max( - abs(max_val / (max_q_val if max_q_val != 0 else torch.inf)), - abs(min_val / (min_q_val if min_q_val != 0 else torch.inf))) - maybe_w_zp = None + w_s = torch.Tensor([1.0]).to(w.device) # unscaled case + maybe_w_zp = None + if group_size is not None: + if zero_points: + assert not quant_type.is_signed() and quant_type.max() > 0 + w_s = (max_val - min_val).clamp(min=1e-5) / quant_type.max() + maybe_w_zp = torch.round(torch.abs(min_val / w_s)) \ + .clamp(min_q_val, max_q_val).int() + else: + # If the bias is such that there are no possible negative/positive + # values, set the max value to inf to avoid divide by 0 + w_s = torch.max( + abs(max_val / (max_q_val if max_q_val != 0 else torch.inf)), + abs(min_val / (min_q_val if min_q_val != 0 else torch.inf))) # Quantize w_q = torch.round(w / w_s).int() + (maybe_w_zp if zero_points else 0) @@ -176,7 +180,7 @@ def quantize_weights(w: torch.Tensor, # For some kernels (namely Machete) the zero-points are applied after the # scales are applied, for this case computing the reference in similar way # allows us to use tighter error tolerances in our unit tests. 
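    # In other words, the Machete-style reference below is computed as
    #     w_ref = w_q * w_s - w_zp * w_s    (zero points applied after scales)
    # rather than the usual
    #     w_ref = (w_q - w_zp) * w_s
    # the two forms are algebraically equal but round differently at low precision.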
- if ref_zero_points_after_scales and zero_points: + if ref_zero_points_after_scales and maybe_w_zp is not None: w_ref = w_q.to(orig_type) * w_s - maybe_w_zp.to(orig_type) * w_s else: w_ref = (w_q - (maybe_w_zp if zero_points else 0)).to(orig_type) * w_s @@ -185,7 +189,7 @@ def quantize_weights(w: torch.Tensor, w_q += quant_type.bias # Restore original shapes - if group_size < size_k: + if group_size is not None and group_size < size_k: def reshape_w(w): w = w.reshape((group_size, -1, size_n)) @@ -195,17 +199,16 @@ def reshape_w(w): w_q = reshape_w(w_q) w_ref = reshape_w(w_ref) + w_s = w_s.reshape((-1, size_n)).contiguous() - w_s = w_s.reshape((-1, size_n)).contiguous() - - if zero_points: + if maybe_w_zp is not None: maybe_w_zp = maybe_w_zp.reshape((-1, size_n)).contiguous() maybe_w_zp = maybe_w_zp.to(device=orig_device) return ( w_ref.to(device=orig_device), w_q.to(device=orig_device), - w_s.to(device=orig_device), + w_s if group_size is not None else None, maybe_w_zp, ) From a03ea40792201ac8ff547d37d9f9255b347b9ccd Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 18 Nov 2024 15:14:59 -0800 Subject: [PATCH 036/397] [3/N][torch.compile] consolidate custom op logging (#10399) Signed-off-by: youkaichao --- vllm/config.py | 12 ++++++++++-- vllm/model_executor/custom_op.py | 9 ++++++--- vllm/plugins/__init__.py | 4 ++++ 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 14017bbdb3cf2..ea9ec43cc5a15 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -4,8 +4,9 @@ import warnings from dataclasses import dataclass, field, replace from pathlib import Path -from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Dict, Final, List, - Literal, Mapping, Optional, Set, Tuple, Type, Union) +from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Counter, Dict, + Final, List, Literal, Mapping, Optional, Set, Tuple, Type, + Union) import torch from pydantic import BaseModel, Field, PrivateAttr @@ -2169,6 +2170,10 @@ class CompilationConfig(BaseModel): compile_sizes: List[int] = PrivateAttr capture_sizes: List[int] = PrivateAttr + # keep track of enabled and disabled custom ops + enabled_custom_ops: Counter[str] = PrivateAttr + disabled_custom_ops: Counter[str] = PrivateAttr + def model_post_init(self, __context: Any) -> None: self.level = envs.VLLM_TORCH_COMPILE_LEVEL @@ -2190,6 +2195,9 @@ def model_post_init(self, __context: Any) -> None: func = __import__(module).__dict__[func_name] self.inductor_compile_config[k] = func + self.enabled_custom_ops = Counter() + self.disabled_custom_ops = Counter() + def init_backend(self) -> Union[str, Callable]: if self.level == CompilationLevel.NO_COMPILATION: raise ValueError("No compilation level is set.") diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index 6ae7d7cf6964f..b07966f2ab7d0 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -61,10 +61,13 @@ def forward_hpu(self, *args, **kwargs): def dispatch_forward(self): # NOTE(woosuk): Here we assume that vLLM was built for only one # specific backend. Currently, we do not support dynamic dispatching. 
- + compilation_config = get_current_vllm_config().compilation_config enabled = self.enabled() - logger.debug("custom op %s %s", self.__class__.name, - "enabled" if enabled else "disabled") + if enabled: + compilation_config.enabled_custom_ops.update([self.__class__.name]) + else: + compilation_config.disabled_custom_ops.update( + [self.__class__.name]) if not enabled: return self.forward_native diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index a0c73a752b5e8..c5182139db50b 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -80,6 +80,10 @@ def set_current_vllm_config(vllm_config: VllmConfig): _current_vllm_config = vllm_config yield finally: + logger.debug("enabled custom ops: %s", + vllm_config.compilation_config.enabled_custom_ops) + logger.debug("disabled custom ops: %s", + vllm_config.compilation_config.disabled_custom_ops) _current_vllm_config = old_vllm_config From 2298e69b5f1dc77f00aee687a3843a4dae12cb91 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 18 Nov 2024 15:29:37 -0800 Subject: [PATCH 037/397] [ci][bugfix] fix kernel tests (#10431) Signed-off-by: youkaichao --- vllm/plugins/__init__.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index c5182139db50b..fdc848cedf054 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -6,9 +6,6 @@ if TYPE_CHECKING: from vllm.config import CompilationConfig, VllmConfig -else: - CompilationConfig = None - VllmConfig = None logger = logging.getLogger(__name__) @@ -50,23 +47,23 @@ def load_general_plugins(): logger.exception("Failed to load plugin %s", plugin.name) -_compilation_config: Optional[CompilationConfig] = None +_compilation_config: Optional["CompilationConfig"] = None -def set_compilation_config(config: Optional[CompilationConfig]): +def set_compilation_config(config: Optional["CompilationConfig"]): global _compilation_config _compilation_config = config -def get_compilation_config() -> Optional[CompilationConfig]: +def get_compilation_config() -> Optional["CompilationConfig"]: return _compilation_config -_current_vllm_config: Optional[VllmConfig] = None +_current_vllm_config: Optional["VllmConfig"] = None @contextmanager -def set_current_vllm_config(vllm_config: VllmConfig): +def set_current_vllm_config(vllm_config: "VllmConfig"): """ Temporarily set the current VLLM config. Used during model initialization. @@ -87,6 +84,12 @@ def set_current_vllm_config(vllm_config: VllmConfig): _current_vllm_config = old_vllm_config -def get_current_vllm_config() -> VllmConfig: - assert _current_vllm_config is not None, "Current VLLM config is not set." +def get_current_vllm_config() -> "VllmConfig": + if _current_vllm_config is None: + # in ci, usually when we test custom ops/modules directly, + # we don't set the vllm config. In that case, we set a default + # config. 
+ logger.warning("Current VLLM config is not set.") + from vllm.config import VllmConfig + return VllmConfig() return _current_vllm_config From 90a6c759caf84ff7722449a33895e397ccf1a2af Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Mon, 18 Nov 2024 15:39:14 -0800 Subject: [PATCH 038/397] [misc] partial prefix & random input generation benchmark (#9929) Signed-off-by: rickyx --- benchmarks/benchmark_prefix_caching.py | 116 +++++++++++++++++++------ 1 file changed, 91 insertions(+), 25 deletions(-) diff --git a/benchmarks/benchmark_prefix_caching.py b/benchmarks/benchmark_prefix_caching.py index 6d33096ca1d11..5e9381f712e10 100644 --- a/benchmarks/benchmark_prefix_caching.py +++ b/benchmarks/benchmark_prefix_caching.py @@ -54,13 +54,30 @@ def test_prefix(llm=None, sampling_params=None, prompts=None): print(f"cost time {end_time - start_time}") -def sample_requests( +@dataclasses.dataclass +class Request: + prompt: str + prompt_len: int + output_len: int + + +def sample_tokens(tokenizer: PreTrainedTokenizerBase, length: int) -> str: + vocab = tokenizer.get_vocab() + # Remove the special tokens. + vocab = { + k: v + for k, v in vocab.items() if k not in tokenizer.all_special_ids + } + return random.choices(list(vocab.values()), k=length) + + +def sample_requests_from_dataset( dataset_path: str, num_requests: int, tokenizer: PreTrainedTokenizerBase, input_length_range: Tuple[int, int], fixed_output_len: Optional[int], -) -> List[Tuple[str, int, int]]: +) -> List[Request]: if fixed_output_len is not None and fixed_output_len < 4: raise ValueError("output_len too small") @@ -77,31 +94,55 @@ def sample_requests( random.shuffle(dataset) min_len, max_len = input_length_range + assert min_len >= 0 and max_len >= min_len, "input_length_range too small" # Filter out sequences that are too long or too short - filtered_dataset: List[Tuple[str, int, int]] = [] + filtered_requests: List[Request] = [] + for i in range(len(dataset)): - if len(filtered_dataset) == num_requests: + if len(filtered_requests) == num_requests: break # Tokenize the prompts and completions. - prompt = dataset[i][0] - prompt_token_ids = tokenizer(prompt).input_ids + prompt_token_ids = tokenizer(dataset[i][0]).input_ids + prompt = tokenizer.decode(prompt_token_ids) completion = dataset[i][1] completion_token_ids = tokenizer(completion).input_ids prompt_len = len(prompt_token_ids) - output_len = len(completion_token_ids - ) if fixed_output_len is None else fixed_output_len - if prompt_len < 4 or output_len < 4: - # Prune too short sequences. 
- continue + output_len = (len(completion_token_ids) + if fixed_output_len is None else fixed_output_len) if min_len <= prompt_len <= max_len: - filtered_dataset.append((prompt, prompt_len, output_len)) + filtered_requests.append(Request(prompt, prompt_len, output_len)) + + return filtered_requests + + +def sample_requests_from_random( + num_requests: int, + tokenizer: PreTrainedTokenizerBase, + input_length_range: Tuple[int, int], + fixed_output_len: Optional[int], + prefix_len: int, +) -> List[Request]: - return filtered_dataset + requests = [] + prefix_token_ids = sample_tokens(tokenizer, prefix_len) + min_len, max_len = input_length_range + + for i in range(num_requests): + unique_part_token_ids = sample_tokens( + tokenizer, + random.randint(min_len - prefix_len, max_len - prefix_len)) + prompt_token_ids = prefix_token_ids + unique_part_token_ids + prompt = tokenizer.decode(prompt_token_ids) + prompt_len = len(prompt_token_ids) + assert (min_len <= prompt_len <= max_len + ), f"prompt_len {prompt_len} out of range {min_len}:{max_len}" + requests.append(Request(prompt, prompt_len, fixed_output_len)) + return requests -def repeat_and_sort_requests(requests: List[Tuple[str, int, int]], +def repeat_and_sort_requests(requests: List[Request], repeat_count: int, sort: bool = False) -> List[str]: repeated_requests = requests * repeat_count @@ -109,7 +150,7 @@ def repeat_and_sort_requests(requests: List[Tuple[str, int, int]], repeated_requests.sort(key=lambda x: x[1]) else: random.shuffle(repeated_requests) - return [req[0] for req in repeated_requests] + return [req.prompt for req in repeated_requests] def main(args): @@ -117,9 +158,12 @@ def main(args): input_length_range = tuple(map(int, args.input_length_range.split(':'))) random.seed(args.seed) if args.dataset_path is not None: - print(f"Start to sample {args.num_prompts} prompts" + if args.prefix_len > 0: + raise ValueError("prefix-len is not supported when " + "dataset-path is provided.") + print(f"Start to sample {args.num_prompts} prompts " f"from {args.dataset_path}") - filtered_datasets = sample_requests( + filtered_requests = sample_requests_from_dataset( dataset_path=args.dataset_path, num_requests=args.num_prompts, tokenizer=tokenizer, @@ -127,9 +171,22 @@ def main(args): fixed_output_len=args.output_len, ) else: - prompt_len = len(tokenizer(PROMPT).input_ids) - filtered_datasets = [(PROMPT, prompt_len, args.output_len) - ] * args.num_prompts + print(f"Start to sample {args.num_prompts} prompts from random") + filtered_requests = sample_requests_from_random( + num_requests=args.num_prompts, + tokenizer=tokenizer, + input_length_range=input_length_range, + fixed_output_len=args.output_len, + prefix_len=args.prefix_len, + ) + + # Print some helpful stats of the requests. 
+ print(f"Sampled {len(filtered_requests)} requests.") + prompt_lens = [req.prompt_len for req in filtered_requests] + print(f"Average input length: {sum(prompt_lens) / len(prompt_lens)}") + print(f"P50 input length: {sorted(prompt_lens)[len(prompt_lens) // 2]}") + print(f"Min Prompt Length: {min(prompt_lens)}") + print(f"Max Prompt Length: {max(prompt_lens)}") engine_args = EngineArgs.from_cli_args(args) @@ -137,8 +194,8 @@ def main(args): sampling_params = SamplingParams(temperature=0, max_tokens=args.output_len) - print("Testing filtered datasets") - prompts = repeat_and_sort_requests(filtered_datasets, + print("Testing filtered requests") + prompts = repeat_and_sort_requests(filtered_requests, repeat_count=args.repeat_count, sort=args.sort) @@ -161,20 +218,29 @@ def main(args): parser.add_argument('--output-len', type=int, default=10) parser.add_argument('--num-prompts', type=int, - default=1, + required=True, help="Number of the prompts sampled from dataset") parser.add_argument('--repeat-count', type=int, - default=100, + default=1, help='Number of times to repeat each prompt') parser.add_argument('--sort', action='store_true', help='Sort prompts by input length') parser.add_argument('--input-length-range', type=str, - default='128:256', + required=True, help='Range of input lengths for sampling prompts,' 'specified as "min:max" (e.g., "128:256").') + parser.add_argument( + "--prefix-len", + type=int, + default=0, + help="Specifies the length of a common prefix to be " + "added to the input prompt. The input-length-range will " + "subtract this length when filtering prompts. Only used " + "when dataset-path is not provided.", + ) parser = EngineArgs.add_cli_args(parser) args = parser.parse_args() From 284203f171d86a9581295436d6175246215437fd Mon Sep 17 00:00:00 2001 From: "Kevin H. Luu" Date: Mon, 18 Nov 2024 15:04:25 -1000 Subject: [PATCH 039/397] [ci/build] Have dependabot ignore all patch update (#10436) We have too many dependencies and all patch updates can be a little noisy. This is to have dependabot ignore all patch version updates. 
--- .github/dependabot.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4f54eea564ecb..683b70cd89989 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -15,6 +15,8 @@ updates: allow: - dependency-type: "all" ignore: + - dependency-name: "*" + update-types: ["version-update:semver-patch"] - dependency-name: "torch" - dependency-name: "torchvision" - dependency-name: "xformers" @@ -24,9 +26,6 @@ updates: - dependency-name: "ray[adag]" - dependency-name: "lm-eval" groups: - patch-update: - applies-to: version-updates - update-types: ["patch"] minor-update: applies-to: version-updates update-types: ["minor"] From 7eb719df13cf8059485f52648a6a115700158301 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 19 Nov 2024 11:21:42 +0800 Subject: [PATCH 040/397] [Bugfix]Fix Phi-3 BNB online quantization (#10417) Signed-off-by: Jee Jee Li --- vllm/model_executor/layers/linear.py | 12 +++++++++--- vllm/model_executor/models/phi3.py | 10 ++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index e1f8a6e36d781..9da38d4857d6d 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -470,7 +470,8 @@ def weight_loader(self, needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False) if loaded_shard_id is None: - # Loaded weight is already fused on disk (qkv/mlp). + # Loaded weight is already fused on disk (mlp). + # (e.g., Phi-3's gate_up_proj). if output_dim is None: if needs_scalar_to_array: param_data, loaded_weight = adjust_scalar_to_fused_array( @@ -480,6 +481,8 @@ def weight_loader(self, param_data.copy_(loaded_weight) return current_shard_offset = 0 + use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", + False) shard_offsets: List[Tuple[int, int, int]] = [] for i, output_size in enumerate(self.output_sizes): shard_offsets.append((i, current_shard_offset, output_size)) @@ -495,7 +498,9 @@ def weight_loader(self, # Special case for Marlin. shard_size, shard_offset = adjust_marlin_shard( param, shard_size, shard_offset) - + if use_bitsandbytes_4bit: + shard_size = loaded_weight.shape[output_dim] // 2 + shard_offset = shard_size * shard_id loaded_weight_shard = loaded_weight.narrow( output_dim, shard_offset, shard_size) self.weight_loader(param, loaded_weight_shard, shard_id) @@ -808,7 +813,8 @@ def weight_loader(self, needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False) if loaded_shard_id is None: - # Loaded weight is already fused on disk (qkv/mlp). + # Loaded weight is already fused on disk (qkv). + # (e.g., Phi-3's qkv_proj). if output_dim is None: if needs_scalar_to_array: param_data, loaded_weight = adjust_scalar_to_fused_array( diff --git a/vllm/model_executor/models/phi3.py b/vllm/model_executor/models/phi3.py index 34141511ea791..54158bc141235 100644 --- a/vllm/model_executor/models/phi3.py +++ b/vllm/model_executor/models/phi3.py @@ -14,3 +14,13 @@ class Phi3ForCausalLM(LlamaForCausalLM): "gate_up_proj", ], } + + # BitandBytes specific attributes + default_bitsandbytes_target_modules = [ + ".gate_up_proj.", + ".down_proj.", + ".qkv_proj.", + ".o_proj.", + ] + # Initialize an empty dict when there is no stacked parameter mapping. 
+ bitsandbytes_stacked_params_mapping = {} From 8c1fb507052d385d94ac49a7388fd6db5d0069e7 Mon Sep 17 00:00:00 2001 From: Mengqing Cao Date: Tue, 19 Nov 2024 11:22:26 +0800 Subject: [PATCH 041/397] [Platform][Refactor] Extract func `get_default_attn_backend` to `Platform` (#10358) Signed-off-by: Mengqing Cao --- tests/kernels/test_attention_selector.py | 19 ++++---- vllm/attention/selector.py | 56 +++--------------------- vllm/model_executor/models/molmo.py | 2 +- vllm/model_executor/models/qwen2_vl.py | 2 +- vllm/model_executor/models/utils.py | 4 +- vllm/platforms/__init__.py | 1 + vllm/platforms/cpu.py | 10 ++++- vllm/platforms/hpu.py | 6 ++- vllm/platforms/interface.py | 19 ++++++++ vllm/platforms/openvino.py | 8 +++- vllm/platforms/rocm.py | 14 +++++- vllm/platforms/tpu.py | 12 ++++- vllm/platforms/xpu.py | 12 ++++- vllm/worker/enc_dec_model_runner.py | 3 +- 14 files changed, 99 insertions(+), 69 deletions(-) diff --git a/tests/kernels/test_attention_selector.py b/tests/kernels/test_attention_selector.py index 169ce040d370c..d37f95d48d5b2 100644 --- a/tests/kernels/test_attention_selector.py +++ b/tests/kernels/test_attention_selector.py @@ -5,6 +5,7 @@ from tests.kernels.utils import override_backend_env_variable from vllm.attention.selector import which_attn_to_use +from vllm.platforms import cpu, cuda, openvino, rocm from vllm.utils import STR_FLASH_ATTN_VAL, STR_INVALID_VAL @@ -19,26 +20,28 @@ def test_env(name: str, device: str, monkeypatch): override_backend_env_variable(monkeypatch, name) if device == "cpu": - with patch("vllm.attention.selector.current_platform.is_cpu", - return_value=True): + with patch("vllm.attention.selector.current_platform", + cpu.CpuPlatform()): backend = which_attn_to_use(16, torch.float16, torch.float16, 16, False) assert backend.name == "TORCH_SDPA" elif device == "hip": - with patch("vllm.attention.selector.current_platform.is_rocm", - return_value=True): + with patch("vllm.attention.selector.current_platform", + rocm.RocmPlatform()): backend = which_attn_to_use(16, torch.float16, torch.float16, 16, False) assert backend.name == "ROCM_FLASH" elif device == "openvino": - with patch("vllm.attention.selector.current_platform.is_openvino", - return_value=True): + with patch("vllm.attention.selector.current_platform", + openvino.OpenVinoPlatform()): backend = which_attn_to_use(16, torch.float16, torch.float16, 16, False) assert backend.name == "OPENVINO" else: - backend = which_attn_to_use(16, torch.float16, torch.float16, 16, - False) + with patch("vllm.attention.selector.current_platform", + cuda.CudaPlatform()): + backend = which_attn_to_use(16, torch.float16, torch.float16, 16, + False) assert backend.name == name diff --git a/vllm/attention/selector.py b/vllm/attention/selector.py index 664707e9dc65d..d263839705690 100644 --- a/vllm/attention/selector.py +++ b/vllm/attention/selector.py @@ -1,4 +1,3 @@ -import enum import os from contextlib import contextmanager from functools import lru_cache @@ -9,26 +8,12 @@ import vllm.envs as envs from vllm.attention.backends.abstract import AttentionBackend from vllm.logger import init_logger -from vllm.platforms import current_platform +from vllm.platforms import _Backend, current_platform from vllm.utils import STR_BACKEND_ENV_VAR logger = init_logger(__name__) -class _Backend(enum.Enum): - FLASH_ATTN = enum.auto() - FLASH_ATTN_VLLM_V1 = enum.auto() - XFORMERS = enum.auto() - ROCM_FLASH = enum.auto() - TORCH_SDPA = enum.auto() - OPENVINO = enum.auto() - FLASHINFER = enum.auto() - HPU_ATTN = enum.auto() - 
PALLAS = enum.auto() - IPEX = enum.auto() - NO_ATTENTION = enum.auto() - - def backend_name_to_enum(backend_name: str) -> _Backend: assert backend_name is not None @@ -216,40 +201,11 @@ def which_attn_to_use(head_size: int, if backend_by_env_var is not None: selected_backend = backend_name_to_enum(backend_by_env_var) - if current_platform.is_cpu(): - if selected_backend != _Backend.TORCH_SDPA: - logger.info("Cannot use %s backend on CPU.", selected_backend) - return _Backend.TORCH_SDPA - - if current_platform.is_openvino(): - if selected_backend != _Backend.OPENVINO: - logger.info("Cannot use %s backend on OpenVINO.", selected_backend) - return _Backend.OPENVINO - - if current_platform.is_xpu(): - if selected_backend != _Backend.IPEX: - logger.info("Cannot use %s backend on XPU.", selected_backend) - return _Backend.IPEX - - if current_platform.is_tpu(): - if selected_backend != _Backend.PALLAS: - logger.info("Cannot use %s backend on TPU.", selected_backend) - return _Backend.PALLAS - - if current_platform.is_rocm(): - # AMD GPUs. - selected_backend = (_Backend.ROCM_FLASH if selected_backend - == _Backend.FLASH_ATTN else selected_backend) - if selected_backend == _Backend.ROCM_FLASH: - if not current_platform.has_device_capability(90): - # not Instinct series GPUs. - logger.info("flash_attn is not supported on NAVI GPUs.") - else: - logger.info("%s is not supported in AMD GPUs.", selected_backend) - return _Backend.ROCM_FLASH - - if current_platform.is_hpu(): - return _Backend.HPU_ATTN + # get device-specific default attn_backend + default_backend = current_platform.get_default_attn_backend( + selected_backend) + if default_backend is not None: + return default_backend if use_v1: return _Backend.FLASH_ATTN_VLLM_V1 diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index a7c90a3f5031b..2528f741864b3 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -13,7 +13,6 @@ from transformers import PretrainedConfig from vllm.attention import Attention, AttentionMetadata -from vllm.attention.selector import _Backend from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank, @@ -38,6 +37,7 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.utils import cached_get_tokenizer +from vllm.platforms import _Backend from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, SequenceData) from vllm.transformers_utils.processor import get_processor diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index a929b9323b245..0ac81387b1bd8 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -39,7 +39,6 @@ make_batched_images, make_batched_videos, smart_resize) from vllm.attention import AttentionMetadata -from vllm.attention.selector import _Backend from vllm.config import VllmConfig from vllm.distributed import get_pp_group, parallel_state from vllm.distributed import utils as dist_utils @@ -65,6 +64,7 @@ from vllm.multimodal.inputs import (MultiModalData, MultiModalDataDict, MultiModalKwargs) from vllm.multimodal.utils import cached_get_tokenizer +from vllm.platforms import _Backend from vllm.sequence import IntermediateTensors, PoolerOutput, SequenceData from vllm.transformers_utils.config 
import uses_mrope from vllm.transformers_utils.processor import cached_get_processor diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 03226f42ee053..2ab9b19e22068 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -9,13 +9,13 @@ from transformers import PretrainedConfig import vllm.envs as envs -from vllm.attention.selector import (_Backend, backend_name_to_enum, +from vllm.attention.selector import (backend_name_to_enum, get_global_forced_attn_backend) from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MultiModalPlaceholderMap, NestedTensors -from vllm.platforms import current_platform +from vllm.platforms import _Backend, current_platform from vllm.sequence import IntermediateTensors from vllm.utils import is_pin_memory_available diff --git a/vllm/platforms/__init__.py b/vllm/platforms/__init__.py index 9e740837381f8..1f68fc2e25df3 100644 --- a/vllm/platforms/__init__.py +++ b/vllm/platforms/__init__.py @@ -1,3 +1,4 @@ +from .interface import _Backend # noqa: F401 from .interface import Platform, PlatformEnum, UnspecifiedPlatform current_platform: Platform diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 42bee31dfb0e9..f9a34a47959ec 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -5,7 +5,9 @@ from vllm.logger import init_logger -from .interface import Platform, PlatformEnum +from .interface import Platform, PlatformEnum, _Backend + +logger = init_logger(__name__) if TYPE_CHECKING: from vllm.config import VllmConfig @@ -22,6 +24,12 @@ class CpuPlatform(Platform): def get_device_name(cls, device_id: int = 0) -> str: return "cpu" + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + if selected_backend != _Backend.TORCH_SDPA: + logger.info("Cannot use %s backend on CPU.", selected_backend) + return _Backend.TORCH_SDPA + @classmethod def get_device_total_memory(cls, device_id: int = 0) -> int: return psutil.virtual_memory().total diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 170cfff94f90d..1e0888a30ba96 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -1,11 +1,15 @@ import torch -from .interface import Platform, PlatformEnum +from .interface import Platform, PlatformEnum, _Backend class HpuPlatform(Platform): _enum = PlatformEnum.HPU + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + return _Backend.HPU_ATTN + @staticmethod def inference_mode(): return torch.no_grad() diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 970c0d1be617e..f4849fa2ccfb0 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -11,6 +11,20 @@ VllmConfig = None +class _Backend(enum.Enum): + FLASH_ATTN = enum.auto() + FLASH_ATTN_VLLM_V1 = enum.auto() + XFORMERS = enum.auto() + ROCM_FLASH = enum.auto() + TORCH_SDPA = enum.auto() + OPENVINO = enum.auto() + FLASHINFER = enum.auto() + HPU_ATTN = enum.auto() + PALLAS = enum.auto() + IPEX = enum.auto() + NO_ATTENTION = enum.auto() + + class PlatformEnum(enum.Enum): CUDA = enum.auto() ROCM = enum.auto() @@ -71,6 +85,11 @@ def is_cuda_alike(self) -> bool: """Stateless version of :func:`torch.cuda.is_available`.""" return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM) + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend): + """Get the 
default attention backend of a device.""" + return None + @classmethod def get_device_capability( cls, diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 31fe3f1fcbfe4..ad69ced5417b3 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -3,7 +3,7 @@ import vllm.envs as envs from vllm.logger import init_logger -from .interface import Platform, PlatformEnum +from .interface import Platform, PlatformEnum, _Backend logger = init_logger(__name__) @@ -11,6 +11,12 @@ class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + if selected_backend != _Backend.OPENVINO: + logger.info("Cannot use %s backend on OpenVINO.", selected_backend) + return _Backend.OPENVINO + @classmethod def get_device_name(self, device_id: int = 0) -> str: return "openvino" diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index fd8afc92b0f28..022256996f97b 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -5,7 +5,7 @@ from vllm.logger import init_logger -from .interface import DeviceCapability, Platform, PlatformEnum +from .interface import DeviceCapability, Platform, PlatformEnum, _Backend logger = init_logger(__name__) @@ -19,6 +19,18 @@ class RocmPlatform(Platform): _enum = PlatformEnum.ROCM + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + selected_backend = (_Backend.ROCM_FLASH if selected_backend + == _Backend.FLASH_ATTN else selected_backend) + if selected_backend == _Backend.ROCM_FLASH: + if not cls.has_device_capability(90): + # not Instinct series GPUs. + logger.info("flash_attn is not supported on NAVI GPUs.") + else: + logger.info("%s is not supported in AMD GPUs.", selected_backend) + return _Backend.ROCM_FLASH + @classmethod @lru_cache(maxsize=8) def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 643db835c85ff..9057afb6514e4 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -3,17 +3,27 @@ import torch -from .interface import Platform, PlatformEnum +from vllm.logger import init_logger + +from .interface import Platform, PlatformEnum, _Backend if TYPE_CHECKING: from vllm.config import VllmConfig else: VllmConfig = None +logger = init_logger(__name__) + class TpuPlatform(Platform): _enum = PlatformEnum.TPU + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + if selected_backend != _Backend.PALLAS: + logger.info("Cannot use %s backend on TPU.", selected_backend) + return _Backend.PALLAS + @classmethod def get_device_name(cls, device_id: int = 0) -> str: raise NotImplementedError diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 106e8eddf458f..d0b3dca9a4195 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,11 +1,21 @@ import torch -from .interface import DeviceCapability, Platform, PlatformEnum +from vllm.logger import init_logger + +from .interface import DeviceCapability, Platform, PlatformEnum, _Backend + +logger = init_logger(__name__) class XPUPlatform(Platform): _enum = PlatformEnum.XPU + @classmethod + def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: + if selected_backend != _Backend.IPEX: + logger.info("Cannot use %s backend on XPU.", selected_backend) + return _Backend.IPEX + @staticmethod def get_device_capability(device_id: int = 0) -> DeviceCapability: major, minor, *_ = 
torch.xpu.get_device_capability( diff --git a/vllm/worker/enc_dec_model_runner.py b/vllm/worker/enc_dec_model_runner.py index 82824faa6629a..687d2cc79360f 100644 --- a/vllm/worker/enc_dec_model_runner.py +++ b/vllm/worker/enc_dec_model_runner.py @@ -8,7 +8,7 @@ from vllm.attention.backends.abstract import (AttentionBackend, AttentionMetadata) from vllm.attention.backends.utils import PAD_SLOT_ID -from vllm.attention.selector import (_Backend, get_env_variable_attn_backend, +from vllm.attention.selector import (get_env_variable_attn_backend, get_global_forced_attn_backend) from vllm.config import VllmConfig from vllm.forward_context import set_forward_context @@ -18,6 +18,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.multimodal import (MULTIMODAL_REGISTRY, MultiModalKwargs, MultiModalRegistry) +from vllm.platforms import _Backend from vllm.sampling_params import SamplingParams from vllm.sequence import (IntermediateTensors, PoolerOutput, SequenceGroupMetadata) From 74f8c2cf5f6a34fd21cfbe6d72bcc1b2a2a6754a Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 18 Nov 2024 23:37:46 -0500 Subject: [PATCH 042/397] Add openai.beta.chat.completions.parse example to structured_outputs.rst (#10433) --- docs/source/models/structured_outputs.rst | 98 ++++++++++++++++++++++- 1 file changed, 96 insertions(+), 2 deletions(-) diff --git a/docs/source/models/structured_outputs.rst b/docs/source/models/structured_outputs.rst index ff4ff7169fc5f..484e1f17d191e 100644 --- a/docs/source/models/structured_outputs.rst +++ b/docs/source/models/structured_outputs.rst @@ -10,7 +10,7 @@ This document shows you some examples of the different options that are availabl Online Inference (OpenAI API) ----------------------------- -You can generate structured outputs using the OpenAI’s `Completions `_ and `Chat `_ API. +You can generate structured outputs using the OpenAI's `Completions `_ and `Chat `_ API. The following parameters are supported, which must be added as extra parameters: @@ -137,6 +137,100 @@ It works by using a context free EBNF grammar, which for example we can use to d The complete code of the examples can be found on `examples/openai_chat_completion_structured_outputs.py `_. +Experimental Automatic Parsing (OpenAI API) +-------------------------------------------- + +This section covers the OpenAI beta wrapper over the ``client.chat.completions.create()`` method that provides richer integrations with Python specific types. + +At the time of writing (``openai==1.54.4``), this is a "beta" feature in the OpenAI client library. Code reference can be found `here `_. + +For the following examples, vLLM was setup using ``vllm serve meta-llama/Llama-3.1-8B-Instruct`` + +Here is a simple example demonstrating how to get structured output using Pydantic models: + +.. code-block:: python + + from pydantic import BaseModel + from openai import OpenAI + + + class Info(BaseModel): + name: str + age: int + + + client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="dummy") + completion = client.beta.chat.completions.parse( + model="meta-llama/Llama-3.1-8B-Instruct", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "My name is Cameron, I'm 28. 
What's my name and age?"}, + ], + response_format=Info, + extra_body=dict(guided_decoding_backend="outlines"), + ) + + message = completion.choices[0].message + print(message) + assert message.parsed + print("Name:", message.parsed.name) + print("Age:", message.parsed.age) + +Output: + +.. code-block:: console + + ParsedChatCompletionMessage[Testing](content='{"name": "Cameron", "age": 28}', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], parsed=Testing(name='Cameron', age=28)) + Name: Cameron + Age: 28 + + +Here is a more complex example using nested Pydantic models to handle a step-by-step math solution: + +.. code-block:: python + + from typing import List + from pydantic import BaseModel + from openai import OpenAI + + + class Step(BaseModel): + explanation: str + output: str + + + class MathResponse(BaseModel): + steps: List[Step] + final_answer: str + + + client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="dummy") + completion = client.beta.chat.completions.parse( + model="meta-llama/Llama-3.1-8B-Instruct", + messages=[ + {"role": "system", "content": "You are a helpful expert math tutor."}, + {"role": "user", "content": "Solve 8x + 31 = 2."}, + ], + response_format=MathResponse, + extra_body=dict(guided_decoding_backend="outlines"), + ) + + message = completion.choices[0].message + print(message) + assert message.parsed + for i, step in enumerate(message.parsed.steps): + print(f"Step #{i}:", step) + print("Answer:", message.parsed.final_answer) + +Output: + +.. code-block:: console + + ParsedChatCompletionMessage[MathResponse](content='{ "steps": [{ "explanation": "First, let\'s isolate the term with the variable \'x\'. To do this, we\'ll subtract 31 from both sides of the equation.", "output": "8x + 31 - 31 = 2 - 31"}, { "explanation": "By subtracting 31 from both sides, we simplify the equation to 8x = -29.", "output": "8x = -29"}, { "explanation": "Next, let\'s isolate \'x\' by dividing both sides of the equation by 8.", "output": "8x / 8 = -29 / 8"}], "final_answer": "x = -29/8" }', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], parsed=MathResponse(steps=[Step(explanation="First, let's isolate the term with the variable 'x'. To do this, we'll subtract 31 from both sides of the equation.", output='8x + 31 - 31 = 2 - 31'), Step(explanation='By subtracting 31 from both sides, we simplify the equation to 8x = -29.', output='8x = -29'), Step(explanation="Next, let's isolate 'x' by dividing both sides of the equation by 8.", output='8x / 8 = -29 / 8')], final_answer='x = -29/8')) + Step #0: explanation="First, let's isolate the term with the variable 'x'. To do this, we'll subtract 31 from both sides of the equation." output='8x + 31 - 31 = 2 - 31' + Step #1: explanation='By subtracting 31 from both sides, we simplify the equation to 8x = -29.' output='8x = -29' + Step #2: explanation="Next, let's isolate 'x' by dividing both sides of the equation by 8." output='8x / 8 = -29 / 8' + Answer: x = -29/8 Offline Inference ----------------- @@ -170,4 +264,4 @@ One example for the usage of the ``choices`` parameter is shown below: ) print(outputs[0].outputs[0].text) -A complete example with all options can be found in `examples/offline_inference_structured_outputs.py `_. \ No newline at end of file +A complete example with all options can be found in `examples/offline_inference_structured_outputs.py `_. 
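The offline ``choices`` example above extends naturally to JSON schemas as well. The following is only a minimal sketch: it assumes the ``json`` field of ``GuidedDecodingParams`` (the offline counterpart of the online JSON-schema option) and reuses the ``Info`` model from the earlier online example.

.. code-block:: python

    from pydantic import BaseModel

    from vllm import LLM, SamplingParams
    from vllm.sampling_params import GuidedDecodingParams


    class Info(BaseModel):
        name: str
        age: int


    # Constrain decoding to JSON that validates against the Info schema.
    guided = GuidedDecodingParams(json=Info.model_json_schema())
    sampling_params = SamplingParams(guided_decoding=guided, max_tokens=100)

    llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct")
    outputs = llm.generate(
        prompts="My name is Cameron, I'm 28. Reply with my name and age as JSON.",
        sampling_params=sampling_params,
    )
    print(outputs[0].outputs[0].text)

As with the ``choices`` case, the schema only constrains the shape of the generated text; the prompt still describes what the model should fill in.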
From 272e31c0bd8640c15e85211c74fc9b428ad86902 Mon Sep 17 00:00:00 2001 From: Travis Johnson Date: Mon, 18 Nov 2024 21:57:10 -0700 Subject: [PATCH 043/397] [Bugfix] Guard for negative counter metrics to prevent crash (#10430) Signed-off-by: Travis Johnson --- vllm/engine/llm_engine.py | 2 +- vllm/engine/metrics.py | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 9a2d73a020c8f..e72dc81f35b67 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -1716,7 +1716,7 @@ def _get_stats(self, # not counted (to avoid double counting) actual_num_batched_tokens = scheduler_outputs.num_batched_tokens # type: ignore - num_generation_tokens_from_prefill_groups = 0. + num_generation_tokens_from_prefill_groups = 0 # NOTE: if scheduler_outputs.num_prefill_groups > 0 and # the len of scheduler_outputs.scheduled_seq_groups is != # scheduler_outputs.num_prefill_groups, this means that diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index e896bcdded2d1..47472c274ccb6 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -512,6 +512,11 @@ def _log_gauge(self, gauge, data: Union[int, float]) -> None: def _log_counter(self, counter, data: Union[int, float]) -> None: # Convenience function for logging to counter. + # Prevent ValueError from negative increment + if data < 0: + logger.warning("Skipping negative increment of %g to %s", data, + counter) + return counter.labels(**self.labels).inc(data) def _log_counter_labels(self, counter, data: CollectionsCounter, From 382b6a4852b9afc9a740b02736688e20f7d58446 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 19 Nov 2024 16:54:58 +0800 Subject: [PATCH 044/397] [Misc] Avoid misleading warning messages (#10438) Signed-off-by: Jee Jee Li --- vllm/model_executor/models/chatglm.py | 5 ++--- vllm/model_executor/models/qwen.py | 10 +++++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 625e31bb0d368..2ea592aaba9f9 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -575,8 +575,7 @@ def forward( return hidden_states -class ChatGLMBaseModel(nn.Module, SupportsLoRA, SupportsPP, - SupportsMultiModal): +class ChatGLMBaseModel(nn.Module, SupportsLoRA, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() @@ -695,7 +694,7 @@ class ChatGLM(ChatGLMBaseModel): embedding_padding_modules = [] -class ChatGLMV(ChatGLMBaseModel): +class ChatGLMV(ChatGLMBaseModel, SupportsMultiModal): packed_modules_mapping = { "query_key_value": ["query_key_value"], "dense_h_to_4h": ["dense_h_to_4h"], diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 3978c176a2144..44ce6eda42943 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -870,7 +870,7 @@ def dummy_data_for_qwen( return DummyData(seq_data, mm_data) -class QWenBaseModel(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA): +class QWenBaseModel(nn.Module, SupportsPP, SupportsLoRA): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() @@ -1024,7 +1024,7 @@ class QWenLLM(QWenBaseModel): embedding_padding_modules = [] -class QWenVL(QWenBaseModel): +class QWenVL(QWenBaseModel, SupportsMultiModal): packed_modules_mapping = { "c_attn": ["c_attn"], "gate_up_proj": [ @@ -1062,7 +1062,7 @@ def get_mm_mapping(self) -> 
MultiModelKeys: @MULTIMODAL_REGISTRY.register_max_image_tokens(MAX_QWEN_IMG_TOKENS) @INPUT_REGISTRY.register_dummy_data(dummy_data_for_qwen) @INPUT_REGISTRY.register_input_processor(input_processor_for_qwen) -class QWenLMHeadModel(QWenBaseModel, SupportsLoRA): +class QWenLMHeadModel(QWenBaseModel, SupportsMultiModal, SupportsLoRA): """ QWenLMHeadModel is not only applicable to LLM but also to VL, which is not conducive to the current integration logic of LoRA in vLLM. Therefore, it @@ -1083,7 +1083,7 @@ def __new__( config = vllm_config.model_config.hf_config # Initialize VL if hasattr(config, "visual"): - return QWenVL(vllm_config=vllm_config) + return QWenVL(vllm_config=vllm_config, prefix=prefix) # Initialize LLM else: - return QWenLLM(vllm_config=vllm_config) + return QWenLLM(vllm_config=vllm_config, prefix=prefix) From 5390d6664f65d84f37a5fb524e967b01baad9100 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 19 Nov 2024 04:52:11 -0500 Subject: [PATCH 045/397] [Doc] Add the start of an arch overview page (#10368) --- .github/workflows/png-lint.yml | 37 +++ .../arch_overview/entrypoints.excalidraw.png | Bin 0 -> 123422 bytes .../arch_overview/llm_engine.excalidraw.png | Bin 0 -> 178116 bytes docs/source/design/arch_overview.rst | 274 ++++++++++++++++++ docs/source/design/class_hierarchy.rst | 74 ----- docs/source/design/plugin_system.rst | 4 +- docs/source/index.rst | 2 +- format.sh | 4 + tools/png-lint.sh | 15 + vllm/engine/arg_utils.py | 2 +- 10 files changed, 334 insertions(+), 78 deletions(-) create mode 100644 .github/workflows/png-lint.yml create mode 100644 docs/source/assets/design/arch_overview/entrypoints.excalidraw.png create mode 100644 docs/source/assets/design/arch_overview/llm_engine.excalidraw.png create mode 100644 docs/source/design/arch_overview.rst delete mode 100644 docs/source/design/class_hierarchy.rst create mode 100755 tools/png-lint.sh diff --git a/.github/workflows/png-lint.yml b/.github/workflows/png-lint.yml new file mode 100644 index 0000000000000..4932af943a07b --- /dev/null +++ b/.github/workflows/png-lint.yml @@ -0,0 +1,37 @@ +name: Lint PNG exports from excalidraw +on: + push: + branches: + - "main" + paths: + - '*.excalidraw.png' + - '.github/workflows/png-lint.yml' + pull_request: + branches: + - "main" + paths: + - '*.excalidraw.png' + - '.github/workflows/png-lint.yml' + +env: + LC_ALL: en_US.UTF-8 + +defaults: + run: + shell: bash + +permissions: + contents: read + +jobs: + actionlint: + runs-on: ubuntu-latest + steps: + - name: "Checkout" + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: 0 + + - name: "Run png-lint.sh to check excalidraw exported images" + run: | + tools/png-lint.sh diff --git a/docs/source/assets/design/arch_overview/entrypoints.excalidraw.png b/docs/source/assets/design/arch_overview/entrypoints.excalidraw.png new file mode 100644 index 0000000000000000000000000000000000000000..bbf46286cfe5d0820e4183827f9c2b852b005b4b GIT binary patch literal 123422 zcmeEu^)B1}casAfPfRAfR-IqR2?cD2;T7l(blg0g8-tNOyONib#%tbSm9F z#1QX3eg(as_j&I7AGm)I{it*1oa^js@3q%j`|_5Tl{|8g>fo+jyN*cRlu+EYYk%;r zU3=6H?1R6#^I5DEe%o!UD0y{PS_944u3hxIq$I?Z?`uu=99maV+T5Cx8$(l`@Q%C3 z6eW1A@Fa>k0DaMq5qPD8`TJ}emUC%+XU=oF;XBQQmp8+E23+xLDMT*I(>-Qf#K**T?fwrxX!ag_ zZS3HG?(l#5kH^I5kI^^Qy?U^V=m7n{|Iv>Ui|TmsLgznxHF9@k_1$~^)8*jHw1^mj zOA8L%_>Yf++@kQmzq?=m`rljpw@dloRs6q${J*jIZ_~!8-rT{!a7M-7#8J5|l#$w7+e)VKb*j3IKFx`{6d ze_!SaTt@k#E$Q~X(9<29?98p@vFtYP%CpF}D&J_$v+AQ=cU`FGt=MAID9AYD(0&(_ 
zMWUP}P~AE)OYNImBY>l_y5QcUeb&D09eK>eVOBxBfKp-z~|<4H9Gb z?c9KK$Nr5hB{2|%Q1m_^pcKQLzrQbhl;_0)%K#y!(V)%He5vwg(^^c}s7XSb*9$#(&JX z1I{R;qJjw!5DC)cBmxFdBu9ZJ(+vnTIU^E`2#69SgOWo75+#EIN=9-91<6q|h~8Dz zbG|wAeRI$Gf1k?_x@y<1T~#}*z4qGgde@~X2nO592I43SaSG=fYweTA)z931tUXtvB&o!AGs&KNhB&5I1@O9sZqcg+-r1+A zWis?Y^L}~vu|lzU+q6Xcke7vezDlEV7W>2o5WA4cnLz+u4oBNHT=YI3F z@`ReUPAI$E+RTkG`^$r;Vbh5QQ`Tzx2Lp|6)oxqC6=R95@!-#gKgB}ZxS(xwHi(?b zOEMiKnd-L*h`mRXg3Q4o#gW~wB!)1W3`$zbdfzO^96H{HJaq*;X83&3$S_;)Y?m5^ zc6GwL{`REX#HgPOc+;K!$_?t9AG`Kdj-|?dkC*bL*Ix(cYRP0T1Em$}VUCk&mI?wm z_D1#U;R(S?vMf!8J}uD!?zSHQ9n|UTl(GU-ipHbzya2FzL?c*j@<{|SEPv?r%YHXu zCPjJmy@3)xHBG&r?+>+0Hx@Wa^sia+WbJ#SMQ_Z!mk}CqU8_Qn^`Skw1&4&39mHq1!SE!ReiV* zH#WdUT&Z!Cx7%~&Qhk45!aj>t)pE z*mi|U`+2giUq@6|=6kJqYzqD`>E?p#lE^8PM2o=(tyQ|z_FVG<01e%t%VWPCEuEM5 z6wK(EexrdcM1NV_(V0qHL8=Qw)htBKB!k$Ybexs+{4*bY>L1gU+kKXK&2j^UR>X5S zlXR1UyfiE*J<8UU@2r`pgtf2r6WCcY_t8b_T$1;u#W9vfQM_DWNXo4j=(!8EmJoVt zk#j_&aeIvYKz4j7AD@oaAp7q>!F=>FH9x=f3}6VPr5bapds6MsYkhPhPpcwQY}5L z5-Mp0h;&))gL?8kFLKul){8X-D0O$hk{DPFn}q(n=|l72o@#|XCrVuDe4vFhCRBZ7 zq1zQkg#1L7rx{=f5Vg=H?x1yT*#37^Ey82J@!=Xm>huRjVuBFy|AX>n#pF{Y+73+$ zECPfDM1W~1kv*vHah5hOeUzBXz2!k*Cdl*(bPpJrI6Gi{pGu;fH(-da!Zv2aVpor9 z^jL2}=R-qQ$k#~kb#Hw#5PgKVljTVyf7`yd+t(eGMOB>MbYo%IL&J(6a=fc zYbS*uyUuyArT+Nr7?2#`5gv-Vc}L5bF@U`1v?LT14$0HC(&v>jJ4^+4n_p9>Yx5;v z7wuFoeLbrzWn{Wu2J>`=n2w%()=FAUd8xVl0<1P3%si4co;3`NUmGuPcrB&~_*riL zz3IN)^hF!0UfRZlzTBkGYsm#)%Y7PKR@n9vE42#pTaHrm)<_R}9rh0S%DTOAllFM} z?$FF@hTLe?=wk{nPZl4CX|<lF~XU!B;SxcLu$r4QJPNg0;T& zJMKEp_X{{^7N~i3dpSvT=742M>o8Mq&yyeO3j?!;K5Z9PdX-q&840H4DUPudt9`}X zLZf3Z{eKCA(ph;?24u$f14~ylD@06dmJht!)7*j!_$qc#25gs12NE4y^KRmiNL4e4 zdy?mRG;py-UJP1)oTZ+jp(T}tYYdC9uGp%Z`USjeJ<_-bp|xqeZ>iZ!?jIww>PipQ zDt9Z`0vujbN}T6is-uplcQ+cjEBV27$T3*u<5y+YBwjMwrTo^EUs+mtbqzFXWNIHZ z3*W4iZO8b8&cv;EJ*V)aHUVQxP=aN3H&I8({`lRIpMhnx<^!&U!;%}c8 zhDxLE{}?-m@6OZ;d%+uD)Q3OS$UiH;k}+DYdEkroc2?CyT1u?;+eUNtSrtn1T7wfy9P?c+L+2=o`xTBcDMPD^ zUKDfS`xw3EjJ0qF`kbf`i=L5^h6#>$nU7B$yM8#hAoITJF$ay~L_aw0#!O{s`bj`ZI2b$>2>Floe#pP!UKZ~xlgmt?{c38XXgFJd2KF4#X zRYXfa5H!hJ7!_H)D~(exY1=*&!sedk*sXlI;Im7K2z!esh55*e2`jREm;fF$Lg3jQ zm{N7AHrv!Se}1~4c|5Uu)=>y2mmKq&-j(L!q6_#b=~%FfMK!o;9_&mWtYuA6t+IoT zS#UON$)X{#o4$_P$9bx)+Rf%GY|0ik_|HnqDc!Qg3HR)H?-7@aw{TmZ>+A0SKs~29 zTl90N-IwKtIrM^L#DO<+AH^F%;)|@8GhJsN-JHFw{4sv_9r?)V^3@HT_~dG{2&@su zb-Xr;JzCF|XJ>nb(b#$Fb?-^mR=LnBf4Dv4dstXS`<@oJcdkHyg+X{AGC!!`IK(l( zfT~dY@bs+Von80D$F1i3k7;?i$KF8E?8~6qoj}p-Nq>x9)AUTymMR9KLp4gq6yWp1 z8MQ}JW1aeN-J+jck}dVK*WB~QQ!Gu1R?%t)TyW(*+CE~xpQDO9aS3Y{VoN+XbWFLU zQtR<)yPJu;-^^D_MJDg$nmjG&E|GQ3uq+kFXB(&cc%Q|zDUMX!3+UE7JOK8G-k-un zpr0Nqu$=qg*3uWg&kZ6n^dZ7*VCC9kE>}ED0KO^wb8kIRN9c?^Ug=chnh({~d)plu zD`KClRp#>6bOY8evi5+Nx^Xs=qH+6Y>dF+O-o2uwy8sgw()QtKIHL4I>|@~mM7ApN z)?;r)ASx9p!a8n}hOui-c)ZQ0A3)gbIWk1e(li@OptOmOC ztw*jDp1KN}o<%xMoN}S zYwb*;{bUOsK|~jifLnoA0e{Gl+8;qY_C zB(XeTRgATa;YiI5y=S?`^_1n!-D$A_rl1hAjmyDcbb!r8B-k3Wng|42OJu5goTU~n za%(T8nooZOqZ@dnM*spsXpi{8k%!1~Zx9f0u@+p9Mf?bJ2t;V2?b>01%AKJ_`ks|T zdE5gGL669UMmv-g7EkLX6&&DCnQV=KCAB+6Su4(3VM>{oqVW~CknzVr0R)?5T7~eN z3T$7|X!LD&Q?lyI#RZ%>E8$tDyd?5W{;*w3q(Gx~1DL7Krk5OhssHN)=N$uaOZrZ= z-CN|AF(2uk9PV%RjA_LiCiPx=PBFOBG?w|?9uk#_+(a zV#*mXbw;;`HkY&~MQgsyR6K1+&iL(J?3KfAD&>lCSMWd_#RLAhDbt_@TWaMv%Ea$^ zcx-g92V+VXeTYvBtXH`g$j}(-G*@BB(cH9r>Fy2s=m&p#0rpPeK#_@)3A~>TDbfLe zs#@zDmrAa}^*JHQWHsT}P%iGaMYEB3NJha6F^4IG{sM~=>A&<&rSY!={FkCKMO~~K zSflX-ppQ+zw+SExi@k?VpS<|a^4uL`&$TX1`3wyr)ALtn{`iy&4VQOWY!zZiA-t5u zAgRD&;W5a08u2Z4Nw}WW2|FITv(irqL6(63 zc}?gyu{T*w_oo>2;vUQ7L*mEA1DL-x)HJJ6xV^xl2iZM7Nfl{f?zX9k%hAa<`%)Se zNKASYJr5*WndWp1xSg45q0rDncVR**5dj2}ub{>Ek``r)u(ejtbv_GAz^(Ei`vL{n 
z7U1dh&#l<)2Xjahpw<>k>+*ov)2b+jRtectm!It>%7d=eAD@e!pap- ztwP`MN>`>}#F zdDF}`CKcZc@B9pjp6Y$|;L_`iwvNV8&m-@Ee3@)@xF<6+yUB5DEXn9G3WzW8{uo*Ep!g4W_kK0I3wiReMs)G1_#X$QKu;W+OjCc!{ei|rms_8RWm9H0~Yxq5^F`$l@ez7*y2n5H{*as zadz6yw4-?+RY(tPWc!U|V0)V)_)gt-1_xM;T_ZhyjyhjSJ<50FajzoY#Gxx3Ww13q zdsBBs>v)?p=e?fqcd1A3mQMiDOv%@8R0ofr1g$rU*9KvN-8$q5GyVi@-{t zEaUdqa8&JJ%VJLxe^LMv_bJMi>~Ni~p%IcZk?exDX^9XVehlcp+CS&}Z{`Gt0xY+w15*Le%;3fyW4@01t!UHvukC3}e>B z9p-x9dJk|!>5!Y^9u>i-+qh5eMOOB%pal5KI+adV-(v{SNvipV;2Sy}&I(F1WFr&Y zY}04!|6;&zN&SQE0gO{>o`9s&H9!xiTfiI<`)08(&$PB5Ja7oIn?nX{EUdoL4Cj%L z{dBzz@lPW2zK)p1AB6gL->N$Eqo8ydP*^9a9DNb%55RIfjE{wPD;BK7VCz6AvIC;j z8ojOn%&{3NMb}n1bQuBC$$`ASr+74f;q6)cS>Rc2%VF1x6LZd@rPPpiQtAkQ^!q7% z=y29r>5LyOM-#$1Y;rF7;%H|3mouUc`}0IVQ_z7%oN$_fNV-V~FuuXuv~B;fYtV?b z;U9SezXJDv`gUh&+^x24eO$gt(|(g1UZ<9{A`Wb$;kiI3o7+@4~l`=zCb$@hTwXH|S|vP`hxL>E6~Ubs{3{ zMMRaaUEQyp01O4ceHhX>FS=aGD03$M~ z_CsO!G5}E1*Qb%a43G-O29-Oj(__V~j-!DRR`HHW=iH_k`CyJixO6p#q< zp(|azSM|=oJbB@QB-3r1O;1Gc!_N@VVDuo2H!3Q$MUw;1(%6c)Pd|4 zLaE1`h(lmDtOQFdu-ggJJ}vK)1-^$#kz>^Xr z3TSwiz=dc;%$_5NP5`tQ@3yfpFIIufMjp^Yco1{Hao!_dxfq_sOjhZYM==N~*kDUr zD+z>(9VYR|)C-jWct!LN0mKo>dZgUaIO66ZA4wz*s_NcJ2SAi^BE#UTu_*umQpo5` z);cE#ZyjK3^MJk@g}6P4KCBpgeoCk%-)cZGP1Ozy9>oY04^NRx(laJtzuDB&Z^|Hm z0;c`K2GN~@k)I!^?BB+iY<qkolbcupVi{EQ4MRU>)m7QM z22d^QZvFea^x69bWkB^STf|#jnSvN%YDh3-TG&i4M*8AFTEZAP1#I*QXg|LT^9Fy6 zsN;KpT=ODsJN=w4oMcyGi4?!>dcW znTuwIdmH7#I6md^^`Xo$MWv@es6GUGLN5JJqJ&1~T?T|hL;sVeDdF!fr=}38mGB^uO_ILIZz_syiZh+5cWKj+NTMAwF zyAT>>0t9Ei&*(z(TI2?F+p3COatbY2y$dzwT^oTI6wtpTEFf-(9aq)9Y^>tB7$k}l zHRGrU_ZzG}V=@GeM^ue>l$*n~EXvotkv;in087WRB>UE7d<$?}oibe2r-}zP!=mHO zjc2T-_aIn9GOUi}JyCCs`Vi33pEpCS6k3NFzn)NY51@-jILxc;D~s(XwY>>cPP_f! zh1h=bdiVBc1cEcT+H3s0zy~!~f|^LkZAR89(IWPla?2N0llfhD!MBhXzzcC}J7BD% zBR-w)8c%N@`Ex&()eWOVH=4dTIli#;cf{`voX!M9a(O7Uj>2bK@LjzRE0RcXam)fp zn1ndn(UMm<+qxc8mt={P382RzFl7Ai%H;Z?1Kry# zv&163+PBA=5U*?wPa%ks!trUIK)uS|*qiYq-(3f%{rrqrw0|AS{u}xjDD7u0aPG3L(iIKY-BbByM7+h{aMP5r% zrbUcQL=$za=D}zV)txubE>WM}bBZAdM_yr!Co!JBgTZGn53ij?75z=N!09D#J;~g^ zKaoYUZ!SMkrsufPhrdfWiUa~%zjVHwqJ-hdtA?yOwrSDac#xK(ia`Apr(?k*~;O0xxE zuN+y0Y}kar!!o?U=j$o!GTYiO(3Ub_Vc7_W?NR;)^UE8`OKu>2;${#BVw2mWC_mzDb7tiOf~yEg|tDAUJA}I2(ephk(z2?TCR@lUu#L9{M%1k+u*_ z1;$Z`6im!?_z0d7DsZr|Sb=qPWAZJ>h}zFV(1pJ5cdTb5o@%qS*sfukbG)9$#=34I~9)l^mkH!~TY_ z8hz5c=|HA#X*Z{S#UR_57OoK1=J#f0Sl$&utf)JU8u!7~MS9j0dd-GLvK<(3GQD!U zLoUfqxJv~Z$xyDItKbI6i-qC)TPJrSet<9@8)?RFH z8r}dYWQL;U)SReDUsIYQ`31x}N|TyEMw>W8`ciUAS;ngErlfHE-lvjZ5Zg6DmW~GI zkG$sndu>2E!JF{QNFL@9;O9TN+e4s?h&ta;fqsYUF@{F==Xug`P+<@ain)qc-ZheI z8zL(e?65V^ORO+1P+Uqs0DUg z!Gn2{=Rn?TKtC(+dn*o01nDY{UO6uYp$-`tq-{#5m(HqadMY5W7KR{Y@^15%shTz} zba6I#@Li?vCGC8+4PzrRXwySb%C~S+>8FxuR6l&E8Y~yOGG5{MK&MqX#e;-ijGqI{ zaCa-%PB>&u1}?d^DzWmZ^E7xSL98^mI!KY(fnC@#T}9uo{y@$Zd%MEZu=Dg1(KIr- zWn{>_e><>A6&)i*DYi8CL-)}mNg!v3`Y@ug$tsvTamT6RFpWVVdCWDmDvw>QDpttb z%WvM!eD*ziO%v~a^tIg%DhZYj^HptufK3MlE!A!g^w;8JN;z6YKv%rlO(BIjoZcT) z9UaJLimn+x#d+m<6b^IKS|*dK*GtlYJU1d?;4K6r>C}gQwM*jiilOv@os=}4Xf&8F zg!6Q|WtsbG)~od*+H!U*k{b3|$+m_xR0B+ zst#QilTuR&Mf)evvbaZ3PP#Q_mSZzT@Zbyxou!^SsEbrD$Pj!EL-1RKg|wffGW5?A zj(EBgq!Ebb{qtXt7nxS`fq1){XjP~nBagJ2m4#1uog7?^*4*F!)gOe%l<}A%HBG}- z=(SR=c3wB%O!@7;rYUH)AQ;sLMTo!sJsdik`KCfuOjlFn%X%$)G_tSv*8{@0@fHq1 z#L3G!tyboWzy6od|A&};JO|%dzdULgNEiMAt#vS*Y_wig;PAZ)5N68A+hS|K0i6#? 
z2BHr{|Cz;-(P=fW7+a&?Xm>#Ak3{TBv)LbPzR=pdO}xhF!2Byea&+W~O?FRa6Taa- z>w{)PQd4onGuXo{^Z9<)s$Sa5K7J^Up%osxa@l-fM`Wb2pRkSkA(l;Ajp_Us{0qWWKIMJ((+KEHTeeb(SE;u zx$m^0g4y7i&v=6oXvcqh9QxqY1g}zNHlg-eVY+B~%V2S@5`pHdoIa?^dWmaI@oRcdvAb&E(FY4 zd7{_S+DhaPTtK6r-jwp%s5}-Vf>5s^Jt;Zax8>h&gO9AGS<)`*J4;7CY1G7%fPMj{ zj>Elgnis}shKV5T6wt7}fmUK_VK+wMc4(e|{o`dCeKnM{FkR;k>zmBjL+7qsJ};hp ziu8t?2ibBmyW83fc6)yQY7xKlEP?|j&ITl2F(+TMFxsYns{(IPYPQk2GM>dATAWx$ zU!T5-cb9EDf-zuFvLiR8Db_aV!!uQ3zaM<`<-F2WkZvr|4v&Bxly{DN0wZe)sLdOQ<)}srC-?7n!?baTaxOlvXC1Kiw74GMpFaz`yCA-|t@^NrP;MFbg{U!-)A?hlkPduMNk4c(uRf z^}Y#pfBx-Ti23)wJY7i-nfGJdrBNN>e`@&s*N-r?oqie+^S3Acr&I%(^&Rn_TO8zr zfY$%ZoBsbU`u|!NUAo^xOB<+iV7-lu?9g$C8+RNGZEP&fjSU^ltqI&t1d~fR7h^+9 za}zs5XO6q})&w$lCMTSoJzQlHW#VDtWWo`Qtxe1crf_Ox?_h@= zchDm~&qary^1mKl82(g2x>R0_H4@a@m|QgKmOxX4qs10Vr(X%*6PS0fkQ+zRFgVi9 z7f2a)2tTK=gnsmjz~?6xEQ!gTY@E4|?JZbwPj0fJ>HLD|kDzLx`TA-*dY=)CsX3a6 z_ZJQeiHdsOP!e4{yvgZ(MV7&e-;^EAg#XbWq=_DxL>JWDLXY@89rYi3qKCp-7WdC* z?FJ9HeZ}G{0~2h}R6+mmSF&s$FPINccCPG7@hZwZs(tuy?vnqn={n2Yw&7J$E{`vp zoHk?cOe};QAL$vPX+lp%%eu6M=AXCU+ZHRjXvazzA*f+ib~F2s%NQf+EZv`3=Wmbv z`4~dQyi@QBdSYTC#5H0+pxc9H#K{SxsY2*~UXBL2b>)YxVtRWpgE>mGS~Di*Ihw`j!4F%9rc1~% zMsV9;{Nv6K17g(L&d*%W-q}|Y)@UY^X!JV&ms0ul=>U-*p9+`J1l+*0#u2-nT?I53 z|HD@gZLK{Uig2VSnuzqQF#DmGc+vvUU)~(_$8Zb3t+(W8+xo+1Q$RlwO+!BrBjEej z2@gg~q7h5LGTnaWfVGoOdV#-#UgWvRKYX`O0sqpKh_roa0V2yr!y^rR>T~|kc@ii)oXdygd(+2J2vR z)vSTp@T&}(j47&d_I~ZHzxbuJm_r-APv}AY;{3exBT}zN@8%J57~{v{UwUse_siR+ zIW|VP{4C3Nb&3`sHMh|KTA08T=~2}tLNjPlf|`uWl0CVkR5XRpnxB294L!u5$%2k$ zRRP->SR>NIgCYhzwx393)Dd3fHm zSdh(Koa-!Q*U>`zGmi*qlnD=6Szwt=e|95fU<4*V@;y?VDv{Wpocew( zhA3{qnM^9&A17-pM>?!i`R-SMX6HX&7mB9wZxjgX4hY=VGtE*(ukfJBJWWZPmM^|x zvA>whu-J=e8fl30j!66V{92W>77O2)nAAkC7_D}=XycS`H<1J-?>7~OTC9U6X*}|U zVT8U7K9Kr~WqQK!VENJDb(Me+l9Uv1yWALomVn1%i+26T--g?d*O)GJs7%&PNm7V8p*I}8o1Y{cj9fIactfmFnXNGwS{~^^q;EC)9D=3^ zEw5crjN={U3!M6eb=Xow3&8VPNtB^SxYy_pf9L}XT~dCJ7Bt1Mb2Ydzi5Nu&;pkEw6z`b79`V zGVxeuv@w#(P-h-ND-J{I3j(4un%Shh89Qi>r7Iv4)h0)WkRp= zG(KAPNho0cy0JDqh+Yv{v8%>o9+}zSs`mY)GK)46kI>}>rRGQcjqQniwioFyH>fmk1Q^s_hTM5YswnweYPX$9T z_+Q21xt@Hzix$T7W&6?tRe!7zqqNr#G^Y_3&s+_HaIzI^WG%TlJkw4LtqPJ?ztP_y z+8vMw7efb&*`goXK7r{2F>@ z@{&{lmML~+2|W}{TqN~u42CqJ2z0EGbRQusR&W?gV30)T1|DNxj;-0z%Atg3qUeo6 z@@R>8wR@yi!)IWbT8J^aoRfhH({n!PMV|DxXkH#QpAuOt_LG}fpP`p@5dUpU+2Eav zrf41^blCCmHF#f3#4@cnrB6h&m`;bvdAvby<$G4JJJkGheQ9Y-Pw|BfdJF%CvqB-a z##8@9=1*pvAz>k3FK6DuI(Xks#M*sqX2IIYVja+oyrWLqw+wBp`h7|FC4Jv2D~dqh z7lYS)9{A)vDl9R)_4OrKrX;qhek_wXsd)v&J~<9N#M+@RA@C;MK6}%9%8c*kn=P!J zdUx%0v_RwYhUj%XdWOsGEEz(NFc+~7De}w8s1C>@_^euq*f(Dzx$mC-LMoE15Nt=nRTMr(zz%f=!Zi5@yR=^dz++&t$Glh5wIwW$T_%7h@Z@WvB1ANi|NI zT=`92Y={bzL>X`WwpJEHR;2j@hhC%C1&huU&xF|G)X1E{CZ4Mtpr@^*KTEwW@mkS@byL= zmO!c^VXU1)vvwsrF|8^ZeUiZLR?1ME<#*+si*BY@G;XBpa9rh_e4&trHIkYWd9EaD z#`zGe?XB^K-X$;}=>w#*@P~n&}?pokB-4_`7vO;d<6p1E2o` DhPovE literal 0 HcmV?d00001 diff --git a/docs/source/assets/design/arch_overview/llm_engine.excalidraw.png b/docs/source/assets/design/arch_overview/llm_engine.excalidraw.png new file mode 100644 index 0000000000000000000000000000000000000000..ade1d602a918726f5a407f9422eadc0b9c25639f GIT binary patch literal 178116 zcmeFZWmwc}*ETL8f{1|%f`up`0;04Cf`W8|AZ<`XDk%+u4GMyENq5cAZ4kmR^hl#L zL#GV&u2J0LzMtcH-~Xrom-~b4eT*{mo9nvPTIV{?b6qc#6eOwk((m21YZsNYl(@>S zU3=gUX&5;fyfQj?u?7A^WTPT^V^?P5iSb>#7(O_9 zFI6sxsc_wl^*pcoKJd!kGcrt!suWhw##}S zF*(Ek`eR+&L-c|o>HqDYG5E>BzkBn4|97Gc?~MPiPj`^~qNv0LMe_fC>W9ex{&P+K8#(`tod198>!*au_hQcs8Y8acns;)oj)Yqc4b=sx zWa?Z=(JL!H!m4ua;);0*tB?8Tu{Zw%O!1QyJHIy`+@Bi?K2j9V)$Ia(w zpWjAf*+&Jehn@$rs!A+RbsO{8cIR7A(C+$JJJwA874gBFq9xy=+kK;qY*@R%l0)&@ 
zYCK_eP?VSGJui`;5PGEKwmv6$<-vf*Lc*5w*5=0iXtev6<~YgW*jE?drfVW)UZ1#f zPBBX8?NtZMx+vk2c-i2K`_k^p{2L42ll2faHSgtAf3^M4M5Ge0Pq0P`+HuXZ>J&a` zYN?P2_;iQGr?ekcWHTx=SnW%q5JS`+CG0Ho?!v87{ZIGe$v&@4cAnMDHF)^=PS_b) zv4OS})%lH;{;PB=Y3do)n>|G~Nh~4MOfmwa(zNH5D(lJ{L%EZZ9FKYbYoSEhDGy5d z4wuw)9r^`ZD-SgdRP{RabCfvD@KLAeSCrdMb+sp3mD+wdkGiS}=cBXSwFF1l1ONRe z1-*H*9p{InSxD?x=ZB54ah!X4To%xc!A3^7Xw(ndCgZ#HK^(c(!!q!!FBTH6FF!z? z5_YOpRTMt@Iwkb*zksr5@=*pq`n-atPtN>;#U3e@-#@>KtbB{WwWkHQW$JdLoOKpI z)~;E!aWxIs2lGb_Bn0cdrQy&zwo)+lwZy@SPT;|vrn_O0y~)aP*<`jBSb2AMQWVcj zLnyalN4h4L`=*OKGjGIAcS}>zrN@*PFP*r~{~tR9sN2-MUqt=)$!DBjU%=GEep{Mf zwwq8Zn#%9?S*sC~gf05494Ap{mQ`lN>lvh6zS8HMQ5|BtIbAX{^zGSczVV4gTx-%B zgD-(~$s&=l8UsJ(26IiC8AEvxZ{@=IIBwBu<(q5LaTz=}Xb8D{3tRPhdmH`%>Ce4) zdA3QNx1rj8Nb)p~pz!sHv@D;j_r~PRS_gNX9jqBFeT@$14<2t#(w2Yl`KX3~XBK>4 zZC}a`&3bN)OdU-=i!K8pgJSh${OF?}3k}k5>v8pB>Tk=g(IObRqj{oNFNxWNVcTSWW9)XkVxRbWN zgw;^q&#-|sD4g1So{dGM)x06xP50Ne- z$RpTdI9+^B32Wh3W}?3@q|u=xuQ~*kIxY}D`|F5*BCKCXp*%bF_E~#OG1*(9FtwdNq5l2stuARvi4d=6nkTVTP9L6lI zF~JvL?j85~YZc?e;IY->Mn7yP@qu6u`8ri9dmV>fJ2zcHqmioG?hJZ?2QiD&z1`gd zTfX#{?|BDu>R*e$Vms@1Ml9ZWWjbn)LYOep~O- zF@eYdjL24?pxs1mqI~#w+h^0=jD8cAfl|UKmmiX+T@A{H@k_X?BHbo`6){oNUO4w1 zS3eeSKS<^67^odwwqr`z&0DEz{zN2Xg})y~E6f4ck%PgJ$;x~fmLZ#R| zO~iJ@TqhZCNqB|*=5tk26se@|G@jticg8QQsbH%V-T$!WWK_M{n-f=5rps3JME}YV zp2u2ZNqKU8}Jl|{nqmk)dCH*#mzeRrSRn#Fj3g}d5=u`2=epDrlHc#OH@ zG|m&Tx=uscV8Pp*y?xe?{%C=SDb1mYSEukouK4jJ>*kxZH_=hI|At{PaIzft|2|iV zyPcLNS!c>tqHw3IM;ch`gE<|3F6O+w1}&!^I{_pVRI&PWW#9Dzu+Z7+a@Bs)FW0c% ziQajdK}2TXv(o1RAu*~+3bDgds%ePOdFSb(8j3|6hOeUAlVp#$?DZ^7B=bNUsmg}> zo2k_>9t0!O7_%R7SgTJ1S;ng!_VlkX6n_O_Xi{`Ion$+Er0;%xTIAx*mq+~1O1>_9 z4FDc5^Y5LtmLGityI*b?=L(ITiMi-8BV**UM~!;TEEN~ z!+BvzIc{=3V+vY+S)O1VhETT#x_SK>}i?AgQ< zS;X=}@th;XKiPp(ldg{q&ZcQ(U4&!108i-OZk_u+|IIagZJj~<6XvGao0Oq^7Fs3& z$9y`t%AE2u$p*5DM(Di`ZQ6HMxL5F)f1NFDP@@Vd+r;An?=A&j9Wh?Ot=)*`6mE_9 z*9v+ORCiAF=5sRIU z1!_e-z^aTIrqh@1eIjSoFL$Y3IEN?w#y;gtD7m4oTIDMp+E(&*rfAdtT*Fr;Pg65F z9Hx6nl~gw}#Yc$&60_ooUj~g*<-tBFTr8Q)RZ6-m^jC9uFc%)rk0SJU`BxFU168z| z=nTh;D|bJ5x7{{M)jJ&5^rQYA8%uYA)d7pQ261Hu-kS>;k=W1|Zah>Egk~#A+~nRh z*(^?VV{PmoG6+yTz-H<9Us(yvO{Ca(^m%<`X?IdznR9*Wtu&z-!%7dL#(Gkozw>L} zZu0oV<&y*W-|1dD#uyV7Ypf84dVTV$OaQaITygcS)3AdI==3k){8=j$#6Ia7Sy8sQ z4zy`sR>fu)>&%3t^VY^n0-Uj*&$I9JHJxJH@$auZjiRruO(F@A{DbRs0K-8J;)PL+ zgUq637pl%lI&dYA`Tk$0-dh?DQ^=iAn(Zz7z!H+e&&V`OpwZS zavOd__`8QguDKTTSoQUoxS{7;L`~s%-zXV;MZ@v7*beuUPuimy7R_3Z^pZu`^$Dgp zwuIF^RP^WGv1#y?@oW&2k2$`7viIoxWNW%6H=?&Vj{q1KPV5tB2qJ|#f0mN=V&LWb z)wPqINZsH}PCp_1SX}1D*bWE=3+7Egt%XlQPj4@i3oV2$ivbiCiG_UgP8B_uxtnq2 zb8QrHP17UKPP?54z_j1u?XI{bCXZl!w@#C3z>u==7(VzgTSiFc1|3OIBDfsq4Ummn zxCG5)rvo=AK4|IsHIj?JuY@CXQ(wK=rqrF#7PAhm$hkf*NYXrnQ(y^fSZ2w8qHf(&W2N6j;v_s6gq?PkT+ruKB<;Qm3RhdhleL%VogDF)gY!xKlanuJ<_)pO2R-97f= zucJ#KkW((eS-G`ZB(NRnU-y)AX+a&_%vrYh z76pBRNfzeXS_7-qxba!PiKIjUW9$T7exgNhv7L3NVQ{E@(&@j_2jM`(6H3{3sCxHax@A%L>b!qyG;*EmHqhs$`A_E=0%TA zLYzhKhMt-cI#C*7E8PnFsV=69_4!xptE15h5@nlf0828h-Q16|s)*+rW2P7OhV0+| z&hX2gW7L3*bXUCEw^@AAgPz;%w671L67|=|((4MmyL$Hso~p@Q4J|`O0BGTDX&N)7 z^(O=#h{Ew2#&sr3(>Cw2$TMyKP?qN%+RyCg1c~cs(y4yx%jh9GNj~{CM@-_)DUQP| z3aCu)7?JKT!JM2<`H#MRcyiykHT}q+XCe|KB2Ia#WGnV7nRvF*KAQVXYuHoxP=Ye_ z^8Kd|ms6X)FSkI1TDAc@6R-6C#(VjD0$N@P(GUxcWoye*Y)~S(BR*$_8+AP`G0*9J zd9S2Eo+0mc&b?}pihJ5jP8wOE_G}uNwp_SHb^{B^H>ZwLG3q)FziXu22Vh-e(bD5x zLUH9vzx&Jy&KL|PMKx&%T04afqn4T1Ycm!4xkl))2^^1RC3KU9bKgHV7A-(}=BNAK zB`F)pZ_%X&z3nyW=xb$W?t(k?k%N?gobG?Sm?NmRCpr94N_dVvg^ELZ!3#4@{Z%4ASSh2&*MXi^iPetE)kjXANcy-fF zY3MwBvcTvQ_3$KGo+OFTSi+yDhN1YGylbv_sdAUq4_qPvJ7VCVG;FMY+lbchf~hHr zWR7x~myVm`tXR(_6ozCgA-fOf7fVt+X{ZXtQxVH4WR`EWk`y8hhHCxqmbhS|goj_S 
zM3Hx|zu?P}XIM%}ih2uFiFlxtS8UkzB+U>woB>u98|wL%y%THnJY!$WU7b?^@Xgpf z6MS#fo3hsQUd%V?f@-1lixP zQ)VJD#g`O2&(>&9Kv-q zMj=u#s{Bpt)N@bh>~sWe#|99b?|TH%)mV02Jvs85{yBe2L<%cpXx^nFzU^dsjKUNsEE z6G~bw{eB}4vhHvm#652B^6RAL#3^AptCL6!etH#RjsUtMV#a|j{GguNYWoe4`@_af zbRcU$v!BlSAD#i$s^8fkcRS8 zEis!Y_BZP#>~9nobYA%eYJJe_z%>(LH^7s*ZW~UH0R1v*y7o8S`3Tt5>Z;Es5n(Od z$a0okTA|6P1FbwlR361krAKu_py~YaYfo7yZ^YlPpbK0URld8}Z(px-S3 z=vZyV_|8cDbvKpcJSKjnusIzI;-;cgU3m!_S$h7(Iz9R|i6>A|D=9=u=+w3aQQox0 zTK}`wi(|^8iX6?(BMqVMp^?{IwNQ%-z()AUgi9VSG^(IrI-U^~?qi zlZuk>^Ut7vd_w6E}@kY z<>=^yooqvl6iX2nUPqlqlJqq#n=>=6M*w$9raNLB?aHi`kBuK+&2`c<%}yN6P)}C6 zZa>p!!Ig09v@quapxb=Mp8;d zR6eGTf&uxC3r2O2ccig}6JD^ns`VEM{g)}Du!9wlhoio8DvVzzK8)Hbe3`h6R%sQD zG9qbSKQsmiY2Qh~w&=R=<}~OD=cRTHuW3`*(pVyu03(&KyzP!5U_56IPtRvQhT~ma@d?V|-zW&_{u~he$pMGy-*KY- zx&Vx*uI+!|y1l>T=VMNHg&KuO;Kim*iY$&jabw{#|2hy@jN6fY0$1u!$ywUex^!&O$`ce7Z>F`&ry}$nhmRrv!8B!?a&5*ljOt_q`Yk&B8m&$iC#% zDV){Uc7m>F7^*TAA{;775CBwq^ktVHvmTm7PL1iCr!h zp|8_4-{a8ly3$G8MnFKL!)IJItJ;K`?0Am%%3g6Tuv=L9Q`^YB#h~R9Q2R$~aXHlV zwp=T}Bi{GqRdt?Ze-o;>rkTQ^(@WO*mh-vg(rj03$NY|5P&>c)q^KdLsw;>?TX58v z52)xLPWP*QUQEn?tUfM1{F?sJm4bAe3!E%N^=2KRM(oCo5v!BIQM8T&l|-Ymn0p;9 zMa-QTr~R=;;UkTaSI3HN%eyU0&?Zw=xhcH$m#GcS_Ut=0u(p~e9WDah&ACzwVXk&5bp~iMEuMEPau8L?^Hd`+(3De?E(bK3QaWk#A(jROM5pICZ08} zE{qmL)nL>rHXk0MOkQSJR9y;LBsi;h|9vQ!wjLjxYarauW-@cC{41UV0aZ=d~DDk;v-0=kfeM#R@X^Lm=pI~86nC2zsIM+8y6 z--iF_4U#u_A(jIp^!hzw zHAmUhrIsfDoL_q8C7iT)wZCsY;t*s4wp&Dzf<_E?z#oqw`hJ@s3?@gI@a@w5lY6;&EX0Eim!O zu@wse%+S94TELq9thx9?m;=Up zLKnRroW$8TMh0=TQ75_ab5z_euY@tfxpjSVJ_ix?+W%`8?vy-k@ zl03`t_|ccw7MsM}{>w` zHqo$?KpY1!?U_P4%;*)3MmY;bzDn)T>g&u}D7E7tbfN80s7-^`4RG@%%`>u9g*=Om9BjU65r$)(cdpg|K2WC!@>iaInHb-n?)G!xFTB zu?gBpgSzjCxKa8#8_sBOTBfoVQUF&J(S# zi6(GvNfX3)#ww_NuCCv{i)^jFDP3s1DzS3?YEdM&rO=_1JJhG=9>6q_GuTjU7uid+ z*;Ln79rYqtFQ^uB_5Tb)HB-M+s|NmaiJcz2^&!Wj@Sj)*-j}abZXvlh|rA zEfS7_FjY&Z!6yX89ge|?CAqewC?}c4 zh=zD1121wZr{)wgN>SEK-+3ypgNEM2c|0LFz2?b%4xg83pe-iO0%T=GV!JzXHp71|ziK)2_8R8sa9h)A#!yS@T>> zD18`*LCYdZFPxW9Ti>*6fI+T0gFrV^zLvQpPXDOszi%7qLo@gJvNAR=M zb5;mAuWqi*7=C<8#Tpb1;%Ty6$fYx%Wle&zjT|RtzJbUT|5Y*z=tr|rAFTrF(L~QS z;H9OYJoe_h_Vwe0^HxL6T|Kv`nuLKCu3J)ntJ zkGihjI)&Ma^tO`lxltf&KN)gZeBY6CPc#2Br*>D6do|#iewi z)XkVujO&RYYf5LnZu#>W3qP}7($IJ1HK~rUkkExFjTDVV ztSxt2C78CSCiZ$Qw=yILF<~d0;v2f|kStCD-=HDb*@{OhVoqnNrMxBBI6mLSKYmee za<7GlZ4Iaa@~&H6Wgyl#L@HoTbQt&MBSH;0f=@Ii5=l{YkU71$PV&+T7@tHGSusg3 zNkQ4y2ukN^9s0J8U@|V_#z|6D-d!`pI$DvcvSt+koMSmx9wG3~%IKY1^ry!Vinj}8 z(+~a3TArkbn)cCccyyr<`Mz!pAyNd4mmg$g4z2Z)INlRUCQjgoc$Y!KRVjCIO!FxS zym$#521Q$&YpSKo1XmMCS-g6|WT#OLOz091ve-OSVZS^#53iDXu7Nax1@3Q^YY%^lEFznVc{vFb0D zuroX%n?{{OYPY@})++AFzmLn3ZxKJ_x7=(OWUtSu+4(w=9OC^5Xc8kTQ0p%7%y$^ zIRFRbbp{}~wmT;0nbb?{a3c}IV+n|CX%eogu&B;L`zfu#aWPg7gp0qxR5Fnx$16nF z+b_msdu^Rr=`5V4Os0*U^W)KaJZ5TM3W=I!`PX%Y``f2nngA=yLO!twa@FgXdiSkE zB?BM(+QOzjy{*9ko#_53-}ye>R(fptW4L66*d{su{XSc@CQaKVMxh z^NHc0MD-iVN`g~rCGbTGlb!ryqAK;|(e7*Ovmez*$C%Jb!$$~J=g0sE3C(T7kTfFmAKUb6%UX0x?~(JisBgXkVB6{Tj6!wk{qbV*`vgH14drXIWC zy|mUow68h-Rd*D3IwuF%o0tZ=c(W7fo-vPlk%fuN7MUelpX<`LDr2H=DzE_Qn+6?n zpv_hxvOC_QGxzgq)Fq2=``F#Ur=Z|y5uV6dY9iVf&x>dghXQWWvSfa`qOm)_I2jXZ zfuUVqotN9;GV)*nj2iXzAFH1mn3l$Y9^{Ks!Gs^@H4S{Y^=Rz5@|eRMR^L5u^N!GX zD&+MeRE^$Ui@QAt*wr*FDcVg-&v}X$dl_2RhhN;o@_}_1G#+f8+|Bbmk*<4 z#v!0^2A5AePxllNtZ-8xttbF;&Qa|`62rdtvR9HuoPuc(FoM+5kZ7KDmW&? 
zNZYEX@bZ|U#h2q|s|(=Cp|iRVWl$wUi+6C7`Q(zZWtLu*$z_*{Is=rJ1=bV$@rZ^~-_dS+-JE*6bcy{``Y4-nthbr;rX8-M?(7?(dprd} z*W6cn9enYoNsbF6(s2^LK5Hd*U<1ll4dVx%!BUrG)AhHLY0#qnvsvYBk{UjFec~?2 zy=cGeB{&?pJ>YA*8y@{)2P!2`f>QWkf{TB9X$tp_?Lk8la+q?kdd31a(h4|Q> z1<*C3#cq`5I`Wyzj2ENRj7h_TZc*`ILGWQrka=fa61cEJ@3L=Ecw*@@ z6-QnMQ^DEJ8>_27zOZE#8cBB(W|wUY{8U-S!-zSs9ip-obkv6b9ZaZd5Ou`WGap zR;f&!)TdU8-OUWGskRvRU9CLRVFY#e{HuK0)$ycgaoLKAXXxE~k349++b&X;5A0oC z+?#8rLOR@Rr=7;mfbn5H%Y*E^9Ld7By`O6EWlia5TvkP2u*DHJb-8OBi>h(dU#l)Y znrLG!`u6m=Psb5B*r-=APr^7UQjo|qUpXB#<}7t}>aV_TgE zGzL1e8Xlrw@?BX|i$?CIq&e1Wy0MW^Q6m@LcNiU_a6vNbs0b?cW_*W{c3zeOoGbM1Yuk%9JujY| z7IQBe`hz^HO(N>cr&H`qv+Gd?i9vj2lezVbd8m-0Mo`Mcxm7kyWP{lK%`%Is-=v7k zW?J?Zx7%fc4O9wbNY%+C;|n6ILjjpK=VlI$FH~5_(6Q*J1y%8K)Medbk*h!90G{c> z(%hhHrv+`3^K9mcoFvx3Buq0eBv=I|j+@a`0r^l-p)ANYWj;G_I^V6x2>Sm~Lyrl% zE+>V;OipU~iZ=vfo7?hD;&8J+fY_Vhtor(*=(j9KdblaqM(c57B7)`_qQehe+T2A8 zZdM8IK6)M#2Kq-UEtbXO%%K1{2nL7xS+(PFbSYt>rY(#6%D_M!ZkvGFB2|bGIK$uf zC@!Z4#I$oZBMsFXhZ6>Y`5OX5@IJ>9u#ml@=QZ^fQfwiGuHZrLYL8a7Xe?RAsYe4u z1+0y0&K#U+A~uVd2mHd}>h9_xWC8(Ml3v!JnsaF-mN(>raUiao&-htbU)mko4tH4- z>3#q&bQze80JM#;^^jE3-s{;$;l1e##CSr9WUek7CZG}I~ z23!dWn$ts5;B9&fjw5=sf2k^8FS=lIkT=kJqHdJXxAa)ob4Xm&%3b-{F024P3qEvK z;utz95>DJX5D6)|^rJn2@)9sEa@$-Tt+v^NOU9-{ty)kKQ<1)_&up|E@jt1%SW!45 zM|wzuerY_SjFcvH6=0#%JakTr;po}4nvdfrWa?y2W_dm~tPhf|_Mw~NjQVnR1Rc%- z+#r|U6C;-KJp(-4o=~|JPPzTKi)>`f1-+m@x0hH~jl^Nk{u4jdN_~7qfIGn?h>}Zo zL(s$;br6hb*6TkWUU8g{KfGQz^j3DLMV8Z!xI)_9lrMvfGOHYKS~OjZ8|pI4dI!Yl zlZcdzVM$Oi?@xx5#<{(jKt}PJsbg&7d6Y~#r=IVgg0T;cS>k0+n>LIB1b7U4IDfDe^>3qU7>Ru7}1R_N$7uee9c}R8w`XU>P z0h3*Mvi8$Gh*BDns}q<3(8>f2J3#iC3Ot`jVY>g=B~f4)2OXu|f*0krYC*$~hwLta z+iRbQNM2*p&QITWO6Y5_xzR@GETsA`B{Su?qChL^=N5dauUjWKp)JPLs$CuLL|Tse zbOn7?aC_*=Dq26H_n>NbHI8d4`MOc-+j*x`Rxy)8O1o$G(L9t3avG_-K6=fKKEM2|MVLL2&aQrLZhpQpc4| z&^2p0Jp$5LfkLAqEO_5Ch^&H`rwHe<_s=32tg{n25B0dN^d_oCVKGB4Dp>&)poLa5 zC3zY4Pj@0EdCb~TtGbC_7mNQ|jy}S2terG+|FsE~KA>7%{-a-3gJ5*(5LM#SwDHa~P(Jy?u6>my*Gr{{w^gQ6F}s4G>X~q? zCV-iRcY*RLo{k(->)`*XCurbM;%S7v)VJJ4!m;cM-N6SQ?@kU_jx$C%34hd*W$8Y} zdS@^>G!r}zJoGKUAo9bP0IK8ch|xjV$p`d1`cLv7kf_`B6^(wM30Y5Cy+_xp-<-C( zGST15i<1;lYCq5cMTc&_)7A1-a|4&KnL@?lM>CFs(8UBgLl_;fD_<0|K%c*|^3hya zaS{Xwhw{bA>j6*wU%agaBs3K0y)OY#d4nX^Ny31gKiuY&T{MB2E5%Qw<&+LT&*;sU zLwus4VWMrBloqb(${5B(tGzgr@!Jc~uNtH1-hmS)H+Cin2Ozlc7mH_emlS|{7k2U# zr##zrwG6Fxr>kM#tZeopr6p3!&&P85JVI#M^8>xF_mtkZ*_}3o4DU5%_q(SsGi7l# zVpC5y>4|>51Gh8Wfp)yT;f3DzYX_N$(A7=U&Tf1hPm)fL^g|&~SZ*Zf+N2!3Qi=Ld zVyqu$fVTFq@njn}d0tN4Feb)zx=~A*RQlS3b%6w0uiL_voUG z9@psN=m^xWVOC_nQ8(;}KlFiJCeO7~x*yfu=`e2tlqO)9YS9hq0_OptS&z9(RK=-g z*@?!PP87+&^J{hGTbPy7WS!)VDMGX|W540$a7BH1tG7dFzO%P5mL2_Ge>sh3HGtf! 
zh-&yPSO*b98h;DgCe~VQ-4?qmh)jmh0svql9;MqG8s9DODR;jznalkULO z>#Nh*f;QYe!zzPSYAZ2x{#xpRG`B_TzfVxISA_+OrbZz(p}ER##ZhwOH}1Ej{<4@; zeqDl5g2o~={jS%;9WXSOaw{z;iDOJw1{@A0bGKdVlA*i5zUB<03e%=A@1J3!1rwza*5Uz{sH+JU zdHU>py@T*|*Z={|qFgdI@2JXbGbvs%*m-KHlb7yzsHLcUZarnb2ii^9MHia@nlT;A z?k~hn8Q@Zp_Gi2)cTx8AHo5#7{>H-yM@WIW5Tmm?3{Q>P%|i(W zIS7KSJX4>|=(B$Q%S{bN=)R>E9wh?rRASWZ7I6L}}q z2v@DO5O=~7V#NRy83}EEibq{!D!hfs#*#^DH?StkL6M769s~=k?_S%?*}4E0CTP|r zY|h!W(7s;3cCpHo$aY)XA#j8Y`w1!fo4ldycEt+wa3jy4JCEAh8*i(?BC+KrsKV9q z_9<9|mlux_JgY50^(o$uv-6AE(k^^(H;vo10(?uOE_b!=5Owdm!>AcXrraL?%i+Xx z_LOUp!KdekBwRNpc(%t~n{BrdXVG#ycVV7TrgUN&&fw5w_JeOX(tKJ1gA$oS?M?+G zhA!eC-^xzxEaU4;NKr2CVRja7!szwJSuk^JrZ6kgx$cl|FVVtgS>9CK-azsI52J}d zcFmRx1BVk6#L=ia5%SMt>)HW5QY6plA6!N|$%H8*mBCxX9IR`v+1BG;K{F5FtqwXl z-z2l{{0x(2I*x2n#@#AQ4WFTzQD4LWf|CXlMCAM+p^gB{brTqCc0$f-Jle69e?|W` za*9VOg-NNm3((6DLDj{L;QCKrwU;~PICN+*W)uu9K5kD56M3iH*B=f#y7mS>t3$=- zMt!3*ZADhfb2G{7k2PH&W**Hij=X<-qL0`&THs+U_^_%?oHq0x0U?9Ibwf1-{|&17 zgmfg&ng(~_*>|pHc>;nvJ7X^bc$-bkG5EC{-<25x%|AR^Y-;#2WN3OBzY?iXSIhj0 z9g+WN|EY_bxt~~BOuN?ihK1;H(yVTcS^ z1ID$xWjD9h7v9)hoP$o4L@x=nn-(Y_*_mgXh6pB-J*Qoo#<~)#L?S20n&ZIUZ~(U} zwE2e1bbRPX{0)g@dF~tX$W4t5tzpt>`7T21KyZV0-6E;NBRP1rz^rWm15%m69o&x3 zsE*s~iF8v}OCKf`bZECbW4rUp8P5e$^7whBZR9H2M*h{nN*o>g5#=X3dh6ce)&MIV z!F>g?x6!S=%Js~q_PRoRXgt)akjd@>%p+$);&)k{r(01(y@qzj=~I0}7rj}Nlq?U< zwIP7zjSj(0%$*q;AEq33`MyLq>&K2Q5DZ(O!SpcH2Zy$75%^L*+PfCK=>IjERiVvq zykTn5y67y=T?vD1_qf?(T}9R=xmoo}AJt;pQul%|7v3xcdP@KR^W#hH3RR6>nVQH% zo8?EQylBx5q-*bVPS9*S@@@bmwKX(y3rrI}7=%2>>CE3l7!a+Z>;${hlaf#L+jb`g zq78=2ydsM}%oEXWc-GIucNVwUfw1mENne2CY|GZ~gyTqRFcRVyR#dMS3d@HBDYG)& zD__E*Q?We{5qqyc>}G0J%KGZU)$6Jp?rGO+C@&T=T<9<^Cg+IjPF6nKJo&*UJSwsm zSn5{9hZaR_qGpOE(?_twO_8QpKnvg;4+W)q^jTNqY11u&;i+I$dL_lP^UhDeNhI3M z2Xo1Yui4)!o{m%QhF;S~@0=7mak#xV*U2lgwKqeXpAMLWC7MOYHaP_PQqm6g)5F67 z2;aV`W4}ia?ou~V=i@!oy7yr37Q3j)t-;t!9r$KQGIyW#zazn1Wi%D1DLU7uviDNq zlqPrS_xIiz-Pg)CH-P!MpykH%R-Ri9aJd2uqh}MQxfPn)M&w$gGTZC(P(2wzXWOld zoi3UI#T9Qo{6#Nu-qk0nIn}dI6m2%kiMWQ2@6Wma%*Jwp021(AwNTAzTV_P5V#krO z82Oe3>mRds)V z1F-EdxCh<=YsS)`@v?E8*Gs9Mh|KKRZoZaE?4%D3dTrTsiU4)TDC%x3*M3Jl4R)-F=#3=km z)<095Gko)9BcJVz;7JbjXEh}aJ7EmA4${@xN5gF;fl(VNqr{1_!Hp(|jwLkMe*Z+l zlBOXmZEBAsQ-CqcvtXaPtrmF*VH+Se*_J4Oym`USq6<{0c*qNUV$`WKARSBvQp=A| zegu~23&gA;aLnq=Vb_xly$B8l+(1G5lQDA+KcR3HFIC`ytjiaNeQukO?Aad(lS9Wj zZ$;0YxMF>5g@619c!8uKy*088rr%#$#m3me7^Ga{%c5&A1X;h%oX2JOMF8_gUE={; zO~$AWPWGr?xKWln$nlM#oy}eQPY5u;gh&u@0#`XHO@fMp3@^esYkO!>bFoOr?*l;2 zaIl0>0ap5QtbbJiY6Ve|+pX4NTJuWvBBp*fWc$YV_tQd93DCl)qS~z8MjC-=^>degBZ|Ifvt3b(TohfEKLhMwOFED zXnHl1im`CN3TCOzBfxEDxGtcEBGafo2wit-65!`cB6C;GSSf!-1)BTsZ#;<*?J*42 zmB?oJa{AOpXl8{o4@kSWZL%ZGvBg_IgkQ@=EM26ewhiO%CsYrlZ zbR-bjkz2!B78r{~G{-){(IMZbKilDqCocq@!S3+)nQeldftmE@7~`1)qe~`Asuhc3 zSE!7}o}ut7_yps3mzt|l$?ERC&{xox$^N)Es@%X|YjrWU^94XrkL z{@!zNG#uP$Zl;b`pcl;@gptmWMmj29@yw=f(Zub@q<-{pL{!Bh&HkC}s3YYePrt55 z)z_jzQ+Xc6^3jYhz(@(f$45m(dQY3deRI9~7B%Wzak4QmDY)a#CNrrZQ8Ri@R40_Bmz=xC2i|s&;&uCt8POyIm4_7k+AkU;ckoh^MDw9j`KGrb4IF2-l_FF(^BT_(9fwCD^9@* z6CpZa%CY|GU6*FETQYydp`0m?@!oT{J&#&VPV~Yl%wEy_6D#f8I zd_lbv%RWEs2>@9J0?gMdCG4}{y-&{tI)%GM87}WOYK&loA-VMC5nd8q?q&qsA>;Dn z0XR}*#c3U3F!zUw`d9mAQWcAx7ugJeNTeph8%p`lhwkz`cJK$xro{u-EkP%azq>(b zF-S`8y@1pqRmtr25X3OTMEa%sqLsK#?B(n{cMfHwE*Kz=o7EeY{ps-^G6FHJV0o^4 z2PmtQARJ4D)kfH*-6s+aUBL`}0nrkl6Bx<~AVpLQ2 z%0&oV&g6a$_W>}$r8rkAo~bOc4u1drQKI+Y?iTsUHO%y%gF~$&iYYoS;#S%KEgg`D zj`>3~z-qGKiCm$|QtLdQI(u-9SgUaj=u_KXDHu^R{1V6tv!n-gi6PlN)O*0;YTq8G#D@rup|%{1zg_^F#@JI4x68SX5l9VFeNRXSiPUVtpH!J@voyrjbppxfS-@>vS z<`|cMr3-Q43a)cQ@KjzdprDE|UABeAOMZIL#^KHGe-Qs1K^<_L;kf!&Z6rM?^}ti# 
zed88{2O>VEssIY&@udV+TS^nwQ9x;5z`z&l{GL%IPJ${0(B~0OJsmhmv`aWc&p+Sy z1!4F%XPUoc$KVVhoTM0ts9u};eaS%Z4FHSQTRgXN`P>8iB7!ixb6AO&b?U^648pJ? zn1)ygw^8U6SxXZx4FZkbzmNy}&zA<;AOOy%qWQi*VfyPY8SuLalpiPwcDl(yi|sAt zs2_)$<{Lq;q;{o&r>AMAGWF1l&H#6WOo~dvIoQ)fW!GqTtOA1?0EG8@xqgp_{*HUk z^GCGI+OiR*&T#Jt$DcqT5kBEfB}|POKpf>-_6ia% zqA~zFLY>J2+K_QlnWZ$y_13tIf-k=Oedob5{pR#J`^Ucz9fRL2oSeg(hj#sw&$bsz zZ#Urk^Kh|=b#EW2Y(rsYnGdHVpTU(xDwSbHPGe1H2*x?MqiK3F5vFZbK#F^%<1+Vo z?m*T4KjC!n;31fYBHQuxezsMg-g}UJnjb;qin*y?T71p+q)yLN`MU`dKMbC76NiY6 ze`?9EH#z5B33Zo%qcWZ0BA0X@dfqR2r%(Rgg`fGz^MCneA3E-Mn3KqKhFVfYz$yPR z2Y!xlSBrSb;N+i-`g3s@&X8XFF+eij9QO>Tw*~_?yk;G?Q<8iB3OR;mXdaonP8wJI z>pN4NnT1L1dcc4>D}rw8j|kXXXRZ17H449;Uy29L)(iWZPR_qKU-Sk!4|rFMFi}&* z*lp*w?*E5hR!hIF3>O0p6=PuxNDAQL|NAGWQl7Zxa@z=1f6$KML)y-=+CQ)z;jHn z*ydXGmmk4C0DDLs+Y-n+OEzLJj6K%v^duJfW-yu7?R^gX;Tl1r@@0q_qkxrvxowqz^=`qyO*E ztW1Qyk)d0Xzr+i${sN${VA%cJzEf=}0X`?LwAeL(?a)_dSBZl7zYgEOh_`>%znQ5i z=f33XSlk;Sj#T+@KEice+P)kx>N?p|)N0oNIhqNsx%$sgh$VzVDQ@zQnfq5L#1cby zT44^N{sJ$Ua1A4xQ;F25;=@qTn_nkfZxfKY`1_WcU-3b$0BP+*uQlJVn+yJI5P~oi zR6E^UB33-P-63V#5NhW<&|QVgCEs7N8NdkMe?D6(;n~U-CXfHYAcX&8xJL|bm0+-B zdAkTxy@lb!%L78tT-oF*%V*w#YwtoY-Mvw3c8~b)xYZ+Ymbgem{eSJU@>xH)kFLLi zFz*bvxw*YuC|m9_<1%eKA7?gxLXZ}K?9SZ7UZA@|lC04L8ouo2+x-@##! z2A*{|NZaz^$U2NG#smG!1^9i?46c9D2BWOuzeEoY(O5tY@~EqgZaZ=-AyYG6%!by5 zP-|zf?enEtUmP~qS98G{k+zy7AI=Q!qFj2hf8GA%Pe@TMy+&F8ddd`Lu(=b6K-*MZ zk<}*!=ADe7G~I1WfA-{`ll1df{^KH|?In0F>v_NCwKPmJW^hY>^n9(Y)~yPRwuv|`_H{wzaZ1US*~9t4<6*Z!X23TC2Tdg zxs1hE)VGg|cvju#ox8lt^ETnys8nQlcOrf`p19uxO-fQA&3RA}J`{7L9%_(V_vA%hr4iAZ z(5>;vIrT5Ah?@wQPt1sL(#qnvM9vmM;l)6eBNXrq&y53G{;>*tZUO)51i9N&v42hz z3A_x@P1B%qkvx^@pMnt96vA04|C?}NpeQU_M>oYmihntFI1fUCcLgpoOaQiBb3OUH zDOmI74`}Guh+afw5T`2J#{Mx(aLLnn4}qQc#(T-BdC-uFSV2%{0P=qOWG3pj2-D9$ zB1Zx*LFyPg$oQ`>aUcieexNhP0mlQr>iffUH3P!O1V9<++0<3~fBZNk5Xok~<`w>P z#qaTnqP*bsy$o_QRYd>JssmO^xW{63f4A=be_35{l@xfoDDLqui!+eM2fDRCRu?df znrs#zk?P6O>n@Y{uZ#|4A?=tTlH+^-@m?@UJZEk|hZafivMOwll~F3~|E4ynhHS7FC>H-H#Q`Wf;R|4)RgnH0l_a9xr2fw*QIJT% zqeyNY`j=E2K5`#Yh~Tqm4*}k!0wBCT=T$C*A_@_@KP`bYC(b0a{h#7@|FSBPe|05v#J?;awcSZy-3T>O_<_22&W zKQH-b7XE~@e-7vWeKn6JtX>_n$=xo`&u$Z5^y*t4>6|j&@?0|>pQ>4G^a~A^+(Srl zcMlQ$9s-j8@gGeoCFKr3WL*9qfAC+P5X5jdBqmC&fbxHS+@D{AfGA0p>CbL)-WM8^2}r-y>~pCfy?YzYf9ACxcoAeX9TE(A)pc zZyPBj<&>3J2TA_-!Ts@MilYR6JGOY>Hsr}K&)f|;aa4Zt z-}x=-5{w#8(zUa{hw*=zZ%JL4j)pTW7yq5#n!%_sGl$y}{hyZ_1<6C04qe7*;(z0} za))5l{;7jE{|+JlS;W6X$bTa7uMpz%pGf@c9F+V|B>r_~0&nI2OFDsjRJXgc zxSRfEU~4z~5b^MR|ssWIkdZ)d#vx_>{x$=*rQIZ-ZDVO^)u@Lh+QqfcRNh%ya z1X#?*SKi(_;^*%xM+b^`F$8{4OFv`M@@Lmd7+jCA`n zn_o|&+u=`t5;Vzn(ray!Ork84^{+gCd|tBxeQajyxTHO#orx5L3rg!|icgLeh(jar z#|yT~*Ky@k7>J@9l4SgPoW`tj_WemC*=6b??#Hx>PlTJ^zxT4>$4IE%B=ow+Ze~4r zss0{CAT@!*%#8vib87LppNoQ*;)afp;*c}5Uk~qyBqzb&XUW$wCj$lS>APy|p#wIT zkiT8>73tF2i;oO-1SB#sk|oyM8yB7x1+;9;$G`aTcTt!SzM?y!pmFlBU(fp3>sw@h zo>qW*F7}<_AU1aY&rx3TCbBhSAIutIpvbf*l-k!5E{Xf*Z~WrO*P@C=hMVN@2kC=| zMqXOadS@4TQq&79B;-?Sgx$JQ$ns}_UVXHYu)C4KPv*G$UP0~G%1NWY8>N2$*G}rQ zPT9qm+qCXF$*C(P-PoXBS`jO%KtW(^0n1xbfOgDqUbRu0B``o+Nc$r^g$`D^rFg<@&nS=PxqA5{v0&e8!|zA&RB;h`<(mnTeyC=DuqSG@HY08U8eA6 zpoqIGbTEFt`~CcJ(w{@$9C`Pd&Y(N4De1r=Y3rJ|78n0)h`{p%ZYDitScRbT+J?SA zA_V)H_tcsj9{QK-Ewj;5bz5}(>B9W#)Jc|MKmR%c!jeP1ncicva=~@Mz6bw)mZzGNWI$HLJx|8o(2yj*8sA%Tm| zBFXw%k-v2G&mifNmYqIy8Sl%I^y9M#NE(Co1cZM)!gc4qq&?xn(c|IYIyxRw{(OBO zCyKyJOvhm>jgW%=`|s7Ipj5YgH88Y>$-m=gq*N0iYF5aNQ7e4wN3~>8wRBtf1A(Q- z&vD@(9INeC&pz%Gaw2<})bei-V(mr4aq z>oKB1EOkB&jR8@IrD2x`iur(aRBUm-vFPd+iTPgj+%xc9vM$kk{|vBJ;2u3*YQoN6 z5e7{(;_Z;qSD1w(thZnYAtoTBog{de^kZxw{6Ljm2!O5vpjXZh7zF_+Q}ct8h2!9M 
z;Lttw?_0rO?o&zR{Wm7k5shTb&kRQpU+_m>2o}^gdbqX|bNWlupCM|N4VI(7@$+SJ zh@f?F4O(;eV){jZ2xVXPVgGd+^AYcclpBGR4A^1RpLcEyzgy}}ByFv_Yp9K3=|S+jHyHK?h`IXr1+W zP3O@L@RtmWIS{|gy$$+AILga?on!7V-s!<=9*Ow#X!>+W1k6ud81etHL3L4q`av&r zr5mkC!hQA(W@7HPqkaxEzYhQ=N8+5{2n#HKpE&2u1Mh9ADcYe$)eGQRz zzOmgi&sDubml=)j5<3IUTCeRcAwJ;krRoqHfFIug`GzlY>%LSJ znKubRa42AAw6Zi>#1-c&&E&>}+u2_9Ca3N|glQ3%Ai*i1K-vfnT~AT#4_dyazVRK_ z<5VTd`0rhusZV&35jWIT==dWxWK#HKUTn_lTGcyB_=3L9^0TfVDa)<|!M0I3a6USq zU*4f@U^hV`goxR}r~4eBx&=a&?I7nEKly6kF~g9)PX0s35cvz>2!|A99p%)$3Q~kr zO)EiBXW0{KDK)4ZMPHEq4n(-%J>)?+u8;G_ek^5R@fK{V2NH@))+OBC5)%Qn*A(87 z<3!1Fh}9ZkeFSOp04bBr$Rv}}yeEM^B)ASZph|!NXxF#NMc-6I#A$_>+xfcts)2~B z^n^}K8V~j@JQkoNVC6jP)&L~jQ?nwD37iLzclqRX?1RMb@Qjw4|tEm6yCOZ z!S;I!LsjSxuzU-B6Se&-oveaQUCu%e=sNj}*Du%&@h<}hEEc5tx>*%xa!bc}O+4{f z2Z|Mn8fc77LM##jxTUv?FM8rxu5tAc;3gW8l}^J*=eeTZU1X)n!9bdXNQ$2gMeOxx z%#oh_U9+EXjB^)pvLX`l_S7+Veqh(`rf2a5rC&?d*} ze0mR?3wPeZ$U4!*s!xHVg0X;(8-y3&H4dG|ZEb>lucL8TV#=Y<;hrENHVLG$&NlT^ z;?n@~Fz!EpoG^{~{LOh?XJGzhaA`g$s2E&Ls!5wQnH8+AEde6ET4{bw_QLq`?|3V1 zbe1g)y?GuET#W_B)!*m7B%1QQls)=>Y{!p-M^Xm)gfl*Mhe>ovv^E)Rbfq>x=)N5| zIMd+^ha2)hHPCtG?Uh()Cg0~d|kz^B9GN9y*$|6+BJ9AkJsdd5+Qcy(BqI+qhFaGoH=H4iSJIIF%LWx`73r^rh32V z@RlTc43t#l`_B0ERp%}gdM$)l}jBC!Qn8YKWabQ?bd ztP^0ut~}rOjTR4TdC`Xj2k1MqkCXYlqbviAM>~L8-W^qInS}!FOtZH+s^C{4omZ0gaIQh5P?G9B>v z^CP}YUDp))^YHUYMn+sk{f#pZ5lV%AA>j$S0!_Y%<-xXv%d{?xMs698J07QTJb%? zI%^HBR36N1Rlvt=?$0mo)7Og(ziZtl-n;b$u>I!^Y+qb%!h?{(c_2z;Zj^5p;#6%0 z`m%M-@c}wE#tKmuSJ${OFYrmk&pk0>+erDW_5t3=AKQ6(;o9L;!@v;Z`pPO-SjMFogL~3;(+Jei`I!ov=~J?VYS&KavB*$ zOxj+EMk?RdLS_D0ui^H5iI0FJQ3jQy!UGc}8)O%kBntt37tWNMP+1g{Sog69XwQ8kmWzhMu zFaoM>`XtPJ$GYbb_|K&9&Vfbd9O8OXxAomW&asI`!1Iag#M3KuRzPaQjx(ZpE8Zp4 zQ)*;?mNk$4oxm_YM23Nh#7x%g!JWnKf=@ zo#17TZgK)E23ZV;T_508GIZ$S%Lm<4T4QauDY341AWb-lcem|I^cl2>1&6u{aEo|F z8D9ltOVKa<$eH@hA(WayjmQT$vu{D|;6hhYsBK%EYoDIQ`#PUD6x%9V3D@zbO9mMN zXw87s-xm`JbVc)<1Ev8*N$k0vU=eZT1>aVhF+=aUWxnf>1Ps&=TUMty*O%CR&c)H2jo$bYYtI0 zmC6N%u8W5b1k?cm@G2P~Fmu8HX6|JHXUy$$(_G{@QY|_>1pZDBZtlC1S){Dpn$>r) zNx4p+6TElIohc-xGei#-9hU&wj}?GFwD?{DoHcGd0VElURkp@=clvG1KKQ?}`6fk^ zWnVd+{IDG&sNN&RJ{wq9F3UhG>Z?k(-U}!^a?f~1rS(Pc-AZNyooWm=$x!8z92bjS z-@QLulL0A)kTP~lCO#m^pdJo1-K&r3fqx!D&=`k*}eXzl}I| z3b4P$h30I;w6239MVQ$}X*+9eIXEb)-TJagT>N)d=6 zAdL`dq>Q*~Jsi8a%iiGf`jlJyEj=cm#fmv@mP%0!J8_P9BVc}?EuS?|MPpBrRjR(| z(=eF_{8`_6GN(SK+bkM6SULXOB;ew}3>OC>UKm3(Rae$1kbFg%K4~2dAIc?@X@jePe;!lc=N{tjTN*co}Q|O7h86uS|^@NYz?j7fYH$5*u@~?w&Ql z&H($i+p|YKlBN81q7v-XwZv5b z!*TZH3EFI@A797nRcqUGzU?TGI9kDp=?z=jjDVYPoAesRsw+6BRBJ9Q!_nkQLYX7NFVL(Kf?&r2+_0dT>>DS{gB!Au<__ zg*>e0ti?>12|M5OuG{6@F+>HYXh~*}^9M0R!=4-iZ@uYlQv&4$@EpPg6Lyy|Hjs?gT-T;n_aI~$~6-3#SCUP%j55=Otf z?)k-FR%|B(A}Cxfz5#yp<|iqZm|%Z)5cph21*U{-0x>1VmrR6p?{$EFw*yB{-Gv$F z@ey`oedcRizt(2|7EQgF>;!jjt?ITI20kj@`GO6cVge9R?XL?pz^1c$=Dl+piOgw1 z2eS6LkSJ}FqW2eupk5na!P0;>*`Z~SXo*5X&UG)$^wHSuIio3HT%Cti9CsV23EjpT zEu%2JVqX$t_q)uzdo^wRrhn(_6Yo^a#thvFA$!A`?r=<%8CTM5$*{e%yV;P^y#jZQ zibA4g#{~!td=*n5Xnw6`XVV6`KV1pW2NzzxjD=5NbK9t|%iiz7cY2|`X-HvmvVs|3 zA=8fBKP=O&x;6@eD?!n?9a7>r!O!sj{0o-nO>edvS-JpcK#vX7bvCA1y}K8%uY#`6V{wpw6yEvSsJ`?8d-l9t+Q z>W#v@aBC&WriBS)^fRl(O|MUF-_e${G70jB{Ly&`)p`?uKCbHjUM z;+STsy;0#zdTb1wej^)^s_7|X9soHnvUvTlwsl$XdRLvEVz2WEv!nVMb77@LJ%>SO zypVRw`{x}VB+|7f8S^I15#^o+3-I*u;qG3fDR$ShjCUmre$q)fyUVc?l>P13$ctST>Chvl@5S61b@a|YXeo9D!{Be%hWqXH7hp(Je;K-U5f2X9~us#)Qjt& z8h|Ncap zGuta(tpFm&vi=YkcPh6Qb4Knt8)K?~3%WF!6}wKDprMGKY9_~)&N0t5BfeWpSI2ql zH*7zFu$YUx`B9CJdfWaiN>T4eB8M^xb`Yzqryp{jVa<{by$-i}(wdC+&q%<|RkN9@ zNGIoJ6eKu+dUK(L@m6rZ&wk8fjs}D{NYl`$8XaS=Ir&^jkw0NzlCLk831@>)<8AJ^ 
z(9388jQ>u=ji#a1tk6%Q^Up6OmLH&2*9_zE1qqBipS}k#VR5s{5S;$IxZ9D%&3U!# zpw9sma%S)`mr{RPZ?=e>l;nxh~e;fCZ6;V;&0_I zbKjNE#!r=8A{XmvVN{G(xWz7-r=}^G(D)2;Ue2i__f0Z7pw8G9Qu zpvAMa3Y5giR4;t+i>skNV^P6^E=iIp0(nVH4j$g&WKb3_-f|Qs=Ur3W%^@obpezb| zx{#|S41-Z`s*$FW#FZev);n64sYu06+7&%LOP|H$w^IoK^^A;^)+&3jK~l-;2bbUr z`B3LERyn6pDu13+JIB#kv7;JPnpn)=d6-P?>*Upqu0j=7o8ib#0bQqw?ez#maDMfU zRXqdEx#xGco`p@{1jQbaMXG#Y2DHgF7weUe#Xi6;>`D_koYGg*&M$3uN&339dbYRT z4{BDGT{G5sVaGFg%PO8wUvZ!@!k)H=ytzrFwMyr^7PC=pSh!yHZn`d;0lL;fYu2y^ zx9ggBNZUZWLg55rRwKNYDH}R@rvB#jV*D` zv77hCfSzn6VNiAJ1>zc|9La4&NGgMpU}U8Z#b(||8{Y&xJk4=LI?lPB=XKBbJ78m0kZpG6j8!E3yisfxrvrH8*%*JbkV_wvKsCCOyu9fx-xDL@UBmYp@rN53hqt{3)WUcE z;h(ClILPCjq|HjroSGQ|Qt^%*5#=*$JDpQr8tIutlBM#JoPAYy4eNcaIZ3UdnUGe9d6@3kV;Eh|Hts3`0u1NvIMTcvtaovS z<{nR2Jq#TScRK`kicN|H6m$?Xdc_W{z^tj$p}cq`o#Ar zqDMTAR2scD`C!RF^YOm&T~6k@*_ll7VB_Tn#3)`Y<*X%YFYs||4r$znuFA-B@MV@J76eq$CCUAY9GlOL`0gTN4S}wPVO!z>|PiQ5j1@nAr-?# z=H;o*l!~|-(VU5!EC^yn=`+p>*pCeG+w`^D7PK%6H`Ict4Gl&IbeJ1}EcoVjyjvaA zJ5TKlx^qv)Xf0mC9>H#49>K+ALq*V8P_t!HOFMTVfaTe(Grsq0b~BHqb;v%Zjn*;I z#>~wuR_Z9Z9;|^A@nc@=$WTGGra0RUVguCB$lT-Zsq@6l!QYl`Ja$to)x+rCi~f=5 zSAtFMyMcur;Z}E`Lp51pR8W(*HSTe45=G<7Io%O76O;>%r%$X^tON39!UQS0KqW5KS+j$$G8~Fo(~% zU3oDv;(MOl2Ab6WKv!x}LT<)>;ATXwIOr(JGPP^y?RUSG`pz1&;=zFjDy#|KbsI%S^A(LGOC5;ni7ksFYF9Izgz;VIv5@th!>I71=CfHv$Q*Zi0| z%y?e@`hkY;yoaz%nYJQpA1cB3A(~AyeVyB8PCfdB?OgT@H4)S?CrHv>1-jTztU*lJ+my?QxRA826^|>6DuW zHP2q2u|&OuYh7hg8*K}= z>OLD#-?IPyGKlGeQnGCTKAIXvI;LBfy5fHSo- zhxs z_0U>1=OMPQzLzoCw>`695WBdY`*qCgElP$dDuXjN1h<~)Qd*G0q&1{8k&NFSu^8LQ z5aE2kux$IV)`Je?k^R|4H~m>P`ylPHOLD=AsY_IUx~?*TkWBd9Q)kdw>AF&_Jwkq! z@w~gof%`MhxaJbO0;o4O(}-0!lOx^VC-Ulp#=G?!Vjhv8>~mQtV|Ks$MMNq%ZUIw_vRdsN4JV( z_f;W2q8Xu@@YXg5U+xTieBphXwbhfeR=aS$zqaCpCRHbWhAq-cqs=FO6<9S^{vATz z$HLRcFn9$?w^%3Z<~Orb4jVly4qsHp2BKw4hVRj~_j*QW zljB-<#%?qm-e+EbT}9XGoqD%a2)A|$OioH^=EYRxs%#~qLYOULdSm4@pr zbI^nKAzNl&?&xONw~nA}i`hnT{lxyAjiChE(lP@t?q* zAD8zifOrL4y_a?wUhR@KIkc@(=l@~_QL(w>;}ToSZ+LueVDh=AsSX<#{&lOxw?#-v z@@SI7CPiGU>i3@vZ|!aFnA;BJCT^HEI#M3|k%FjI5@PM45IsV|eH2h;VgH~)_^h9C zOzxT<+~Ag(zd}>vLPD^{$0u%tLD|qASKN9*7XRgW16hp?^posqq6^3BjUA=)D=+Ug zxS1@&5%j#IAld7U+;z2*xXl{(4%I5f;O=77Q>M6E9TAt)A2l0t@iC5sC-XkM(>rC4 zm3tFWZPk|CQ-7t_w!F$AlknU+?496+llk&RHjoMX6sDU@vZNl>-|5ws))CXzyl1K_ zjg!&Boa1Q1F{Tz|+dF&9p2K@|8;I-FWcApTEXuPJR0BtsScFzhiG$Li#hOQ>5{1<) zi7r56pFZQQ6@NW@>7vy;Mw3Y|X>0L`n(bB{N{4Lw9^;k6M-zIaO6gIw<`= z#eXo+tr_U+8yQq7$(|2aczRozB@U{KV{wByrW?jHw{4RvOJNauUh@P#Bs|aX%>9I3 zS^CG)INSqP-**<9ImrPF8%4ZoeVObx%eF8TQ{rIp8UM`R*io%|TkED!EJFB2jBaW! 
z*?#m>IH(wfc>Lsg4x-0yLRZMG_0ZgVgFN0WRMbSRiR)9tt>(UEM<|m|MwvgBA2!MB zEc>RDX41f}6Q^%TV-3}eh{5YA!A>>7cG616CsdueyvgWi0!&v26^hx@Pq4JPs@fdsPT&z4NHsb(5l*#r+L)0M6ci*kiD>>;y#%zb-X!EZ1lw+eKGag?DPcjK6- zlz_}oUyreQ!Ax<50bOTDc#WN6Hqs5GX|8=S&7g~RU*cwX4{p!jf|Cj=!zZlg1gzSu zS5z3^Ue+@NyMzwiPsp(>WhD z6)$>k^MgWJ;i+44*x_RQcXZR|y99f`?bKY)_Dfyl_X-fNnke7i3?5Rzd3Z~(+L+Mb zjSp%B$){D}PmMAQDqeLRzaq%cE6O}^sXxz%uVBq}Pot;A%-UPH)>~*1>a9J#gBW&% zof-qNk(8YQ$2Mu#*ATo0#$&)c;P+(XLSQFR?;kD17|2>@z2b4uz79orz($xW&^_vj zl&rnKa$i@>yvm}DTQ2zRv1_$srcC2%(Z|$+xG|Y!TS$RkZ&kIhOwq#6{A^y}aHt{X1@^klmoc@D5C{axLZN5O?h)dg``5lMK9(y#v*LktK zWd+@oPDVXT7p4|>g5|^Imk)TuT|KkfnZH#?1Y}ECLY@DCzA*-iHXgJy%iiKKD7Owa zDY1yYx#0s8g<|=bjZr^Oj~_b^-F<8l z#yuxF&5oT-mF1)_`^&ES2c@a(MtB&q?xTPIl31m_T4X9 zdX1AnTF=(_;Lv6=pi!*uJtsQzrPHpJSJNU&d0o3NPe!vky`ThDhy7}q9TOU(X%_r( zQIM3UHKZ_FMjaoHO-y{(GDx&ePmATsX>ZhsGMO3e(XWV95yNXuolljimS!JeRn*rT z6%>W~ZM?0$rvvy6mz?FyUdbbpK(9VcoV4`ZT;TzU2CtzuFKOJC=>3Ec>pA7P7(-&V zk?0Q8Ft(hX1J1>B%NmOY)vsT*=WCUH57W(EG|0*^Iz>p~7CW<@_-#n4FxL+16YDQM zST7N#k5!_WMkn2L8@QU%nmAI7%^X{MZ1fi`CBnM0_L6S0Xm1*2t_ls3>0}7ZK)qD~fTSPSU_2 z*8mCR8W8QckEm6O{*5$lUG4(u@{O3CK8_Scz0~sd)$=9^VYY-;dF2}Um2Dq4F}?S> zJ`Vb6VMb<_pZOIT-h{>w4ZhLEfE{Ef!|t-F4>21nFly{K52;CC*uZaSCpbejxCSnc zz2C2cOU+}GS@LM*p4MWC;kk-M#u0350jbS`0A8;KB6vDf6;xtT_{u_lRXgaB*gKK&wbo|{E~imzD$XX zO;EpmA$F$(^YG|P`TlkMGqhlF0d6#X-pGMmr+Pt5CO2UN({XIpvL_#sdUwC2zZr}S z5+{j#HDn{To^=>#=&($>I@M@RdY(@__tO15QWw_X(&Ni-6`3);Dp-h>YSlJYB=4(Y ziBzn4dVuRa*Ipy#GfwwI3T^L^0`u-dnJrfz(MRso12yhcWGDEWXCzD>K{4+3WKQ9m z))>;6XXDJrS=*GPag|T3)Di$LF<q|M zR#D!k(|H9?*3r)xBcB*ihU?5jb=r!#`>s$PE-2~Kynk}hw6~|2yJ< zbdGz;Cgz%@&4*-PQqW#da#q&sU{_x#KHsQ?x2HBYXexHU1h(FfY361fd`gd$d*u(+ zR4Uxio>{U!@7H4;$m{VJ9LxZw%3f z`8CZut;H%MpVYZ_&ZcTBrVd(;kT6QgQ7C5N7f#=KT5?DSa|;)4r%SvH$~7MXv-A~dY8D>Q zN=4l&oMFsiqf*)pe>yAR*{8LtD_(tC(>e1BPGQe*`)J#kHZ^mzag{maY}U$i8P;u` zGy_e~^9Q5PCDJsB~L|G(%P05;Yv~)I?qn^Wnwi*+~ zbc=)PU5DCu_llQ#1dfOoV?lgd!hK4+FKx3wVzZiUzq?DIW5wuBRh-3onjWM_ONmE>tt`xj#i)x$_X5 zh=_$=_pQ7d85WW^*}-FqwJbPUA{W z?m3@SlB|WpxT*}gXB^A3kj)7oc1-Gm*w#S6ICai+W;D342Z0u*bqOkly(NadmQ7tX zI!hH!nI@EbVE$gO)*P)xH3LRHh{goyN<*yF~B>3VVHR4=d@ufjmyhWXuJ8 z_uIW1V(szZJo%n##;ZEFK#HSZG%%*LA+ zjWX3zHF8ihXtMGU&d;`YUQIxRa+A^bT!@4QXY@Bky>~w#Qx~KH4m8RyD)Y2O_{7+m z;OJ5~JxKjgtLQTZ6EVI~jgF_@sSSQHO!s}Yd5oWXXdJ)`lk+^&YuC5!)uJ6XQapqD zAynE!-9mJVK7j1pG+kSN_$d#-Bb^v6{B}9bA7pA5A6Xqa=^$9*5kh7Fy^em{l0l0a z?rAy?fThO!W)0N}eR2ccG+B&a3P3B=9~}_?%P$K=n$5TFDoj5sU9`+<+nf!@t+nN~ zK9`T<2tXhUnYIoTieeXfkG(Y%tJd0)W9dt=a1T6V`W@b=FrqH3wl%yAa9&Q&N>sjd z7}O|`&uhKpz@@j7i|K~l!Ud#l#F!+UUA#D<&tkGmbJ`BdXKx>qkD<>&)Ol&1uhb1f zm+8uhp*8of31OC{13rbPVF9VqD2XXkP*U@cYUB+fX5Y*HmM3yL-B+EB5_jXwjLvM~ z^VS?|JJM>vZ|9zU=7&MB!pr67FzjbVZoIF1G{cmqaQ*3XCc*m$3`{ppHC;p{7oEd2 z&LnKs4=J!#_P1*L3mi&F=Dz}sPW7R}{$r$(voCD6s*_N?uW6khIcFwO5b(8btuEXB z0;ua5^)npAQ}?YI;q`kLxvM)FWh%(83^PBa$X@JG=ZAi??7oD*{pLoNH(+AzO*NkQwW*e05Z@xJS+S}Gm!6%PhxAxT2+Te@t>A)xE_7J+z z2Z;HuB&xAwIeXUkVnlND22xuWFb1j_4_uSQFeNpjQs1}X>KotP&%VuQ@*60^GABiT z1g}cIb}#%&VQxm)hs}=*s{(;0V?6B2ea}rVjVrblt1#3!E3Y6gCn0-FHy$bs2cJ~# z*t@scrYk%=DxSTqfMxBQ3WXcK*nC#}MmSr9L(qHr$EgMTurt*Bdxe^-PMqaYr!hnM zUlaQnLMiQ9XK*#AGd~v> zNV_)c1U$r7r<~v#4}HCH{vE?(Y4?|tg|bQJlo|(_Uc3IrZUWp7UQyd}sdq|an|m~U zOz-dsepZ?gaJ@?to7dPli>1g0UQbJ(l5$RfHpo`CPdhSFs$-HEW89Sqv*@Zm^I|Se zx9MsrG2Q94;V_8s@6d50=c}!z1yY6zX>s`BQzAWa?z34wrQ8JQ9JIDbq&KRtH$l6) z?3(~-6}i}onsMX|!2|bKGe;^EZX>OcmDyg7Ia*l5`N8Ke2*S9TQ?b+L^1@=iPduW% z4FENo%Xp)X$>7!$_7rRA5Z@R5Qm&Dpim)v*_7U#)Nu^+2^Wb9}!I_@6E_7~X%lQ1= zK7#xpcE2|1Y9`%hNx2S|m`6XjFF&gSH=w(%JswQ2u8w&%;;FRqJab^a+Lp0bM@~9@ z*abBbo1~Ase#=86t?zAQ6M{@|fo6JU+A&a(?7^EF<#^J$jW`MyUbmauS58a@r(zLr 
zJ-fUcH06k$Op;xIeL)!1okhx2V*-Q-pGnoj8WD3fTMMzvC$Kc(??=FQ;9h|VESNXJ z=jlt3?>#9(t9k+1vwL$c-QKjF5X25ZljzIA1pYd#{v{?myK0rahHw`55a0kv9({>S zxG^i#M~Ysah#fzsglb@1yu7XAN{<2i+kKu-j-2$21hl4D2Lj^-1x=KdAxYL#GV(Wn z{yaA~L&==74 zc~v0qmb6;N-biU+xE#--bD9qu=xjOjm`-fy_H6z@XZ_ zcn}VVcdh%dh54B^6ws+X^T{WsfP6*GG`F~qcOf8s@OWlX?xre&Z?S4v>B@r`o>q74 zx(enBZYp>D(xtBp+48)Vlg6_q3c|+qu7IlFHFaC%$eO=O{b0!akGzu4dvnND0OO+~B-!=@cwSx0+Im9>c!4(d@ONVcs-fA*_D|S-@#q8ZkEyhY|ZBb0(T?9 z*;j&#R|n5)+D~ou+gsAUba$4d3gHCadhF&ck0%Q)<2bD)ttuw3S>iOIz{1yP1D;hC zZ;kH_Krt`wnbn{Y4J-KBM5~|clg>4qJ2HE^|8gvhQqd=0w_Zgk3@#1i>%Mr&_H?cU zGUL%AkZ)a*F9$LqYs{X-4yf%(0j)iinKjCG-=9`jI3f*GE_cN|9>B9aDN~;zsksWI zL_=O;FiseF^bCyvC3y)I>}H9xl{%nsYr&aTaG8BBmN%#x<+;t-o9_(PeJc5?rPS}* z_;RYx0XVK@y*PUFVXIJV)^{VMYGT~lx2%;B3Jt>$!5yHXEPe4gTdTFu62X+s+xbm# zu+G>6TI`;SS4an2^$(k|k9z=Ob%GYRRcP7yOy#BmgLa?f$;wbw8TwjZISW8p zeZ%?cfV};Oj$cJC*B_ZDtbx;bov=#ip=>h|DFf_+5XpkWzpJG5YAQ(YE!AkS*p}($ z#9O!^K{qq)Il1_H9VPE*Ivsl}iYs6PBcmXR4Vf(gS^AM8>)zJ>TrJgp*bM!&P>!+s z1ho_)KG)eECtEi2!`NwMOkah`MQHH`w_dR1BYlbT$ILpPHPyl_+NPsgpfzVRq*&1x z@s^EXD>LP>lLCtemySg%IG>;91BuaW^41)5?Qo)?s^U)VqW|Senw)`=ycl} z_~LK)hhN{qZa(p4{k;($0vZ?hD&P{!ZcoL#Su+oU{%<=N$2^D)&t}-;lT5la#(9E) ziqQ||Q<|_v@NYgL4Ci4)*iB5oJDjs{##H1&cCX^yr7;MdT>GZSr(y-G3n-cB!y6bM z4HBtH@Tf99uEuywWEk8k#$M@*dLq8rJX4FdUlu>|c9Y$}18$F>=Xde2#Dp=y{RR$V zg%C1hfvE7he;Zo5(GWbuzPRGK@Xze~_h4zX%T4p;8N~o!b*DdM;M@lz&h2?%Yq{Pz zY#pO1X6uaIRPN{pS#p$_91q#!TkMo?#Rz!I_#U{Ssr-3!NUK)3IR9g>L(FO+?H5F7PlQb}M^ z51c;6g7l;B-D}bQJE`R`ED;Y~F#YP2AKr*A#(ii6ozr;rJONHsbRPm`wCc|Cd#*ql zIfSr89*b{Yx3SX0v^=}@F{p-HPO19+y@TywUi~qAskcGW;pqKF@gOhzZeJbDdPKL^ z_pet{3SJs(=J$hHg8yvmw~02s%_R4&_Vl9=;-e8ZnRTB7m~5OivwvQ{zeb$PsG?^c z@S#33Dz&b@wzoi+Q# z;r-%?m?_qko#!UN9ZL$- zYqc7;jT7KhPz}CQz(FdCWUONSBJD9mE4|RfDt>y9) z@9$^7SVHNk3gH6Y8C5U87Bmc0E0WIztqTfDMnJwG3|>2#nll0M^sUmfi^5YE!4RDH z3f=8%ZyZYqCiFAtr^+(T;A1%apWSB&!NO2D3I&>64uzw+Px!MxBr#ZqI*&r$5j_ES z_*u#^s*8VFEl&Z|cw&R%{>S$a3a*ohOea1n?`dEkf0rh8ssg&e9d$l?`TTc5^g{sP z(WYHi-{^z@W#v3%m7$kE2TG~JuyU85b$j?IYxP7yIFA8FY3cOFa&#h4_uf<(AFMo< zYu5>E_x5HHZDt&=#was^LB@N#mn%T}c^{aGWJyhQF7Zkqb0DAqkLS>FwW288Z3KPJ z2f(OhsApJQ&R3cMOf|wVoS;gMdGYJ3=izhO9>?utpgFO>5IMDr7|YSa^%yxSr!(^um!ML)bk>*zNKu*D7py6Ad}R3D+4 zAq5wKT@%Z=?mE5MYl5QW~Kudp+uRBel8+k~Ri zrp3LRvpqi$<26ja2n&(L*%kWaB^C?r{o9|zRNKMn zf(_!u(*wkBBoh&Zy{|&PWFVJ5z+;yT=y(zDFdjGporrEZXg?O?xDtSDGBY2RKQ|-# zpLY*jGA&lprH+93bKIqB$=#i`TG8EbJvJUC!Wf18A>RGfRIAf(8&;qaKq<^5EAP^Z zL1~u`TAWPTZDnjv?{irjEb2&kbQx0(z1vaBX~uAc=g{Lhh_Uj=48`i)&}7b0zDQ=H zJbck>@I}^(2Q$zQo+TM&|73Vw*w7$7%S70mw5-Ib{DBUb{{-KOkesyy9JVjgFte6% z5W9uvKQV3a-(8Bj~o!Koe7(u1nk&SH$~je@j?L;=Eyn#LnVcE~eR zWg|TQ-WgEH5z8<_qWlB@%yBOAJdJ-FN(RfoW?Bdm38qTX&SaQ?F<226z-*m_6TRTE zH^BXr5O%;DYL_~&%zpMXI89|$* z0k%Cgxc3g+@ba?YHagT@JqN17o`*^x7_B`vEBN2t9qm1P}VT4`Pgs_+D9^-0_3 zQrtSUraQ1k-ZbZ-Z;wNVzTeO!&(%q!{jM$$-h$C(H%cDB+2Bt#59Fv(#0Eme!QGRt zhk(C!Z23b@Gbb=!C?HGnRWnlxh0K6ixMKeB-*gb!m_c>2H}q?trKR0K4|@OLa;$G+ z@b|0p3Z#11u0)rP&FgI|rTI7TV~=UyOzZDSK@(!i5d&j>mbe?zV{Po_XuWup4x~+P zSFYxQL9{C!o)_wE7DG0zv>?NTXpWE~m=if_M?H(cVQ zpqMCb!}|ZQ_vP_WzVE-8C`7ibg(+*KWG#x(BH8ynDp|86TZl}H7E4o6)=ZJ?`&wBl zAraZLW=SHEExU6)GiLaFf4}eVyw3UKoYz_Y8)Kg5zVGY4*7tSY*X^zWN~8`>_Wnvs z=gAz&j}GaB_Rk|-wtK->2rqhRoa+vJtyc9oa#Z%;aM7^l zz2TxrtxRmrix(0zrw!(t!%he2@v6-{-50)BFB{ShpQphrUG^~-annAQQ8;+fSfRaU z(ZTtySC2%|x%9}3JavOck?6raM&FbH}!v05q);(Gz8o!Tkwl+ojZepJ3lrk`=6&VL;A7+hL8>* zrDZLIqQgr@er{pmkY^Z$t{Ec{2skTz@xY9hq8Zfr#Qb?>xT?Qp205|5Qn+B9@+KvH z>vt#*9-2M~&~))UsO4SQi;({O90i4`FWo{~v(xwD!KS#i&LPPrq`){$Y1v4?6YAO; zb0k;c;OnjjhKrXvv0Hf}yi7W?V%h7TSonNQn*I%8qBbsK4zPGACFq0+iMheuQBiO( 
zs7ct(;OGgM?OfXLQU3*~lnfbSkql=sJlzeqX;{}`>STU`Z{f(_a19KF4ZyTAgd0 z>)z@|#9O5kV=-dFf+JLs6ysWRIsA19iYvU@ATX;&OzlE=dD3bbRL08r8G_#UD_q3+ ziUH*RpolEuddt7KpF49b{A|bbliT7IvXCg~E8OO|c~K8g1Nn;bYDD#!t6ZW+Z3hl= zru^@5kc4YElkqF}5AmMbd2?gpF#3y1mx!Nx9O&~YFr5|YK+Ds5bYz>mF+IP)xy-7eelC1cY z)h9*Rnu-`8PwnPAi+3T2jcRXr>}Lf9Vfn?-CZP^4){4#dogO~7W&5t-T^%`*3)R=4 z{6sliqt(K9ENAi4b;OvbVBrnYCnAro^%;WmEI5#rK)SG>Yb2)tdT$A+V5HVZP!uY> z(4~(c@{w>n>af_3E*a#;jHja(5TK6M!r2Jpsez#Vud0!z4mNNMR}0NuKnYv}p2Qkw zt(_PTz&OG%l``&!h%M3%BEjjvw?3ceh@{HBvF?mn$nt{gh^$JW(&vl0|NM7*#W*OG zLLeTA$`m8t-cmoLkRH)?<}$q|)e%U%2tm&WxZ>**m@4+6+5gXH z!u&*}Ak;~^?rJ5cZG`PpZ$Yb1B42&MwkIwx!S%$_;1GSx~ynKNga%KDlqAIk!qAFgWy5P{Y5cxsYhJFkZ6oVfwDcU_{5Ug z1iAP$7tpHjxmQ0_`on47&`T*C!`HSR{7?XZs8XN=bR@gO0rS5@Md`IJth->EmAh5v zs0o_mG#_wj-kqGd1mJiLResSzhP$Bvsb|UIEt25;otp0$go840@7La%b2QPncU``7 z_*=l{TEs2?YmS@{M2^Hb_`Gs>3M#Ipb_(|X!nAK)!5zvLzSLZJarMd$U zI|Rr;UFQcVXO94j{KJo%9nPbKL(F43&+%=&Eh1bgc(b|v7Ts-PV;u@$2NuQ?1vnKj z?|11gn#f8+*PbTiUg9Vuw6=%Kcsnj4&2m7S)~Y2UbjZPvkedBkQBuj@zC(PvbqZRIGz#UPy>3qZKQ8A^RyBwG zK)%vP_ionp(<~8WngS^L;6T-VhB__la3~@Xwl%cF@uc56)B<`=NK~43wnj8F0`Ag7 zY=>l)yog4cM6F2Nn+qSAr{G*y1cV9AyF8k8IFFv+tW|>nQRqhmg9o*1O%((2{=oim zl|}R&Bq|@I`GIiZkQ1pEV9Ba_`$K4uBpvB0)|LTnU#vn8^;7qLhj_vUDefN~1w&g~ zZKy%-*jrjMKUJ?7LizTp0jWolBS=f9XP=|T zFYx+JKPVnaNsW;xRZ2&ibAa^7Y~^;xA#QKOCyuSq=MI6a)KRy{?Kd!JRai#+4*IKv z5D@EtNRZeAn>bfwEroYZezN>K?ip>1I3!S^StwU>_ARRE+40-KZOyNzL6AKBSHOx! zAlM$H;NByS+Yq>GG?bX{b^;L{C%TZ!*O}<24u&(X{7_uA_v~^#mHS0`XC!1Uq9A5H z%o)wRXLX^iY)JA(n(dUtkSFTDL#+n1_%$3JRg*tumlU_sbFJgJlCX{zJU0bGMU?GN1AV!u$${ z0Jr$Wap0M*Nsdk~lxa4en*;4eby?$jR?S}?IBsYIXXWYK>7g3~_6m}=^;(+XA-h)k z$R?c3$`vNCiT+`bFOf2ecK-{AN|lj%;OPjyQ6lWwVM(-sLu;n_FK7j)0hyr2*qAo+TGisXwl40g`=${f>$;&S6<6DBOaQY<;}$7|{e^x!}((I>tMKFipEk zff0){kcb)r%~Cs(c%l|O?8Q*EQ|t2;rg=4o>B?PtqeSl$ibuFi{bVn*)5s2 zDSuI!xD>x7z)L(-=auVrZ2s`BUh~MG6Y;tqMLf*M5`71s`u=uRJy{4~@SQrOiO867 z`!!bB$n@*z+>_)PGRnYIvhGT(b$dV~om{(uIViBI$WJL}u4L}jfj$MHhh=wDxXu3h zS~>v5#XqKJ+&d9$KkehOUjR8vf&ICG^t!41j`Upl36)ED`|e(L4<)!1F($25xC7Lq zlk$C@vC*g#Kh3{Ci7(1@RPcjilA3$1mW>YfI_B2ZvY9!alj*r)Wtr{!r~d<7jJ)4o z0EgVA`?A*c*(@B7h@xQ;(p@Q4TMSdGke?x+crRT}bqeLYMg z6!Y!Q)oABe_8*sB4@^LPg#6zdE6dPp&juYuFs6XB76Iju|4XN&8^m zY9?(x2pgM(8C|W}$JPYZFmEffD-4@P3 zFxkCE`h?B*2OX$|>`;GLw;$3*Pwha-psy2nKCzB1=1AHNW*D7!4x~Bz@bYw%+ZogB zwiZ5v5?!~i&|~3ySZ+F8Z;?!)6mnbp@vK=h{XGuQDM2#l zN$%mj+1KFQhz)Z7A#g9BH=MQMd~yWd>OE7hp@H}P2^7=SLGMms3ZqkrrYRWy@nOiI8u% zfr}`I02Gm7KHSnf2`7=%&4k#6>*N8CL{bJnxE%B|W&p?0*eRqm0Xpupx6_L)aMf|8 zx#w4F8_0uW%^AIBG`b;J*&Ms; zFbS7D68??AtFog)6oLMLpd@#P5+^-2(@DI&#-OMZeN5g_uU31?3W1MH}t;%716 zq$`ZJZs`(8^T^K0hRzUYV}-U~owneVvwNAcSG4#JgLQkX5Rg81ouy!4X+5llOC)YF1r2v zhz>%iXt^yR4hIJT@@!LSF!dRJZ;1H{^*pWg`v&x)G

S5(v2;gPd?neFX0vbC0O7- ztYz|iP3XGY{*h(^T{w8hgtWU#0cB-fJIFRt#Nu?s2poX$IU}j zVQapKcuN3ilztOa?FzHYhwibQ)+HhX`m2`j#$JtPm`%KSHF1Y8R2(Am^g7Xn0?5 zevmZ;IslGMyUv()&AG!tB6FnhSHK zPnCH0Vqb1*+Q~xgR){6ygS+_K0SBrDN9{uMfZlT(nNG;r)WQ{TlIMSwD!&MD`kKMq zAkc+$+q1e0oqD2?j+RSHQ1UJd6%f{S^#V3(&9jh-iM4^01heoi$l(?sH!6F>Me4y7 zElAW1C+8V7i){-}o5N+i=RO~9Ii>%rOHjx1dwR=_mW!@~e9Fd?uoA%oiNC1H-dkAA zCWgyup%GMT?7&Gmtn*m|X(R8R?1bN$-udY?xlIGL*a7aVGVqkB{$`Sw` zh7ulW*35as)tq(DZu5*>c-)$4*^BgIO9aTk`uMYzCq0vCq~v7H(DB1;rhxK!1FBVv zB>BSLLBFG9*pep({9$ufAiBL^o}ybU(v;fnt+9Ms;ATgKQFyt>lXISG0CmmB zLc5C?9EiYWJka{vCN&$ml_&L5c%4aEX_EiDFW`%&^`|WCPoYcAVGP$0c@0aJ%%K%X zvjFR2-5KY!+uBj}hIigvD~))+Lyvb=DSQl{XU?l4_(hd3^RW_r-S5035&%cVD-lk zG5H`_3Wc3j^8PO-{GsJ855~Hg$G=ZQe;4Ib zXLl-GEdpJs-m%1-z^`u5==k*>G(eMtEJ}KUnSiC)ch7qH@gvaTb~-p;U(*}v(CihB z1V`~oH;zq>wwk$ksH(iyK>Y@Xtlk!C{t0zP;Qu~pvTAWsTGSuPwJ5cMTU3X_o@zu^e|1F;W3o_F{ZT3h86p3n_#2_*J=L#bUz%}mYGLZ`?@ zz5err!?}FV8e6qmJnuv85x>UpXIrtq#@#=XmlOOme6`N6AZl8&*(DR&W2JFIS(V2; z|HR%f(YBLMFfSBuw=E!^Pw;-8BkQ>W#w@0!>DwyL*P65^$)J|T(hKh7_8vk!0lhrc zAE^?|_vL>ditlYZXNu^Clk8{oAALzG{tFlA2Cqaxm%TKDifv6#%-!grVWk*fQ|oKV zwq4(SO2B)y0W`<{6RR1IHzdoH`x6Nb&nnbC5eo@{P$K=|KNT#oaR-dE%8`<5&aag+ zf;Icc;6`u6)TUJfyzs5|2LeX>>?<>_y20upu>Nj3i zp^ei3kboa)^$aLiG}5dcDK`2AjhI^Z=@*tUXP1Cez}8{*P&V%9tEz<=3I%}F z)In_P0LF;iJv-_nyREb1bf2EEQAUs;^zqybr=5oUutw`zQi#GUPUbI7XAHZ z948Z!bEN*zKyXxCx_NPqNzjBY@`W9 z9bzjX;M?V@owyuyTa{fFikuvOcjowX2dOcJUE13!GH>?U(@8JYQ0zxpO^!L_VRD~< zixQ2X9EIcQM(Uo|?rWyM{H0OHo%nV3N1IkY&LbR~Zc#oz?PE}od}74$@eYMs;@_i& z5j*GXII*58IuS*<03#~L04WV+C68dV zv{fBi&GtGl@i>HPwLDI^_+;{Q+fEjjp9{)1gTa$*0$#f#bssLG4_67ef2m7k zA>YhipQkg4vL$=f;}nkWgB#q%K82)eL0+ffq(4vLY^RN#{JnLCT^ZSxTz$`j_N%5pH5`|8GE|DG5{h-#?JO@+S4dV8wfly;v2xVq ze4U{tDbETHF)Ka z7v#fsCzWYHUT3s0hx>q3U=q9eC8etIeIcnj11Zf5e1Wd6Edz6|0Q}tYdNaVy(h=vW z(n>3wfeG%V%6v7FLNi|Wi%L4`L%QRlM;Aay=Yqrv?26&!wr9o4)gSV`-kxf`ej%In=?-{*RMSlz4cNRrX~r#wFXNp8;NKn2r*?f3 zj@EK$pyf&mKzrbqM1+VdquyauP9NVN8vB?9){4od+~PNtXl!b#hF?V~u>bA7+8|w- zenz(E$^gwcK6vcjTU)viXo{B~8tq-dbgJ2KOq$2Fvy1)BH>!NaP>OG$e&zdO{Hc;5 z;<Ydn3v$kXDRB53H<*AXsFtex|rQVPH7+Ns+cGUIe^Y<-f(^n^-lGb9QC!h0i3R@O zZ&=(!#fR(ruQzvT($n*;?O7iC#M9p!rwm-=8Bmm=4VE)HBbbS}{_!X%Crf{Jc4>=HcMpHe&L+z#ON z;Wz!PnbetnV;A4UdK7-XKL6-(><4ex(f+y5Z2&%%6^ywo32nJ`ZF$r(E`9%&8w0Bq zPM4vvK%J@ZuHIYGgLlSrm)|n@i}y9#vEG;~Vs(C51%ZOT87r63J_O#9{(F(b(A{VIfWgKa~=+rp~%Dp(Xp&(e{E@Nz%8HJLA^KIq#guMh=oM%sB z&@B;?ULAOGixR%nK7w7?DVsghbKmsM!RCGk>h|OpTm!p&=W8YF)!VtGONB0j$4^)9 z_uI|)`kmeFaI*2xIki@#hPeXoxpZYD`Q)5#xk(4#V|#n7w; zmHUTpOY!QA)oHLvmfGw%QQ^+ao7X})zK z;OJH2M~>_=G(LCh@eia4Sbq86+lou)il9ZjP}-KGP$lkwovBio&F;8rYX2hq9e6uz z7Sqq!zdwDX_4{_xW%^yydr+dfqO#xJBM|*V%_#f`v)uGI+wxDh(4QcvbRc}Ga_Ve@ zR{6`OZ7&#w%L35(OIGH+@Bc)PxXO0`-c-uAC(*+kqLXu=Zu|<0K!>)J+DdP;x59T^ zywa)hdUd}3T43DLpwRACUizq#ZF$j`Q{6Qldev2C-ybgx>0GX4%{s`qJd=5iyZ`UR zpYm7XCUoadDJwhl%{mo!8;aUOv+aF!2jkw+83=HRsH9hjkMa}dg~p)eDN=QFa}6Yr zzxB=Vf3O0nI;lr_rxkiZ1h3xLVoMJam3(6AkgRTKwgL@7GfN8H0IS{xS!Dk!ecLP} zPO^s`K8pp+OLo_lZ`jDZelP=PsnID zVZ0ON4$b%I@|+(ZD}NoVb}(2@2|tcPN~zi*=5!c8uQ7VZY%_w2Z8k}dG32fM)c2?$ zY4Q~l$Mk#Mg4r>fW^>Yet0RHz-mZ)Iq7d8~(T6q8Hpn{pTr5eY(N{a$kZO?fBE68Z zMu4%`%Ll3zZtH0twtlzh`e;_F^bt~JxgEKg;!0C=PuO(Mv6!ltjPLbFl(#QP7Nhf< zo78>~`5K!-Z5(bI{-9M0+3evb0WC?(4-FZx?vx{|pSRP~xAfTmM0A-D3d+%qiy&5g z5B>Q#{r?16s{8yDNlnx3oSk=a@in`%T6eb-^JMv(-YnB++tta5NON3Neep8`L{>n3 zU_+_ih~`^KuoM4EM9s)L_S1O!itA7hTH;sU9#e8ysZ!fUGk6ARU_uKH{k0;%)86r} zOx!kgdtzb}InmEvb|?*O)wx@&hSk385>C8v>fWptLTD9fZjzM3o%eR;;UkGI4tcbD zkYQB#BtE``^SFx$ znl5Wr>hY8qT0U_`?3=Dj+$ui|R0({iDu}!NH(ps+$>F2mRPksQSliO>%8o{JlbX{cWT6Qr;8QE~CQVrU{Y{HPs}%YUJ8WhRLa7bc;FvnvxIG 
z_TTmOoSiyWeZ1(4USI#d3@%|J7M3`*oUz%=?-koW5`GB8B;h4$+8_fgXzDdixfF~P zE-cFJ=}ZbhVI0ruY0fYRMO}DnM?qe}|55;DCDQ?V!lP@D9I<&r8A z3+(@oJV2AsvXw&8;}TH*sa&|zT&~^m4B>RY>lFY27V}U@AKab zViqj5F|mTSZ}L!y1&O9ARvK+_Qp>1*m8;h1mq0cTfJ?7|%-HckNRRci(Aal2!Ev*W zLdMs2+FX9Nyyioj$6`SdG;n^>($X`jGuc@2p50kSZ+LY@XLKsO-A`_NBkT-d-Ixq( z^oA-eILSKpfDm|Kp5&_r4*gU-OtbWT$^^E{+?qBK5rt(CGq1n z*e%f~Gns6xcgzB+aMPWNAK|E*QAr&>uPYc+nS4(wTMkuedxe03lnO%{jQE=OD^%Iv z*Tmo6`Xd-2Ncw%$_(wziSNHC8<%|}X8^6?%dP?16;m#B(6-s`~c$InmT|oW5sg?Bq zv9)9_qtzdj1LigGI?QlR4i4-K2PZ1=cm9kwn+YeL?n~g{3XRs84d)fWYex<#%-!>< zsa`u$OR!MhW2NxUk_`cec1E9Ql@fXhS1&bH;^E_W!i4e)MuGWGL3Z9>Ni5R;uBcz} z1aouas1EuM82|k>Av`lTQ91q3bq|##9=L0#k=IsWZ&Zv6rm%l&x71=J(P{_PnvY4n zc%(t)mS;9>Fn8Ca@;NYXYjp|44>V`s$!_x|sw9U!k=0120o3C?&+%#KmlBJFZTH#ha)d>;YWeTsp5O~7`l3soa$qFI-2RTJMJh%E=(`rx;uJ1 z>av^h40wWz?}~-HPFpv#9CkRbWDphrad|%xQr_5=<$H;=LQ%HPsTJ#c4>_5Jc&65A z%rx=Iz7I$-hiULD3SA&H^P|VISKMboH zoWfH>Hv2@DX_xye;V8GR>a6{Wd16HbuBw4UGSm2JZ+)+5)}F(bEDlWMa1WCk4*=a& z1E|&*o*y8(OBcbA506Hf#qI}BifKcEis^Dds)&+IgLZzvt)CtC`S?%sN_Q@}i z*KM~7{;Za?RdR7|K`)`UR;LnSz0p3M@zbNbEmsS^VTNP+t!x5K@v%VJ4?S*OrgLB%}r&{ea$E6n1xQ zTi&wls5=m}gz>glPNwBq;_!FJ$A%&t@{3-rDexl}q^T1Xrl%|-s5V$-62<$^GhiGh z4L8F3Q$h$QeUA+L`O}WpeNG{`L$e_<-0Sx@XJ^(DR=rBnyC$<}q69$WwTiHXLhFWGLHm5PSFN2z?S1$loi#a2hS6^nNg zPNa;A9T&4?oKDiXTrPeJNE_(;jWtXI=0^KfRVIJytkxw$)tUifII-{LFwNy;FvfgL zxf1$QF-uR8=)YhLp~b`VR367Hdoo2`F?!}G7noA7=Mg7L7(|NKts;7dMU%z}4pt@z zF3d4@xO z7f7Pq`MP{?M_n#d_tfwi4Q!76>u4 z_L~8f#Uu>3!&mn<`pnXgTDQs9`(Dw-B3Uaw5E+$jya%i8%3HRR^SuQKw!8;3aI@n7!{h8e9aP{#Ls5hSTWy{64fI z=^6wTx*15lWOx4aMMTQ=?PY#`og4PQe&>mXN zDb50DU)r+#c!aEd0PjfHR!Sn_@wN#?qqeVF&U=mE66(q>AzKmU zl-r|)v-XUWd^i+*i{pRXJDZ_91H{j}-($ATVO=fe_mGwkgzOfTQ?IUPgw&S3yiC&Z z^;I>ssBC76qHq_-J@$_)DT6yEEapYyJ=eMD%_pmwPfA4j+O%bqg2gf~ck30Zv`RoU zSXgCs!cnzu;js1OkD@|oQvFQnd$pkV^z;lB9xrW})9>Yt+K!zEMTyYREf#AQzw&3G zJlZz0DYCgr>c59$CvL|aObVf{C*pX*le!DkaoK9>iVC9{@WVa3X&#KVW*3c(_%COd z=;i^(^Ah$Cbk3zJXVZU!jrLhpbHqz%1?N z)Y@eBJv3TV2k_AL+hR99Tx(*Dtt97^ysSE?;^O^Cr+tlQ_NENt_CqVT5*Ql z7YwMu^4H1bt#B(=N@oV}k*1E&C#6;IBPKop%Lg;%l=s9O5TQYge9$H|F!=n)wbbaI z-r)H&%jfi9&m*hy1;J{T?FSkAF8iAfGc;l5R#_&ah!mdaH_t(syd@W!xn$Z#)Bn=~ zB&_pa20_6fn&H1J%F$bjh(L8x*yW4w`B!9-B^*SbSBjzyoxj=%V;xt%*a9BzfXx=ayXOk+E%273jo%-Mc0TtmKg zdrDqiGilKFmP+Z|YZ}t^<2G~MR+r9T{y6;kNoyBsvY&5Rn$>TabHltia~^!8Mb}3g zEY#;`>JH1+{zptrAA_;$6@6AtsQnt&%6^oY0gl&A)652W=5Ts@y$3-T#=q5IaEq{6 zs><~vXs4rFMtO6m)8q?DwPIDh??Jcsuo}T?_0T%Znt2>QB9?hBNw1x947Q%JY}Rph zx;gZeJQ7_c3(!c+K$F#jrp(DFOy*wNzO?J@csC{3N!lY}nV+^idwbolw zgRSb>^1+3zFhLHaP7-Y%^AzQ*jeJg92Ia0VGOr_Dwny-#yLBhXN?$gzjLLA8n#r-S zz^W#Uxc!XaKYDiCkY%Zx=>5c0AD5F9P^nmn#e z?~Z*yIy|~@xYe9a4F=}GSWPBfZ4O!Y0pmZDwbKs=gF*0OdlTs?br(N4#P_=Xst!=^ z4;`mFw6g8p7-Z`UtGji*(MM)j$(_jUnaz<3quG#`PC39LRI&Q*eKY3%#Kf~<*B09M zxHa$%K1GrL&$PCp_)-O<>WI3bdSZPDu-{Xez$8`{AX^Ozx$D}&Bi%gxAQTC%RgwY(DGxe++sBd zToXNxd-B(Juv$-V-)XpB`jK-Hl#rGH^G@P~(7n2wI~ke{nWx?^PnE~J3I9VnLY~tn z5rL4p6Rq?oy=9v7OL>-G0%z^ij$IwSHld|o8@s^ZfU#9BF1^syJw2`1aw1tfW+J}p z^Fz(*r!B?#GDU+JFzt+JB)#MVJm@$1Wo2rb1H;MDji&C|o7>tm6GFG`KLT2lGaWGM zx1)5Xx`b=p(`YVp>U6FJC!y=^vO*e)GZF%Y#Bb{+YAsZ)?|u8jj;LqO#3;D3H)2;8 zzVjlz@vP*dR)T4j=&YpG3S*jzCv8}t7yOa%nWmlp=ndtgk7iTrw7hHGZQ!}>la*bHihTdp8lf%9i>CP6(IDGe6^`9Z> zoJg6_MKV*iwkWIquml3yVSHBtjbgIl(`7*Bz17pMll6RiOR?kT!&|kgB(7F`SX{0qt5-(|{5LAG>uJ+Fjm| z`(4M)&NC;AKW{19O4$kx0@0>9fZSeAICdqGJ{ye5%}hnRu6_91U8GnDrdj{q=ug_# zCm33OA^r3_5U%6HWVjj&JX0%22k_Bpzj}2I7|dz)EDQ{zQ8KvD*)Yop{Qj$t?2ha2 zv*{i`wLn}w_)dBGTKlG4mJf~kD2-K zoUqD8fBWb*Y?$fygLsYwj4g=VPjMD!;p;-1tt_tWV3H{s7n6N%XYW+E3_);)ie$9Vu9fvvp1DD=^JNRm!gm^{ zXTRIw5*5!vGX*WzRDoN#Eq3{p$!03LB~(lqrlq`{o(E(`px>(`;U1mY-|#&x^;ttF 
zde3|ImWi{uRNm~s1*etsYxJf4WR9qi_V7Is+O9tcB%SAYbD8tY-N_8uD&QHM85R#B z(svk)EIbj64)>^&%G`%H)EtNu0Ra`=#0?G8U@&!;ZNFTPR0@OmT80U#EUZD{7)d|o z0}|{ZR(tE0%Jdx*lsJaWnjb$3KMA^hkRPk_Sl+EOiCPgU)9(~|yjHWt%>gZcEzR~0 zF+ApvM(GG>P#C1#3T_)qCFJ}oGPiwe?Ov`U`TKS&uHHBCGfD!2E_Jl|2Jectg-oF>-JHim z0N)FWd3x|UPICe|^a(~b*HKO48zsRT^-{3O2oK|J>%f%48NXE_4$Om9{YS8=9~sdN zK&TZR@MPZSJtDVsb1}(Tf&eNo{+?rAepPBm5RpB5`QKEF+F<0HB})pKorh_v*tBe7 zElRN9jFGDmls+w&&ngqx>KpIohLR;>2>(@%=KX`n_ssH{?(SHWRK(l=`RS{dQM$Uj zR3Au3#az4`Y28RY9WIqewynPV#nM>~pK_R0(E2DxN7{OPTBgCUbAQ&^lw3A1LNDD8 zSAvhC^>nR|H=dk`Dbj5hc)B)`+@w`5Gi)YlUa03XTqtG!-T_-AvNy)q0kjg(+a!K*BGN53n+z3K%@mRZQ-`irJ7I4T{ zwLyqVD=L_*JlweZ#+p`%KE7_qNqy@KOGP2l@o`biL|MhyPnHxKb`%>VQj!+{GySl(Upwtx||K9P9Jkc^O9$Yr|Y9GoxB^YBl5FH|X#`_Gm# z&>%{bwqE3tB0DXOXv!Rw51BfaL}u;;!HpOudKzysx^6GaPt%QM!52P;|6sp-SSgcV zH(0JM%PFYgEfN9@A}qB?2SH@M8a*mKeK|`hbwGd@3{Z*1nMR++#?|a)eKr6_eh2lC zEpF(3+6MxQBC9}kAt{r;jTb|R6cnCoUd55bo}VMWfJDwMJefH$<5-NYf|oCB3C6iR zJwk&Dm6blZo$yw_)+}5h<2)9SR3}_R++vOPfRg;GW^hDB+TG|(X(C83AixLuQ1Vvu zUxw_4xsZ)sFci!?q<#8ttqN7ADJ!k+#K7_9+Gy?KuT@@>K(^Lq`M$TzQ%gP=9!yO#jY}hHfh^AojU-p-41;PEAyd%s;4RD1 z52PvX>z~6vnPDUFWOhvZB@djCVGt8DM4B9EihR5GUjY{Z)AUt_Yozyl^C^c2#azik z?C8r9rh{>=NRRkORIMt`cGCjV&?#c|1H9QXc;2)yXZ-KPMA;iyg#(M{obtTt^P{{H z-@k&ylYOk=gEGpkfv6_ZzuYE&ol@CMy+<-9_(8JdBP3491^c0JHn1l>-oyi%8m2Vu zXT#BYe)($=sK6nl{jdiv+Xb0~0mX9oxexxA*n^gfHKw5tts1`~vnR^E`iVLmr{mh+ zOPQM&s;jGiPwWWVDQs=kDJv^0zgEWt#i(bLMTLeH!}$xc0(TC7RLW@=-R10MyselrIv9*ElrL1t?+_dHgXsSBI*`zmXWiv`Lk*r%Bt*o;R68YO;pK~C zh+Q=x{%FAb3LzOE5>gNi3O|b9Isx8OTuD{f2`gb~udzjaon6>q5Wl=}IupG8xZ+_h zz}#k+lW6Gg^+i!W91)~Cj z3ynK6W-HQ7HCG-WO@Ml*z67cdwec3yZ5c0cMWOrbG0H}2{-;1zO3-6fn!C;|L8Xd1 zRTMl9V_-AJCEc!V_qx+h5J>s53Ob-}Oqw0xUw!Mn*>wOvr=bx5Lru4+Nk1cn59zuy zu;1~%lI4w&AOJ@$Q3@=T@+`RZLjRJ4Q;mKDDsM_;MxAEmyW6vrn`kiqj0ZFL5r?)D zi_GHm_Uy@0_BfUt65y9J2X6Yv5{pmwUV5I)u z=QI+$P+p{dzjIr*byRpfT=7gFs(=9UK){F{Mq7wEPxwOO|9A89FZPE!IqIQrgfch< z@2tHrX}u|uRP+rZzPW-k4HlXwP<^a;T%n@8y?43Ke*2{9SAi@xN3F0)|IKuyXfF! 
zXzy10AV>!?Z$<<*0gzSJJJZKueF4GC_--4c;<=t|TZ95NFb_fXpc*~@d&|q7PgiX^ zh|YI4%3(HkIMMc1$p;O5DjoO*Q&=sdR`Un_~UlG*~;Z03C{vVMOv+&ZFazJ zJzEmDcZBQ=ebOsd5?`bXy#-ThZf?#F(7;Xq_TGc)C5INq-Q9hwWe0M zrk0kxhK9yti|s3k~~+0bCItYKt>JKCn|DfNi*|8{g2E zlIQ6J-s^Js_&)2sm!$tyn1mVs4&A z7LowA!@z1d;O8e!<#&&0kQe|F04E2KcT7jWE(ua4Q-p>>y@zLx#)e-$A;-XLQK3kF zh~gmwG=H?;g)`WID1Pe(M4N4V349|W!1h`S3#21aq3o}hQUlTV$*)y#mEDPZ_E$&e z-_N1ibJ;tYI9idmv9+yueivpV5!cU-4ZeAe>;W&v4N)?p!1&jT?Ot_Y03(6NqO*PI zwcKdW01A*H=CKAhM52+FqgZbU2?p^K_TR%FRok1PH1h7#@jFbq4e=mkPUg~vb6ph;#0fJ7-@Ivf zfQv*fU@v2M4%(Ij_n*%0Xi@>QKmADTmyne@3yT;*NNn-mXxu9dJDqCB~Pd02k1X2;3*%IdgG7V`7n!HI-LQHUY@Du~LE)1v55 z{4}c~Evu-gcp$|3!J<9DIZz!p-`U~8M;sp9RYC*Cl`=W@tXr<41td})=frObnr|9c zB9Zdg%a$H(N2e7}D|o?v{`#Cj#InY)r`KQtD!=zGo*%sYSmxMOhkWX#@>^;@_Mz@e z7ee0Z)pCV+;+N+!fjRiGr*bP<4kgdoCIzp$^to7KgPRN`kO0{iR92V0t1Y$Quav*9 zWv7Nn%$XQ9#@wc`d3{EdBKEo6LI(ed?kMnnS7~vdN#F@^yHJOqVG#Sp13c3I?PQQC z6joUIr^bnciy8P6*Jm;umg|RCQ4{JG5eW!oH2kF8f9{fQi-JGU44P=`Um(X2jWAE; z%XhnqtSsD;#J=@j6O%okWC#x2fGS1qA~Qxt_FbyzgiFCwYpax6YYM~1`fx>?8TkvJ0S0Fl2iI(ybW)s2mfPrHnJ#QuCv zgU2vZ2&za$mv>ZP74;xhtN5@c!>R>4zrlU^MJYvIbG4qqxSP2Bip~HR>lT;;QS~vp zUqorT2E%-HC`DMxQBq5B@?Zr=cYFEW7r-iW;lrwYQ^Q;3`V*+$W<*a;**^dBj6Tq9 zar%rXJL?mt|6hC885dQO?H$M2b#~_c#+h+v#+lt6lH_o2N1B|Yl7qk%=u3AaAWi62 z5F~>VCAJ_rN)iy13<3fUf`TAO5)ma!5Xm6=y6dtZXWy6ialcg6Ip=>)g?s9ps(bqf zXXI`i?0OJ+PxFM1o2;p_=IVsKTirb86ova4g!R=@a1oJ6lytekj31qi9xeV}0;=MM zrkrm^hP+b0UTk_675rPnNyuNz`3+^~xQA74`ZaFg68d_?b-+O65>#<~(*(Ma&rP9g zC+*`5oj8r>A@l99=Lb`=ya0Uc<<}$C6Z1xw9Ub3TjWP;vR?FnwAXGl<`tj)TlSpg% zKa0rppPbtk>Vph}x{jG_Qmh2p9i3Pdt|w@O<`nl` z52^m|)7+FMYW_u`T;w%Y@y)seJ+=G&dbLegaNJEPtuiq`apz?DGg98axEbn3Y+_1r ziO92?l3^xur%r@l4}9TL?KT+s$tHFz;IsU}!YRpt9&^ZoO^)z|$TV{0d{l4BE{SWT!r;hzHZ7Mr1n*U^JP&Da>E8OYd{i^7<_Mgbx z{-eU-`EzoVy42%$g|pf-r)O{CDeBoLkNwcr-*N9kt640f`h40^B+&YAZfZ?g{2#a- zuGOCx4(UhM{HLz?9LtrA+fnJC$=a}IC)XF;lE^Nhh!i<9cjNKLPITD?sw(Y{a~3(H zM@t5;7g#SU{LJ;LZ-n{=DqY$u)xKQSFIPGn%DXCv{_TW93|buTC=!(Lb-wUl%{E)^ zNhRheI~ITIFUAcbv@dE^pLgKjd!P)bS?3cX76$flvA1Tl%egzMHgJ3|(nq|OE|xWQ zTPa^!a)un}sLq#pwEizY8r9Y1NEJ$ooO9pkn($4-vGS+zBGH&Dh@Cuc^fnUU@2t9+k5s$nGuYM={ zPR%EBbH}eUy3*f=mj;e!J3YKECtmIrxz1`V{_Yg5!hyZEpH? z$t=%R#7^lQvyDpAU*X;d3dH07b#j^erlE)Zb2B|l|Gv83 zG+ud#?&V#fC!>(}Q5aYTg`QqutF!LkMZODB>Hq4NGiDnCd|&>1B7xr@J!TRk`WGB8 z$nX~IPYdC%PjJ5-5xTzla|ovU3qSWZ9**+;_@|~01@;L?g3J>2LcsT({{;T;Aphf& zInMXrmHbcc!*%Na`sDwyYybatH=-dyJ96a6_qrOYX8-QgKB4vV7e{d~f9w#T>K!@q z6P>Mjj!kuUzv$#ZWjnbsWiB&m(sXYJ>P07-C)G>BndQbj;wx~O?#bdx1>^)^0Z9Qm z)4`4A#B}76D2wgMYe@B|UWWjU^cT!J7N@7GuH zo@&c$L*%oTtBP*IfG_I~SU6C^Xw7sQ1@Ijl00$k9AYUAnWPO-yaRv z>B(aNgseb?QHRNeGh_VV>x0wTt>GB6UD z+>)CZfU$h@R9!tC@d&!tuKamN@an_TO}TBuUDuYkULpLiwv|76<-G5kcKdkSytWT$ z(3N5u(|9eI(NlX}-QraRf|wt)f7+g!?tgqXHU@|&JJs1^uJ;e#mjV)PlCE33|He!| zFB%a*_h!mmZv$J9p?=AGc>~c;R3d%1AsLC^OkRT9>>1Zg~m- z>60?$XIJ+(8~3ZXfi*^!>#`l&_xkC}dylUwQpai6m$6Z zUY3Q;1V5eMuLV$W|7*4MS*P&lkpPYhNm{pMmWygLfB;(;Ndw? 
zL!Aj&oFWY-stv?VT6$-;-OIGJaw@3`o(k6MJujOqciu|O6hQ!-@>Km(FXB0`2Pm{W z?~#Jcv;AfZ1Zil`LYPxRXFkCPyQ`BKUKNEHr075)m9di=%A+7i5J5s>n+-4-(f%{w z(l27Jk2~xHH3MvN8>mRVj-}%%8MPR16$A&EUY^3EVaURj0E4XoF|=bv0S2$rs*$~| z4%&p+moJL8^0wqF_tHjhpI+)%eUZhq=hI#;gXiW1wWQ~@8b7@q->fVRdRG&sp(E{) zOM&fEatQqzD+v2WM=ri(IbjZ%a?VsDr`*(vta&2FQ7?t@>I?>-G|MF=-w zI=_2vQUr_%z6Iqd5Ne~lLQM|+)*Zwt7!GiX2ip|JWswPE!1xY2LU`4 z7v=d>;XXhS^I+Lac`rp{0fl18h!B_`?IfPaXLm&t>`@4syrtZ2oSm-`<246N5J8E1 zp9$1(w~RMpgkqnx;p5UVajwflS>J1G4Z-Qy7A9klhdc?ylxYpMx5EhPnQ5e_9S2x= zH(w>y-r2QJPnMM~i6=H1JayM0ju=OX- z^AsR*^JVcG6-QH`jVAaOtxp;)_32BHhQcj7`2iR18>z%5ip37pBVGTjD8g(&5TT69 zY;+Z%Xc=Vz&XY@C)8_ThF$RKElbGct71?)gtxiZ*rA4i;ZHAp5}3 z!`5iPf^#`$!5X%A6P6BL?}2;^&A~{=Jvd2%%0B?KvBW_FZ7@v;Dz?^*ucetXpC#I8 zRYkRrGndaVY++y!Hekx|GUg8SX~9>c={Gq!9kOHULziSguQ+TT1O=c%Mi}ZIw-L4@ zf?TsxMpHfdUQ1zqNCP1TBE|AqUFgMTjAbrr(mAR;Fhv~q?GX`+=TysU>RO4vi2+Ok z$P5sK#$S+^SJ%aYJrpISIh~esW~}f)ND6<|SS^~>W6Uts;!n-Plmx+Oh^p;slmaZa zLq_q9E9(IRc|tea#GCx961+8_V#Kq(ze6z>; zEVh5>?cDM%{|GD-3jmvbAxQ7=B5s|-vKv?M@{y95CE`QhCRL8DOJB-}#xnGYLSa3pDB{gVNP(H(%L0Mf42 zJvycaN@^Iw=nKafGA1EdqdNC6b~+Sf4;!v0FyyAJEBt_r;x%7m(bz~yut7YZ^XHAd zazD6ZH>Quu5tDPk6NGRGsz1EQ14H=kJ0fC;Kw9Cj=7NYN&}Vd%e8jQ}MC{_stOKTO z$F)L)nIs>t`qr`#8y%$!i`RsYR1Z88qaN&&w)QaltI88FeB^n93kshj)Nfjy9ndmScc{CKv_Y@2T8$vtIpGv!(_K&JZ|PLZr`< zK**4BP6jII`r@iZ!C2!1%IN2fwG}K$(z1YzhLtOJ2Ti^D0k>XbF^Rf6U(uM%nwLog zVnTcosEH$Cl+%bnX1^jJp#m(x$Z~ijL{hi+0(@E(oT-7$kDMWHGt~vSgtNIGGYDD; zu<+UjU{XE+5=9u{DPmKxv-{7JR|r1ZKqmVlK@(wuCgC0iC+7na0!oBTYBRzIGvEX-2D<4 zwN~bsG%zn&k2xzzSXYPoyj|iJmYLCs%>p`DXBdYtTnH@bwa091F6i{(bpaa*z5*NY zOm}~B_wLh~WkWzSI*Ke@m(0$&1=w=R4zkG{b=g;#0IWSkU-f5Z_*vweJvO8VDM}c0 zspZ{EW)6Btll9+; zU--z^n=w^-X3-UJA@U#f@u@A0j}3n6!%p}1mM?#{VHxd@HMnq;TzgCJw6D#!c~sV1 z(FX=mdnL=BNolg)ZYTH72w0R<(@!H4J(MJKggN8U$m2o89w3rE?FwqJ@Gw<9_e1!# zXdhyTtQJ)h;K(VW!k4CZ%|Gs8ApxW%7zGguLM3uzoYh~3JTK)rMCKwtPAxKG=Ct^ATpy8m{rA@6}sgb_6v7>|8 z1nvZpE#)1(IOFSQwGG5w!(Jc)i3SeCwBQs3#WE42*<@tD9`iN)@Y4cdjfgoMK3b)| z-=I!AFj)a?JqVX_)29;gJROCwmmZ%FBZIYr$f_~bMm%eI5{P&s0(*Q6s!Pv3lBWk#LHaq+ak#*4@ffqzG1T7cD~Rts4d6I^ z7sod9M~krlLP%gSYylryuzshbx@p5dK~LGb2ZZ#o=@!s%*cdezv663=wI9@Pb0_t0 z2rY(91V0@ZzJ;KVI>+k!fd!PpnF&bh4gdssb-7^8pt}DuCTf=8{>RkB*}xGze~j=h z;Nh{OIUFy3=4ta3!Zy37<)j=tyCY|0IIhk)FJ+RV_?~;023SI_P~iE5+j?5P{XQd_ znu;{vPqtphbeZRlplcYX70koIz@s3%;FD%Npm8FnXM)UjS_X~US@QgcDa!WE)k2UV zAiCHk;PMcIdoiwgiUt-$n*wV5+-lHY@E8K+u^r*J-jM8xGEe$9}gvh8#)ADGr$f& z9j7e}211$;WFV+d%mSRo*9|zd`)wW7siVm)X#PWlA-H+dD;_&rlClE<0N0yzaj(A( zEF*-5XvIBf1;;Pg!G62`0p)a=h~#sBM1ZtgGo93&fY&!Lfj}hVz9x}V(=niWy6PD` ze0vQH1CKc;{rq-jmzb}hhcHEV8D?PpfKv@brogRYw{Z?!2W;AjG^zu$htbH{$V7&0 zaSK5NJN!UuU~G8D#gyBiMY0+Fz_yq1{MgB=O>IFK{HLS){X?H5UK{8V_LW(c9CdfD zQ>-~m1^CBcG*GL@<@#u#&bWx3KFGsQIwSjJTe|?48l{TWwT1r9+)S_Fl>=xf)83$( z*Mgw6tvfqYR13g{5#&HG@CA0$q5(Nb6cE9w6aeU@r|YndN5WDuo)L>6Ag#hgRsnM} z7rgVi2AEw9{%Le47HEIAIy_h^3t<_JA4N+0)>mtGtm?-&ApzW?eq=e~_TqN{13 JQLbhm{J$t~5mx{J literal 0 HcmV?d00001 diff --git a/docs/source/design/arch_overview.rst b/docs/source/design/arch_overview.rst new file mode 100644 index 0000000000000..a9e7b4bd69bc7 --- /dev/null +++ b/docs/source/design/arch_overview.rst @@ -0,0 +1,274 @@ +.. _arch_overview: + +Architecture Overview +====================== + +This document provides an overview of the vLLM architecture. + +.. contents:: Table of Contents + :local: + :depth: 2 + +Entrypoints +----------- + +vLLM provides a number of entrypoints for interacting with the system. The +following diagram shows the relationship between them. + +.. 
image:: /assets/design/arch_overview/entrypoints.excalidraw.png + :alt: Entrypoints Diagram + +LLM Class +^^^^^^^^^ + +The LLM class provides the primary Python interface for doing offline inference, +which is interacting with a model without using a separate model inference +server. + +Here is a sample of `LLM` class usage: + +.. code-block:: python + + from vllm import LLM, SamplingParams + + # Define a list of input prompts + prompts = [ + "Hello, my name is", + "The capital of France is", + "The largest ocean is", + ] + + # Define sampling parameters + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + + # Initialize the LLM engine with the OPT-125M model + llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct") + + # Generate outputs for the input prompts + outputs = llm.generate(prompts, sampling_params) + + # Print the generated outputs + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +More API details can be found in the :doc:`Offline Inference +` section of the API docs. + +The code for the `LLM` class can be found in `vllm/entrypoints/llm.py +`_. + +OpenAI-compatible API server +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The second primary interface to vLLM is via its OpenAI-compatible API server. +This server can be started using the `vllm serve` command. + +.. code-block:: bash + + vllm serve + +The code for the `vllm` CLI can be found in `vllm/scripts.py +`_. + +Sometimes you may see the API server entrypoint used directly instead of via the +`vllm` CLI command. For example: + +.. code-block:: bash + + python -m vllm.entrypoints.openai.api_server --model + +That code can be found in `vllm/entrypoints/openai/api_server.py +`_. + +More details on the API server can be found in the :doc:`OpenAI Compatible +Server ` document. + +LLM Engine +---------- + +The `LLMEngine` and `AsyncLLMEngine` classes are central to the functioning of +the vLLM system, handling model inference and asynchronous request processing. + +.. image:: /assets/design/arch_overview/llm_engine.excalidraw.png + :alt: LLMEngine Diagram + +LLMEngine +^^^^^^^^^ + +The `LLMEngine` class is the core component of the vLLM engine. It is +responsible for receiving requests from clients and generating outputs from the +model. The `LLMEngine` includes input processing, model execution (possibly +distributed across multiple hosts and/or GPUs), scheduling, and output +processing. + +- **Input Processing**: Handles tokenization of input text using the specified + tokenizer. + +- **Scheduling**: Chooses which requests are processed in each step. + +- **Model Execution**: Manages the execution of the language model, including + distributed execution across multiple GPUs. + +- **Output Processing**: Processes the outputs generated by the model, decoding the + token IDs from a language model into human-readable text. + +The code for `LLMEngine` can be found in `vllm/engine/llm_engine.py`_. + +.. _vllm/engine/llm_engine.py: https://github.com/vllm-project/vllm/tree/main/vllm/engine/llm_engine.py + +AsyncLLMEngine +^^^^^^^^^^^^^^ + +The `AsyncLLMEngine` class is an asynchronous wrapper for the `LLMEngine` class. +It uses `asyncio` to create a background loop that continuously processes +incoming requests. The `AsyncLLMEngine` is designed for online serving, where it +can handle multiple concurrent requests and stream outputs to clients. + +The OpenAI-compatible API server uses the `AsyncLLMEngine`. 
There is also a demo +API server that serves as a simpler example in +`vllm/entrypoints/api_server.py`_. + +.. _vllm/entrypoints/api_server.py: https://github.com/vllm-project/vllm/tree/main/vllm/entrypoints/api_server.py + +The code for `AsyncLLMEngine` can be found in `vllm/engine/async_llm_engine.py`_. + +.. _vllm/engine/async_llm_engine.py: https://github.com/vllm-project/vllm/tree/main/vllm/engine/async_llm_engine.py + +Worker +------ + +A worker is a process that runs the model inference. vLLM follows the common +practice of using one process to control one accelerator device, such as GPUs. +For example, if we use tensor parallelism of size 2 and pipeline parallelism of +size 2, we will have 4 workers in total. Workers are identified by their +``rank`` and ``local_rank``. ``rank`` is used for global orchestration, while +``local_rank`` is mainly used for assigning the accelerator device and accessing +local resources such as the file system and shared memory. + +Model Runner +------------ + +Every worker has one model runner object, responsible for loading and running +the model. Much of the model execution logic resides here, such as preparing +input tensors and capturing cudagraphs. + +Model +----- + +Every model runner object has one model object, which is the actual +``torch.nn.Module`` instance. See :ref:`huggingface_integration` for how various +configurations affect the class we ultimately get. + +Class Hierarchy +--------------- + +The following figure shows the class hierarchy of vLLM: + + .. figure:: /assets/design/hierarchy.png + :alt: query + :width: 100% + :align: center + +There are several important design choices behind this class hierarchy: + +1. **Extensibility**: All classes in the hierarchy accept a configuration object +containing all the necessary information. The `VllmConfig +`__ +class is the main configuration object that is passed around. The class +hierarchy is quite deep, and every class needs to read the configuration it is +interested in. By encapsulating all configurations in one object, we can easily +pass the configuration object around and access the configuration we need. +Suppose we want to add a new feature (this is often the case given how fast the +field of LLM inference is evolving) that only touches the model runner. We will +have to add a new configuration option in the `VllmConfig` class. Since we pass +the whole config object around, we only need to add the configuration option to +the `VllmConfig` class, and the model runner can access it directly. We don't +need to change the constructor of the engine, worker, or model class to pass the +new configuration option. + +2. **Uniformity**: The model runner needs a unified interface to create and +initialize the model. vLLM supports more than 50 types of popular open-source +models. Each model has its own initialization logic. If the constructor +signature varies with models, the model runner does not know how to call the +constructor accordingly, without complicated and error-prone inspection logic. +By making the constructor of the model class uniform, the model runner can +easily create and initialize the model without knowing the specific model type. +This is also useful for composing models. Vision-language models often consist +of a vision model and a language model. By making the constructor uniform, we +can easily create a vision model and a language model and compose them into a +vision-language model. + +.. 
note:: + + To support this change, all vLLM models' signatures have been updated to: + + .. code-block:: python + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + + To avoid accidentally passing incorrect arguments, the constructor is now keyword-only. This ensures that the constructor will raise an error if old configurations are passed. vLLM developers have already made this change for all models within vLLM. For out-of-tree registered models, developers need to update their models, for example by adding shim code to adapt the old constructor signature to the new one: + + .. code-block:: python + + class MyOldModel(nn.Module): + def __init__( + self, + config, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + prefix: str = "", + ) -> None: + ... + + from vllm.config import VllmConfig + class MyNewModel(MyOldModel): + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + lora_config = vllm_config.lora_config + super().__init__(config, cache_config, quant_config, lora_config, prefix) + + if __version__ >= "0.6.4": + MyModel = MyNewModel + else: + MyModel = MyOldModel + + This way, the model can work with both old and new versions of vLLM. + +3. **Sharding and Quantization at Initialization**: Certain features require +changing the model weights. For example, tensor parallelism needs to shard the +model weights, and quantization needs to quantize the model weights. There are +two possible ways to implement this feature. One way is to change the model +weights after the model is initialized. The other way is to change the model +weights during the model initialization. vLLM chooses the latter. The first +approach is not scalable to large models. Suppose we want to run a 405B model +(with roughly 810GB weights) with 16 H100 80GB GPUs. Ideally, every GPU should +only load 50GB weights. If we change the model weights after the model is +initialized, we need to load the full 810GB weights to every GPU and then shard +the weights, leading to a huge memory overhead. Instead, if we shard the weights +during the model initialization, every layer will only create a shard of the +weights it needs, leading to a much smaller memory overhead. The same idea +applies to quantization. Note that we also add an additional argument ``prefix`` +to the model's constructor so that the model can initialize itself differently +based on the prefix. This is useful for non-uniform quantization, where +different parts of the model are quantized differently. The ``prefix`` is +usually an empty string for the top-level model and a string like ``"vision"`` +or ``"language"`` for the sub-models. In general, it matches the name of the +module's state dict in the checkpoint file. + +One disadvantage of this design is that it is hard to write unit tests for +individual components in vLLM because every component needs to be initialized by +a complete config object. We solve this problem by providing a default +initialization function that creates a default config object with all fields set +to ``None``. If the component we want to test only cares about a few fields in +the config object, we can create a default config object and set the fields we +care about. This way, we can test the component in isolation. 
Note that many +tests in vLLM are end-to-end tests that test the whole system, so this is not a +big problem. + +In summary, the complete config object ``VllmConfig`` can be treated as an +engine-level global state that is shared among all vLLM classes. diff --git a/docs/source/design/class_hierarchy.rst b/docs/source/design/class_hierarchy.rst deleted file mode 100644 index 58a888b17ba53..0000000000000 --- a/docs/source/design/class_hierarchy.rst +++ /dev/null @@ -1,74 +0,0 @@ -.. _class_hierarchy: - -vLLM's Class Hierarchy -======================= - -This document describes the class hierarchy of vLLM. We will explain the relationships between the core classes, their responsibilities, and the design choices behind them to make vLLM more modular and extensible. - -1. **Entrypoints**: vLLM has two entrypoints: `command line usage `__ with ``vllm serve`` for launching an OpenAI-API compatible server, and `library-style usage `__ with the ``vllm.LLM`` class for running inference in a Python script. These are user-facing entrypoints that end-users interact with. Under the hood, both create an engine object to handle model inference. - -2. **Engine**: Each vLLM instance contains one engine object, orchestrating and serving as the control plane for model inference. Depending on the configuration, the engine can create multiple workers to handle the inference workload. - -3. **Worker**: A worker is a process that runs the model inference. vLLM follows the common practice of using one process to control one accelerator device, such as GPUs. For example, if we use tensor parallelism of size 2 and pipeline parallelism of size 2, we will have 4 workers in total. Workers are identified by their ``rank`` and ``local_rank``. ``rank`` is used for global orchestration, while ``local_rank`` is mainly used for assigning the accelerator device and accessing local resources such as the file system and shared memory. - -4. **Model Runner**: Every worker has one model runner object, responsible for loading and running the model. Much of the model execution logic resides here, such as preparing input tensors and capturing cudagraphs. - -5. **Model**: Every model runner object has one model object, which is the actual ``torch.nn.Module`` instance. See :ref:`huggingface_integration` for how various configurations affect the class we ultimately get. - -The following figure shows the class hierarchy of vLLM: - - .. figure:: ../assets/design/hierarchy.png - :alt: query - :width: 100% - :align: center - -There are several important design choices behind this class hierarchy: - -1. **Extensibility**: All classes in the hierarchy accept a configuration object containing all the necessary information. The `VllmConfig `__ class is the main configuration object that is passed around. The class hierarchy is quite deep, and every class needs to read the configuration it is interested in. By encapsulating all configurations in one object, we can easily pass the configuration object around and access the configuration we need. Suppose we want to add a new feature (this is often the case given how fast the field of LLM inference is evolving) that only touches the model runner. We will have to add a new configuration option in the `VllmConfig` class. Since we pass the whole config object around, we only need to add the configuration option to the `VllmConfig` class, and the model runner can access it directly. We don't need to change the constructor of the engine, worker, or model class to pass the new configuration option. 
- -2. **Uniformity**: The model runner needs a unified interface to create and initialize the model. vLLM supports more than 50 types of popular open-source models. Each model has its own initialization logic. If the constructor signature varies with models, the model runner does not know how to call the constructor accordingly, without complicated and error-prone inspection logic. By making the constructor of the model class uniform, the model runner can easily create and initialize the model without knowing the specific model type. This is also useful for composing models. Vision-language models often consist of a vision model and a language model. By making the constructor uniform, we can easily create a vision model and a language model and compose them into a vision-language model. - -.. note:: - - To support this change, all vLLM models' signatures have been updated to: - - .. code-block:: python - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - - To avoid accidentally passing incorrect arguments, the constructor is now keyword-only. This ensures that the constructor will raise an error if old configurations are passed. vLLM developers have already made this change for all models within vLLM. For out-of-tree registered models, developers need to update their models, for example by adding shim code to adapt the old constructor signature to the new one: - - .. code-block:: python - - class MyOldModel(nn.Module): - def __init__( - self, - config, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - lora_config: Optional[LoRAConfig] = None, - prefix: str = "", - ) -> None: - ... - - from vllm.config import VllmConfig - class MyNewModel(MyOldModel): - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - lora_config = vllm_config.lora_config - super().__init__(config, cache_config, quant_config, lora_config, prefix) - - if __version__ >= "0.6.4": - MyModel = MyNewModel - else: - MyModel = MyOldModel - - This way, the model can work with both old and new versions of vLLM. - -3. **Sharding and Quantization at Initialization**: Certain features require changing the model weights. For example, tensor parallelism needs to shard the model weights, and quantization needs to quantize the model weights. There are two possible ways to implement this feature. One way is to change the model weights after the model is initialized. The other way is to change the model weights during the model initialization. vLLM chooses the latter. The first approach is not scalable to large models. Suppose we want to run a 405B model (with roughly 810GB weights) with 16 H100 80GB GPUs. Ideally, every GPU should only load 50GB weights. If we change the model weights after the model is initialized, we need to load the full 810GB weights to every GPU and then shard the weights, leading to a huge memory overhead. Instead, if we shard the weights during the model initialization, every layer will only create a shard of the weights it needs, leading to a much smaller memory overhead. The same idea applies to quantization. Note that we also add an additional argument ``prefix`` to the model's constructor so that the model can initialize itself differently based on the prefix. This is useful for non-uniform quantization, where different parts of the model are quantized differently. 
The ``prefix`` is usually an empty string for the top-level model and a string like ``"vision"`` or ``"language"`` for the sub-models. In general, it matches the name of the module's state dict in the checkpoint file. - -One disadvantage of this design is that it is hard to write unit tests for individual components in vLLM because every component needs to be initialized by a complete config object. We solve this problem by providing a default initialization function that creates a default config object with all fields set to ``None``. If the component we want to test only cares about a few fields in the config object, we can create a default config object and set the fields we care about. This way, we can test the component in isolation. Note that many tests in vLLM are end-to-end tests that test the whole system, so this is not a big problem. - -In summary, the complete config object ``VllmConfig`` can be treated as an engine-level global state that is shared among all vLLM classes. diff --git a/docs/source/design/plugin_system.rst b/docs/source/design/plugin_system.rst index bfca702b9267a..5a96cc8b3a464 100644 --- a/docs/source/design/plugin_system.rst +++ b/docs/source/design/plugin_system.rst @@ -8,7 +8,7 @@ The community frequently requests the ability to extend vLLM with custom feature How Plugins Work in vLLM ------------------------ -Plugins are user-registered code that vLLM executes. Given vLLM's architecture (see :ref:`class_hierarchy`), multiple processes may be involved, especially when using distributed inference with various parallelism techniques. To enable plugins successfully, every process created by vLLM needs to load the plugin. This is done by the `load_general_plugins `__ function in the ``vllm.plugins`` module. This function is called for every process created by vLLM before it starts any work. +Plugins are user-registered code that vLLM executes. Given vLLM's architecture (see :ref:`arch_overview`), multiple processes may be involved, especially when using distributed inference with various parallelism techniques. To enable plugins successfully, every process created by vLLM needs to load the plugin. This is done by the `load_general_plugins `__ function in the ``vllm.plugins`` module. This function is called for every process created by vLLM before it starts any work. How vLLM Discovers Plugins -------------------------- @@ -59,4 +59,4 @@ Guidelines for Writing Plugins Compatibility Guarantee ----------------------- -vLLM guarantees the interface of documented plugins, such as ``ModelRegistry.register_model``, will always be available for plugins to register models. However, it is the responsibility of plugin developers to ensure their plugins are compatible with the version of vLLM they are targeting. For example, ``"vllm_add_dummy_model.my_llava:MyLlava"`` should be compatible with the version of vLLM that the plugin targets. The interface for the model may change during vLLM's development. \ No newline at end of file +vLLM guarantees the interface of documented plugins, such as ``ModelRegistry.register_model``, will always be available for plugins to register models. However, it is the responsibility of plugin developers to ensure their plugins are compatible with the version of vLLM they are targeting. For example, ``"vllm_add_dummy_model.my_llava:MyLlava"`` should be compatible with the version of vLLM that the plugin targets. The interface for the model may change during vLLM's development. 
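To make the registration flow described above concrete, the following is a minimal sketch of an out-of-tree model plugin. The module path ``vllm_add_dummy_model.my_llava:MyLlava`` is the example used in the plugin docs; the entry-point group name ``vllm.general_plugins`` and the ``get_supported_archs`` check are assumptions added for illustration, not something this patch introduces, so treat the snippet as a sketch rather than a drop-in package.

.. code-block:: python

    # vllm_add_dummy_model/__init__.py -- hypothetical out-of-tree plugin package.
    #
    # The package would expose register() as an entry point so that vLLM's
    # load_general_plugins() can discover it and call it in every process it
    # creates, e.g. (group name assumed):
    #
    #   [project.entry-points."vllm.general_plugins"]
    #   register_dummy_model = "vllm_add_dummy_model:register"


    def register():
        from vllm import ModelRegistry

        # Plugins may be invoked more than once (once per vLLM process), so the
        # registration must be safe to repeat.
        if "MyLlava" not in ModelRegistry.get_supported_archs():
            # Register lazily by "module:class" path so the model code is only
            # imported when this architecture is actually requested.
            ModelRegistry.register_model(
                "MyLlava", "vllm_add_dummy_model.my_llava:MyLlava")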
diff --git a/docs/source/index.rst b/docs/source/index.rst index b04acbbce4169..c2afd806c50f9 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -157,7 +157,7 @@ Documentation :maxdepth: 2 :caption: Design - design/class_hierarchy + design/arch_overview design/huggingface_integration design/plugin_system design/input_processing/model_inputs_index diff --git a/format.sh b/format.sh index a57882d2ac3f9..b3dcdc15bf948 100755 --- a/format.sh +++ b/format.sh @@ -299,6 +299,10 @@ echo 'vLLM shellcheck:' tools/shellcheck.sh echo 'vLLM shellcheck: Done' +echo 'excalidraw png check:' +tools/png-lint.sh +echo 'excalidraw png check: Done' + if ! git diff --quiet &>/dev/null; then echo echo "🔍🔍There are files changed by the format checker or by you that are not added and committed:" diff --git a/tools/png-lint.sh b/tools/png-lint.sh new file mode 100755 index 0000000000000..a80fe9837342f --- /dev/null +++ b/tools/png-lint.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# Ensure that *.excalidraw.png files have the excalidraw metadata +# embedded in them. This ensures they can be loaded back into +# the tool and edited in the future. + +find . -iname '*.excalidraw.png' | while read -r file; do + if git check-ignore -q "$file"; then + continue + fi + if ! grep -q "excalidraw+json" "$file"; then + echo "$file was not exported from excalidraw with 'Embed Scene' enabled." + exit 1 + fi +done diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 92fa87c7fa45b..ee4b6addfd466 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -793,7 +793,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: type=str, default=[], help="The pattern(s) to ignore when loading the model." - "Default to 'original/**/*' to avoid repeated loading of llama's " + "Default to `original/**/*` to avoid repeated loading of llama's " "checkpoints.") parser.add_argument( '--preemption-mode', From 25f9c78961daae10b9084d78901d71bc56691aa1 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 19 Nov 2024 02:43:21 -0800 Subject: [PATCH 046/397] [misc][plugin] improve plugin loading (#10443) Signed-off-by: youkaichao --- vllm/plugins/__init__.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index fdc848cedf054..05a9739d99e71 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -9,12 +9,19 @@ logger = logging.getLogger(__name__) +# make sure one process only loads plugins once +plugins_loaded = False + def load_general_plugins(): """WARNING: plugins can be loaded for multiple times in different processes. They should be designed in a way that they can be loaded multiple times without causing issues. """ + global plugins_loaded + if plugins_loaded: + return + plugins_loaded = True import sys if sys.version_info < (3, 10): from importlib_metadata import entry_points From b4614656b832aa8ac95e5450ca7b861f46049635 Mon Sep 17 00:00:00 2001 From: Yuan Date: Tue, 19 Nov 2024 21:16:43 +0800 Subject: [PATCH 047/397] [CI][CPU] adding numa node number as container name suffix (#10441) Signed-off-by: Yuan Zhou --- .buildkite/run-cpu-test.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index 14756b5964aaf..f0128f091b742 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -13,26 +13,26 @@ numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build -t cpu-test -f Dockerfile. 
numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" -t cpu-test-avx2 -f Dockerfile.cpu . # Setup cleanup -remove_docker_container() { docker rm -f cpu-test cpu-test-avx2 || true; } +remove_docker_container() { docker rm -f cpu-test-"$NUMA_NODE" cpu-test-avx2-"$NUMA_NODE" || true; } trap remove_docker_container EXIT remove_docker_container # Run the image, setting --shm-size=4g for tensor parallel. docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \ - --cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test cpu-test + --cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-"$NUMA_NODE" cpu-test docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --cpuset-cpus="$CORE_RANGE" \ - --cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-avx2 cpu-test-avx2 + --cpuset-mems="$NUMA_NODE" --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test-avx2-"$NUMA_NODE" cpu-test-avx2 function cpu_tests() { set -e # offline inference - docker exec cpu-test-avx2 bash -c " + docker exec cpu-test-avx2-"$NUMA_NODE" bash -c " set -e python3 examples/offline_inference.py" # Run basic model test - docker exec cpu-test bash -c " + docker exec cpu-test-"$NUMA_NODE" bash -c " set -e pip install pytest pytest-asyncio \ decord einops librosa peft Pillow sentence-transformers soundfile \ @@ -45,20 +45,20 @@ function cpu_tests() { pytest -v -s tests/models/decoder_only/vision_language -m cpu_model" # Run compressed-tensor test - docker exec cpu-test bash -c " + docker exec cpu-test-"$NUMA_NODE" bash -c " set -e pytest -s -v \ tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_static_setup \ tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_dynamic_per_token" # Run AWQ test - docker exec cpu-test bash -c " + docker exec cpu-test-"$NUMA_NODE" bash -c " set -e pytest -s -v \ tests/quantization/test_ipex_quant.py" # online inference - docker exec cpu-test bash -c " + docker exec cpu-test-"$NUMA_NODE" bash -c " set -e export VLLM_CPU_KVCACHE_SPACE=10 export VLLM_CPU_OMP_THREADS_BIND=$1 From f028dff33d3d0b0dfe71e0e0354b355b8232a4ec Mon Sep 17 00:00:00 2001 From: COSMOPlat Date: Tue, 19 Nov 2024 21:42:50 +0800 Subject: [PATCH 048/397] [BugFix] Fix hermes tool parser output error stream arguments in some cases (#10395) (#10398) Signed-off-by: xiyuan lee --- .../openai/tool_parsers/hermes_tool_parser.py | 25 +++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py index faa6f653b835c..18816cd665b3e 100644 --- a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py @@ -12,8 +12,6 @@ FunctionCall, ToolCall) from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( ToolParser, ToolParserManager) -from vllm.entrypoints.openai.tool_parsers.utils import ( - extract_intermediate_diff) from vllm.logger import init_logger from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer from vllm.utils import random_uuid @@ -190,8 
+188,11 @@ def extract_tool_calls_streaming( diff = self.prev_tool_call_arr[self.current_tool_id].get( "arguments") if diff: - diff = json.dumps(diff).replace( - self.streamed_args_for_tool[self.current_tool_id], "") + diff = diff.encode('utf-8').decode( + 'unicode_escape') if diff is str else diff + diff = json.dumps( + diff, ensure_ascii=False + )[len(self.streamed_args_for_tool[self.current_tool_id]):] logger.debug( "Finishing tool and found diff that had not " "been streamed yet: %s", diff) @@ -307,22 +308,20 @@ def extract_tool_calls_streaming( # last case -- we have an update to existing arguments. elif cur_arguments and prev_arguments: + if isinstance(delta_text, str) and len(delta_text.rstrip( + )) >= 1 and delta_text.rstrip()[-1] == '}': + delta_text = delta_text.rstrip()[:-1] + + logger.debug("got diff %s", delta_text) - cur_args_json = json.dumps(cur_arguments) - prev_args_json = json.dumps(prev_arguments) - logger.debug("Searching for diff between\n%s", cur_args_json) - logger.debug("and\n%s", prev_args_json) - argument_diff = extract_intermediate_diff( - cur_args_json, prev_args_json) - logger.debug("got argument diff %s", argument_diff) delta = DeltaMessage(tool_calls=[ DeltaToolCall(index=self.current_tool_id, function=DeltaFunctionCall( - arguments=argument_diff).model_dump( + arguments=delta_text).model_dump( exclude_none=True)) ]) self.streamed_args_for_tool[self.current_tool_id] \ - += argument_diff + += delta_text # handle saving the state for the current tool into # the "prev" list for use in diffing for the next iteration From 11fd7ea639cf3c4fae29322d8e5c839ff6f8a1ca Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 19 Nov 2024 18:33:06 +0100 Subject: [PATCH 049/397] [Pixtral-Large] Pixtral actually has no bias in vision-lang adapter (#10449) --- vllm/model_executor/models/pixtral.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index f7f46770057e2..d14b89d6b3f85 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -331,6 +331,7 @@ class VisionEncoderArgs: num_attention_heads: int rope_theta: float # for rope-2D image_token_id: int + adapter_bias: bool = True def _reshape_for_broadcast(freqs_cis: torch.Tensor, @@ -595,10 +596,10 @@ def __init__(self, args: VisionEncoderArgs, dim: int): self.w_in = nn.Linear( args.hidden_size, dim, - bias=True, + bias=args.adapter_bias, ) self.gelu = nn.GELU() - self.w_out = nn.Linear(dim, dim, bias=True) + self.w_out = nn.Linear(dim, dim, bias=args.adapter_bias) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.w_out(self.gelu(self.w_in(x))) From 1ea291a4173a82c537ab42487e23375be4926d30 Mon Sep 17 00:00:00 2001 From: Manjul Mohan <49657164+mikejuliet13@users.noreply.github.com> Date: Tue, 19 Nov 2024 23:04:57 +0530 Subject: [PATCH 050/397] Fix: Build error seen on Power Architecture (#10421) Signed-off-by: Manjul Mohan Signed-off-by: B-201 Signed-off-by: Isotr0py <2037008807@qq.com> Signed-off-by: youkaichao Signed-off-by: ismael-dm Signed-off-by: Andrew Nesbitt Signed-off-by: mgoin Signed-off-by: yan ma Signed-off-by: Angus Wang Signed-off-by: Lucas Wilkinson Signed-off-by: rickyx Signed-off-by: Jee Jee Li Signed-off-by: Mengqing Cao Signed-off-by: Travis Johnson Co-authored-by: Manjul Mohan manjul.mohan@ibm.com Co-authored-by: B-201 Co-authored-by: Isotr0py <2037008807@qq.com> Co-authored-by: youkaichao Co-authored-by: ismael-dm Co-authored-by: Andrew Nesbitt 
Co-authored-by: Michael Goin Co-authored-by: Yan Ma Co-authored-by: Angus Wang Co-authored-by: Lucas Wilkinson Co-authored-by: Ricky Xu Co-authored-by: Kevin H. Luu Co-authored-by: Jee Jee Li Co-authored-by: Mengqing Cao Co-authored-by: Travis Johnson Co-authored-by: Russell Bryant --- cmake/cpu_extension.cmake | 14 ++++++++++---- csrc/cpu/attention.cpp | 12 ++++++++++-- csrc/cpu/quant.cpp | 6 ++++++ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake index 5912c5c02ede7..426189481575b 100644 --- a/cmake/cpu_extension.cmake +++ b/cmake/cpu_extension.cmake @@ -16,10 +16,16 @@ include_directories("${CMAKE_SOURCE_DIR}/csrc") # # Check the compile flags # -list(APPEND CXX_COMPILE_FLAGS - "-fopenmp" - "-mf16c" - "-DVLLM_CPU_EXTENSION") +if (CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64le") + list(APPEND CXX_COMPILE_FLAGS + "-fopenmp" + "-DVLLM_CPU_EXTENSION") +else() + list(APPEND CXX_COMPILE_FLAGS + "-fopenmp" + "-mf16c" + "-DVLLM_CPU_EXTENSION") +endif() execute_process(COMMAND cat /proc/cpuinfo RESULT_VARIABLE CPUINFO_RET diff --git a/csrc/cpu/attention.cpp b/csrc/cpu/attention.cpp index e73eca1b345fd..e6c03dcb034fd 100644 --- a/csrc/cpu/attention.cpp +++ b/csrc/cpu/attention.cpp @@ -24,12 +24,20 @@ struct KernelVecType { template <> struct KernelVecType { +#ifdef __powerpc64__ + // Power architecture-specific vector types + using q_load_vec_type = vec_op::FP32Vec8; + using k_load_vec_type = vec_op::FP32Vec16; + using v_load_vec_type = vec_op::FP32Vec16; +#else + // Fallback for other architectures, including x86 using q_load_vec_type = vec_op::FP16Vec8; - using q_vec_type = vec_op::FP32Vec16; using k_load_vec_type = vec_op::FP16Vec16; + using v_load_vec_type = vec_op::FP16Vec16; +#endif + using q_vec_type = vec_op::FP32Vec16; using k_vec_type = vec_op::FP32Vec16; using qk_acc_vec_type = vec_op::FP32Vec16; - using v_load_vec_type = vec_op::FP16Vec16; }; #ifdef __AVX512BF16__ diff --git a/csrc/cpu/quant.cpp b/csrc/cpu/quant.cpp index f42fa2361a2db..d9aed657a3113 100644 --- a/csrc/cpu/quant.cpp +++ b/csrc/cpu/quant.cpp @@ -25,7 +25,13 @@ struct KernelVecType { template <> struct KernelVecType { +#ifdef __powerpc64__ + // Power architecture-specific vector type + using load_vec_type = vec_op::FP32Vec16; +#else + // Fallback for other architectures using load_vec_type = vec_op::FP16Vec16; +#endif using azp_adj_load_vec_type = vec_op::INT32Vec16; using cvt_vec_type = vec_op::FP32Vec16; }; From fd9f124971c58376ca294091951dfcc96cc03474 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 19 Nov 2024 12:48:30 -0500 Subject: [PATCH 051/397] [Doc] fix link for page that was renamed (#10455) Signed-off-by: Russell Bryant --- vllm/model_executor/model_loader/loader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index b41c23704b7ff..936c2fe415375 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -105,7 +105,7 @@ def _initialize_model(vllm_config: VllmConfig, prefix: str = "") -> nn.Module: msg = ("vLLM model class should accept `vllm_config` and `prefix` as " "input arguments. Possibly you have an old-style model class" " registered from out of tree and it is used for new vLLM version. 
" - "Check https://docs.vllm.ai/en/latest/design/class_hierarchy.html " + "Check https://docs.vllm.ai/en/latest/design/arch_overview.html " "for the design and update the model class accordingly.") logger.warning(msg) logger.warning( From 803f37eaaa11568f65acbf0bcd1044fb9b1610bf Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 19 Nov 2024 10:09:03 -0800 Subject: [PATCH 052/397] [6/N] torch.compile rollout to users (#10437) Signed-off-by: youkaichao --- .../piecewise_compilation_config.json | 5 -- tests/compile/piecewise/test_simple.py | 18 +++---- tests/compile/piecewise/test_toy_llama.py | 45 +++++++----------- tests/compile/test_basic_correctness.py | 13 +++-- tests/compile/utils.py | 4 +- .../model_executor/test_enabled_custom_ops.py | 4 +- tests/tpu/test_compilation.py | 47 ++++++++++++++----- tests/tpu/test_custom_dispatcher.py | 10 ++-- vllm/config.py | 43 ++++++++--------- vllm/engine/arg_utils.py | 29 +++++++++--- vllm/engine/llm_engine.py | 4 +- vllm/envs.py | 8 ---- vllm/platforms/tpu.py | 4 +- vllm/plugins/__init__.py | 14 +----- vllm/v1/worker/gpu_model_runner.py | 22 ++------- 15 files changed, 129 insertions(+), 141 deletions(-) delete mode 100644 tests/compile/piecewise/piecewise_compilation_config.json diff --git a/tests/compile/piecewise/piecewise_compilation_config.json b/tests/compile/piecewise/piecewise_compilation_config.json deleted file mode 100644 index 798a34e8dd92d..0000000000000 --- a/tests/compile/piecewise/piecewise_compilation_config.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "use_cudagraph": true, - "non_cudagraph_ops": ["silly.attention"], - "cudagraph_copy_inputs": true -} \ No newline at end of file diff --git a/tests/compile/piecewise/test_simple.py b/tests/compile/piecewise/test_simple.py index 45f56cbbd4b16..0e40e3b4ebc96 100644 --- a/tests/compile/piecewise/test_simple.py +++ b/tests/compile/piecewise/test_simple.py @@ -2,7 +2,6 @@ Test the piecewise compilation with a simple model so that we can exactly calculate the expected output and side effects. 
""" -import os import torch from torch import nn @@ -11,7 +10,7 @@ from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile -from vllm.config import CompilationLevel, VllmConfig +from vllm.config import CompilationConfig, CompilationLevel, VllmConfig from vllm.plugins import set_current_vllm_config from vllm.utils import direct_register_custom_op @@ -77,12 +76,12 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: def test_simple_piecewise_compile(): - directory = os.path.dirname(__file__) - config = os.path.join(directory, "piecewise_compilation_config.json") - os.environ["VLLM_TORCH_COMPILE_CONFIG"] = config - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(CompilationLevel.PIECEWISE) - - vllm_config = VllmConfig() + vllm_config = VllmConfig(compilation_config=CompilationConfig( + level=CompilationLevel.PIECEWISE, + use_cudagraph=True, + non_cudagraph_ops=["silly.attention"], + cudagraph_copy_inputs=True, + )) with set_current_vllm_config(vllm_config): model = SillyModel(vllm_config=vllm_config, prefix='') @@ -109,6 +108,3 @@ def test_simple_piecewise_compile(): output = model(input) assert global_counter == 2 assert torch.allclose(output.cpu(), torch.tensor([3., 1.])) - - # clean up to avoid side effects for other tests - del os.environ["VLLM_TORCH_COMPILE_CONFIG"] diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index 8032304e95806..356d119a40334 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -6,7 +6,6 @@ if the config `tractable_init` is set to True. Otherwise, the weights are initialized randomly with a fixed seed. 
""" -import os from dataclasses import dataclass from typing import Optional, Tuple @@ -18,7 +17,7 @@ from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile from vllm.config import CompilationConfig, CompilationLevel, VllmConfig -from vllm.plugins import set_compilation_config, set_current_vllm_config +from vllm.plugins import set_current_vllm_config from vllm.utils import direct_register_custom_op # create a library to hold the custom op @@ -254,23 +253,17 @@ def run_model(llama_config, split_attn: bool = False) -> torch.Tensor: if use_compile: - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str( - CompilationLevel.PIECEWISE) - + compilation_config = CompilationConfig( + level=CompilationLevel.PIECEWISE, + use_cudagraph=True, + ) if split_attn: - set_compilation_config( - CompilationConfig( - use_cudagraph=True, - non_cudagraph_ops=["silly.attention"], - )) - else: - set_compilation_config(CompilationConfig(use_cudagraph=True, )) + compilation_config.non_cudagraph_ops = ["silly.attention"] else: - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str( - CompilationLevel.NO_COMPILATION) - set_compilation_config(None) + compilation_config = CompilationConfig( + level=CompilationLevel.NO_COMPILATION, ) - vllm_config = VllmConfig() + vllm_config = VllmConfig(compilation_config=compilation_config) with set_current_vllm_config(vllm_config): model = LlamaModel(config=llama_config, vllm_config=vllm_config, @@ -288,10 +281,6 @@ def run_model(llama_config, input_ids[:2].zero_() output = model(input_ids[:2], positions[:2]) - # manual cleanup - del os.environ["VLLM_TORCH_COMPILE_LEVEL"] - set_compilation_config(None) - output = output.cpu() if llama_config.tractable_init: @@ -361,7 +350,6 @@ def test_toy_llama(): @torch.inference_mode def benchmark(): - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(CompilationLevel.PIECEWISE) from triton.testing import do_bench # similar to llama 3.1-8B @@ -387,15 +375,16 @@ def benchmark(): for piecewise in [False, True]: if piecewise: - set_compilation_config( - CompilationConfig( - use_cudagraph=True, - non_cudagraph_ops=["silly.attention"], - )) + compilation_config = CompilationConfig( + level=CompilationLevel.PIECEWISE, + use_cudagraph=True, + non_cudagraph_ops=["silly.attention"], + ) else: - set_compilation_config(None) + compilation_config = CompilationConfig( + level=CompilationLevel.PIECEWISE, ) - vllm_config = VllmConfig() + vllm_config = VllmConfig(compilation_config=compilation_config) with set_current_vllm_config(vllm_config): model = LlamaModel(config=llama_config, vllm_config=vllm_config, diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index 08747ebc58b75..c0db2e78824be 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -96,31 +96,36 @@ def test_compile_correctness(test_setting: TestSetting): final_args = ["--enforce-eager"] + model_args + ["-pp", str(pp_size)] + \ ["-tp", str(tp_size)] + all_args: List[List[str]] = [] all_envs: List[Optional[Dict[str, str]]] = [] for level in [ CompilationLevel.NO_COMPILATION, CompilationLevel.PIECEWISE, ]: - all_envs.append({"VLLM_TORCH_COMPILE_LEVEL": str(level)}) + all_args.append(final_args + ["-O", str(level)]) + all_envs.append({}) # inductor will change the output, so we only compare if the output # is close, not exactly the same. 
compare_all_settings( - model, [final_args] * 2, + model, + all_args, all_envs, method=method if method != "generate" else "generate_close") all_envs.clear() + all_args.clear() for level in [ CompilationLevel.NO_COMPILATION, CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE, ]: - all_envs.append({"VLLM_TORCH_COMPILE_LEVEL": str(level)}) + all_args.append(final_args + ["-O", str(level)]) + all_envs.append({}) if level != CompilationLevel.DYNAMO_ONCE and not fullgraph: # "DYNAMO_ONCE" will always use fullgraph all_envs[-1][ "VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE"] = "0" # type: ignore - compare_all_settings(model, [final_args] * 3, all_envs, method=method) + compare_all_settings(model, all_args * 3, all_envs, method=method) diff --git a/tests/compile/utils.py b/tests/compile/utils.py index 729f10676888b..078c6bf9ea1df 100644 --- a/tests/compile/utils.py +++ b/tests/compile/utils.py @@ -4,7 +4,7 @@ from tests.quantization.utils import is_quant_method_supported from vllm import LLM, SamplingParams -from vllm.config import CompilationLevel +from vllm.config import CompilationConfig, CompilationLevel from vllm.platforms import current_platform TEST_MODELS = [ @@ -65,7 +65,6 @@ def check_full_graph_support(model, optimization_level, tp_size=1): # make sure these models can be captured in full graph mode - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(optimization_level) os.environ["VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE"] = "1" # The base meta llama uses too much memory. @@ -86,6 +85,7 @@ def check_full_graph_support(model, enforce_eager=True, tensor_parallel_size=tp_size, disable_custom_all_reduce=True, + compilation_config=CompilationConfig(level=optimization_level), **model_kwargs) outputs = llm.generate(prompts, sampling_params) diff --git a/tests/model_executor/test_enabled_custom_ops.py b/tests/model_executor/test_enabled_custom_ops.py index c3219bc50646b..c54e30995da49 100644 --- a/tests/model_executor/test_enabled_custom_ops.py +++ b/tests/model_executor/test_enabled_custom_ops.py @@ -1,4 +1,3 @@ -import os from typing import List import pytest @@ -53,9 +52,8 @@ class Relu3(ReLUSquaredActivation): ]) def test_enabled_ops(env: str, torch_level: int, ops_enabled: List[int], default_on: bool): - os.environ["VLLM_TORCH_COMPILE_LEVEL"] = str(torch_level) vllm_config = VllmConfig(compilation_config=CompilationConfig( - custom_ops=env.split(","))) + level=torch_level, custom_ops=env.split(","))) with set_current_vllm_config(vllm_config): assert CustomOp.default_on() == default_on diff --git a/tests/tpu/test_compilation.py b/tests/tpu/test_compilation.py index 941abe17a3378..65bee85e7a1ea 100644 --- a/tests/tpu/test_compilation.py +++ b/tests/tpu/test_compilation.py @@ -1,24 +1,47 @@ import glob import os -import runpy import tempfile import depyf -from vllm.config import CompilationLevel - -# disable custom dispatcher, let Dynamo takes over -# all the control -os.environ['VLLM_TORCH_COMPILE_LEVEL'] = str(CompilationLevel.DYNAMO_AS_IS) +from vllm.config import CompilationConfig, CompilationLevel temp_dir = tempfile.mkdtemp() with depyf.prepare_debug(temp_dir): - cur_dir = os.path.dirname(__file__) - parent_dir = os.path.dirname(cur_dir) - root_dir = os.path.dirname(parent_dir) - example_file = os.path.join(root_dir, "examples", - "offline_inference_tpu.py") - runpy.run_path(example_file) + from vllm import LLM, SamplingParams + + prompts = [ + "A robot may not injure a human being", + "It is only with the heart that one can see rightly;", + "The greatest glory in living lies not in never 
falling,", + ] + answers = [ + " or, through inaction, allow a human being to come to harm.", + " what is essential is invisible to the eye.", + " but in rising every time we fall.", + ] + N = 1 + # Currently, top-p sampling is disabled. `top_p` should be 1.0. + sampling_params = SamplingParams(temperature=0.7, + top_p=1.0, + n=N, + max_tokens=16) + + # Set `enforce_eager=True` to avoid ahead-of-time compilation. + # In real workloads, `enforace_eager` should be `False`. + + # disable custom dispatcher, let Dynamo takes over + # all the control + llm = LLM(model="google/gemma-2b", + enforce_eager=True, + compilation_config=CompilationConfig( + level=CompilationLevel.DYNAMO_AS_IS)) + outputs = llm.generate(prompts, sampling_params) + for output, answer in zip(outputs, answers): + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + assert generated_text.startswith(answer) compiled_code = sorted( glob.glob(os.path.join(temp_dir, "__transformed_code*.py"))) diff --git a/tests/tpu/test_custom_dispatcher.py b/tests/tpu/test_custom_dispatcher.py index 53b10c06135a1..df348258efcba 100644 --- a/tests/tpu/test_custom_dispatcher.py +++ b/tests/tpu/test_custom_dispatcher.py @@ -13,7 +13,9 @@ def test_custom_dispatcher(): compare_two_settings( "google/gemma-2b", - arg1=["--enforce-eager"], - arg2=["--enforce-eager"], - env1={"VLLM_TORCH_COMPILE_LEVEL": str(CompilationLevel.DYNAMO_ONCE)}, - env2={"VLLM_TORCH_COMPILE_LEVEL": str(CompilationLevel.DYNAMO_AS_IS)}) + arg1=["--enforce-eager", "-O", + str(CompilationLevel.DYNAMO_ONCE)], + arg2=["--enforce-eager", "-O", + str(CompilationLevel.DYNAMO_AS_IS)], + env1={}, + env2={}) diff --git a/vllm/config.py b/vllm/config.py index ea9ec43cc5a15..e69cbd3eb402a 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2174,8 +2174,14 @@ class CompilationConfig(BaseModel): enabled_custom_ops: Counter[str] = PrivateAttr disabled_custom_ops: Counter[str] = PrivateAttr + @classmethod + def from_cli(cls, cli_value: str) -> "CompilationConfig": + """Parse the CLI value for the compilation config.""" + if cli_value in ["0", "1", "2", "3"]: + return cls(level=int(cli_value)) + return CompilationConfig.model_validate_json(cli_value) + def model_post_init(self, __context: Any) -> None: - self.level = envs.VLLM_TORCH_COMPILE_LEVEL count_none = self.custom_ops.count("none") count_all = self.custom_ops.count("all") @@ -2249,26 +2255,6 @@ def init_during_runtime(self): "inductor_specialize_for_cudagraph_no_more_than is None") self.compile_sizes = self.inductor_compile_sizes - @staticmethod - def select_and_init_config() -> "CompilationConfig": - """The order of selecting config is: - 1. Use the config specified in environment variable. - 2. Use the config specified in plugins. - 3. Use the default config. 
- """ - config_path = envs.VLLM_TORCH_COMPILE_CONFIG - if config_path is not None: - with open(config_path) as json_file: - config = CompilationConfig.model_validate_json( - json_file.read()) - else: - from vllm.plugins import get_compilation_config - predefined_config = get_compilation_config() - config = predefined_config if predefined_config is not None else ( - CompilationConfig()) - - return config - @dataclass class VllmConfig: @@ -2354,8 +2340,19 @@ def __post_init__(self): self.model_config, self.load_config) if self.compilation_config is None: - self.compilation_config = CompilationConfig.select_and_init_config( - ) + self.compilation_config = CompilationConfig() + if envs.VLLM_USE_V1: + # NOTE(woosuk): Currently, we use inductor because the piecewise + # CUDA graphs do not work properly with the custom CUDA kernels. + # FIXME(woosuk): Disable inductor to reduce the compilation time + # and avoid any potential issues with the inductor. + self.compilation_config.custom_ops = ["none"] + self.compilation_config.use_cudagraph = True + self.compilation_config.non_cudagraph_ops = [ + "vllm.unified_v1_flash_attention" + ] + self.compilation_config.use_inductor = True + self.compilation_config.enable_fusion = False current_platform.check_and_update_config(self) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index ee4b6addfd466..a3ae1889774f3 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -8,12 +8,13 @@ import torch import vllm.envs as envs -from vllm.config import (CacheConfig, ConfigFormat, DecodingConfig, - DeviceConfig, HfOverrides, LoadConfig, LoadFormat, - LoRAConfig, ModelConfig, ObservabilityConfig, - ParallelConfig, PoolerConfig, PromptAdapterConfig, - SchedulerConfig, SpeculativeConfig, TaskOption, - TokenizerPoolConfig, VllmConfig) +from vllm.config import (CacheConfig, CompilationConfig, ConfigFormat, + DecodingConfig, DeviceConfig, HfOverrides, LoadConfig, + LoadFormat, LoRAConfig, ModelConfig, + ObservabilityConfig, ParallelConfig, PoolerConfig, + PromptAdapterConfig, SchedulerConfig, + SpeculativeConfig, TaskOption, TokenizerPoolConfig, + VllmConfig) from vllm.executor.executor_base import ExecutorBase from vllm.logger import init_logger from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS @@ -189,6 +190,7 @@ class EngineArgs: override_neuron_config: Optional[Dict[str, Any]] = None override_pooler_config: Optional[PoolerConfig] = None + compilation_config: Optional[CompilationConfig] = None def __post_init__(self): if not self.tokenizer: @@ -868,6 +870,20 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: help="Override or set the pooling method in the embedding model. " "e.g. {\"pooling_type\": \"mean\", \"normalize\": false}.'") + parser.add_argument('--compilation-config', + '-O', + type=CompilationConfig.from_cli, + default=None, + help='torch.compile configuration for the model.' + 'When it is a number (0, 1, 2, 3), it will be ' + 'interpreted as the optimization level.\n' + 'NOTE: level 0 is the default level without ' + 'any optimization. level 1 and 2 are for internal ' + 'testing only. 
level 3 is the recommended level ' + 'for production.\n' + 'To specify the full compilation config, ' + 'use a JSON string.') + return parser @classmethod @@ -1142,6 +1158,7 @@ def create_engine_config(self) -> VllmConfig: decoding_config=decoding_config, observability_config=observability_config, prompt_adapter_config=prompt_adapter_config, + compilation_config=self.compilation_config, ) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index e72dc81f35b67..2a5eaf1340762 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -262,7 +262,8 @@ def __init__( "num_scheduler_steps=%d, chunked_prefill_enabled=%s " "multi_step_stream_outputs=%s, enable_prefix_caching=%s, " "use_async_output_proc=%s, use_cached_outputs=%s, " - "mm_processor_kwargs=%s, pooler_config=%r)", + "mm_processor_kwargs=%s, pooler_config=%r," + "compilation_config=%r", VLLM_VERSION, model_config.model, speculative_config, @@ -297,6 +298,7 @@ def __init__( use_cached_outputs, model_config.mm_processor_kwargs, model_config.pooler_config, + vllm_config.compilation_config, ) # TODO(woosuk): Print more configs in debug mode. self.model_config = model_config diff --git a/vllm/envs.py b/vllm/envs.py index 716e835a555f1..853c49bc4dbc1 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -67,8 +67,6 @@ VLLM_USE_TRITON_AWQ: bool = False VLLM_ALLOW_RUNTIME_LORA_UPDATING: bool = False VLLM_SKIP_P2P_CHECK: bool = False - VLLM_TORCH_COMPILE_LEVEL: int = 0 - VLLM_TORCH_COMPILE_CONFIG: Optional[str] = None VLLM_DISABLED_KERNELS: List[str] = [] VLLM_USE_V1: bool = False VLLM_ENABLE_V1_MULTIPROCESSING: bool = False @@ -209,12 +207,6 @@ def get_default_config_root(): "VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE": lambda: bool( os.environ.get("VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE", "1") != "0"), - "VLLM_TORCH_COMPILE_LEVEL": - lambda: int(os.environ.get("VLLM_TORCH_COMPILE_LEVEL", "0")), - - # Path to the config file for torch compile - "VLLM_TORCH_COMPILE_CONFIG": - lambda: os.environ.get("VLLM_TORCH_COMPILE_CONFIG", None), # local rank of the process in the distributed setting, used to determine # the GPU device id diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 9057afb6514e4..2a7ca9fb8c576 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -1,4 +1,3 @@ -import os from typing import TYPE_CHECKING import torch @@ -40,7 +39,8 @@ def inference_mode(cls): def check_and_update_config(cls, vllm_config: VllmConfig) -> None: from vllm.config import CompilationLevel compilation_config = vllm_config.compilation_config - if "VLLM_TORCH_COMPILE_LEVEL" not in os.environ: + if compilation_config.level == CompilationLevel.NO_COMPILATION: + # TPU does not support NO_COMPILATION compilation_config.level = CompilationLevel.DYNAMO_ONCE assert compilation_config.level < CompilationLevel.PIECEWISE,\ "TPU does not support Inductor." 
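
The net effect of the hunks above is that torch.compile behaviour is now configured through a CompilationConfig carried on VllmConfig (or the new -O/--compilation-config CLI flag) instead of the VLLM_TORCH_COMPILE_LEVEL / VLLM_TORCH_COMPILE_CONFIG environment variables. A minimal sketch of the new interface follows; it assumes the level constants shown in the diffs, and the "silly.attention" op name is only the test-only placeholder used in the piecewise tests, not a real production op.

    # Sketch only -- illustrates the CompilationConfig plumbing introduced above.
    # The custom-op name below is a placeholder taken from the test files in this patch.
    from vllm.config import CompilationConfig, CompilationLevel, VllmConfig

    # Programmatic use: build the config directly instead of exporting
    # VLLM_TORCH_COMPILE_LEVEL / VLLM_TORCH_COMPILE_CONFIG.
    compilation_config = CompilationConfig(
        level=CompilationLevel.PIECEWISE,
        use_cudagraph=True,
        non_cudagraph_ops=["silly.attention"],
    )
    vllm_config = VllmConfig(compilation_config=compilation_config)

    # CLI use: a bare digit selects the optimization level, a JSON string
    # supplies the full config, e.g. `-O 3` or `--compilation-config '{"level": 3}'`.
    assert CompilationConfig.from_cli("3").level == CompilationLevel.PIECEWISE
    cfg = CompilationConfig.from_cli('{"level": 3, "use_cudagraph": true}')
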
diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 05a9739d99e71..dc183dbfc9b96 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -5,7 +5,7 @@ import vllm.envs as envs if TYPE_CHECKING: - from vllm.config import CompilationConfig, VllmConfig + from vllm.config import VllmConfig logger = logging.getLogger(__name__) @@ -54,18 +54,6 @@ def load_general_plugins(): logger.exception("Failed to load plugin %s", plugin.name) -_compilation_config: Optional["CompilationConfig"] = None - - -def set_compilation_config(config: Optional["CompilationConfig"]): - global _compilation_config - _compilation_config = config - - -def get_compilation_config() -> Optional["CompilationConfig"]: - return _compilation_config - - _current_vllm_config: Optional["VllmConfig"] = None diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index d60f93a44f6dd..1f9b544637bf7 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -8,13 +8,12 @@ import torch.nn as nn from vllm.compilation.compile_context import set_compile_context -from vllm.config import CompilationConfig, CompilationLevel, VllmConfig +from vllm.config import CompilationLevel, VllmConfig from vllm.forward_context import set_forward_context from vllm.inputs import INPUT_REGISTRY, InputRegistry from vllm.logger import init_logger from vllm.model_executor.model_loader import get_model from vllm.multimodal import MultiModalKwargs -from vllm.plugins import set_compilation_config from vllm.sampling_params import SamplingParams, SamplingType from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, cdiv, is_pin_memory_available) @@ -508,20 +507,6 @@ def execute_model( return model_runner_output def load_model(self) -> None: - if self.use_cuda_graph: - # NOTE(woosuk): Currently, we use inductor because the piecewise - # CUDA graphs do not work properly with the custom CUDA kernels. - # FIXME(woosuk): Disable inductor to reduce the compilation time - # and avoid any potential issues with the inductor. - set_compilation_config( - CompilationConfig( - custom_ops=["none"], - use_cudagraph=True, - non_cudagraph_ops=["vllm.unified_v1_flash_attention"], - use_inductor=True, - enable_fusion=False, - )) - logger.info("Starting to load model %s...", self.model_config.model) with DeviceMemoryProfiler() as m: # noqa: SIM117 self.model = get_model(vllm_config=self.vllm_config) @@ -562,9 +547,8 @@ def profile_run(self) -> None: def capture_model(self) -> None: if not self.use_cuda_graph: logger.warning( - "Skipping CUDA graph capture. Please set " - "VLLM_TORCH_COMPILE_LEVEL=%d to use CUDA graphs.", - CompilationLevel.PIECEWISE) + "Skipping CUDA graph capture. 
Please add " + "-O 3 to use CUDA graphs.", CompilationLevel.PIECEWISE) return start_time = time.perf_counter() From efa9084628b32787ae1901a2d1e9b80f7d08809b Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 19 Nov 2024 16:05:25 -0500 Subject: [PATCH 053/397] [Core] Avoid metrics log noise when idle (#8868) Signed-off-by: Russell Bryant --- vllm/engine/metrics.py | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index 47472c274ccb6..5bfd6a9f4b386 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -421,6 +421,11 @@ def get_throughput(tracked_stats: List[int], now: float, class LoggingStatLogger(StatLoggerBase): """LoggingStatLogger is used in LLMEngine to log to Stdout.""" + def __init__(self, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self.last_prompt_throughput: Optional[float] = None + self.last_generation_throughput: Optional[float] = None + def log(self, stats: Stats) -> None: """Called by LLMEngine. Logs to Stdout every self.local_interval seconds.""" @@ -445,8 +450,14 @@ def log(self, stats: Stats) -> None: now=stats.now, last_log=self.last_local_log) - # Log to stdout. - logger.info( + log_fn = logger.info + if not any((prompt_throughput, generation_throughput, + self.last_prompt_throughput, + self.last_generation_throughput)): + # Avoid log noise on an idle production system + log_fn = logger.debug + + log_fn( "Avg prompt throughput: %.1f tokens/s, " "Avg generation throughput: %.1f tokens/s, " "Running: %d reqs, Swapped: %d reqs, " @@ -472,11 +483,16 @@ def log(self, stats: Stats) -> None: self._format_spec_decode_metrics_str( self.spec_decode_metrics)) - # Reset tracked stats for next interval. - self.num_prompt_tokens = [] - self.num_generation_tokens = [] - self.last_local_log = stats.now - self.spec_decode_metrics = None + self._reset(stats, prompt_throughput, generation_throughput) + + def _reset(self, stats, prompt_throughput, generation_throughput) -> None: + # Reset tracked stats for next interval. 
+ self.num_prompt_tokens = [] + self.num_generation_tokens = [] + self.last_local_log = stats.now + self.spec_decode_metrics = None + self.last_prompt_throughput = prompt_throughput + self.last_generation_throughput = generation_throughput def _format_spec_decode_metrics_str( self, metrics: "SpecDecodeWorkerMetrics") -> str: From b00b33d77e33c5516e73de663539dff96e8b61a4 Mon Sep 17 00:00:00 2001 From: ElizaWszola Date: Tue, 19 Nov 2024 22:31:12 +0100 Subject: [PATCH 054/397] [Model][Quantization] HQQ support through Marlin kernel expansion (#9766) Signed-off-by: ElizaWszola --- benchmarks/kernels/benchmark_machete.py | 3 +- benchmarks/kernels/benchmark_marlin.py | 4 +- csrc/quantization/gptq_marlin/gptq_marlin.cu | 277 ++++++++++----- csrc/torch_bindings.cpp | 2 +- tests/kernels/test_marlin_gemm.py | 88 ++++- tests/weight_loading/models.txt | 3 +- vllm/_custom_ops.py | 8 +- vllm/model_executor/layers/linear.py | 3 +- .../layers/quantization/__init__.py | 2 + .../layers/quantization/hqq_marlin.py | 325 ++++++++++++++++++ .../layers/quantization/utils/marlin_utils.py | 6 +- 11 files changed, 632 insertions(+), 89 deletions(-) create mode 100644 vllm/model_executor/layers/quantization/hqq_marlin.py diff --git a/benchmarks/kernels/benchmark_machete.py b/benchmarks/kernels/benchmark_machete.py index a0342d08f1db8..46bab74ae8adf 100644 --- a/benchmarks/kernels/benchmark_machete.py +++ b/benchmarks/kernels/benchmark_machete.py @@ -210,7 +210,8 @@ def marlin_create_bench_fn(bt: BenchmarkTensors) -> Callable: size_m=bt.a.shape[0], size_n=bt.w_ref.shape[1], size_k=bt.w_ref.shape[0], - is_k_full=True) + is_k_full=True, + is_zp_float=False) else: assert bt.a.dtype == torch.int8 assert bt.wtype == scalar_types.uint4b8 diff --git a/benchmarks/kernels/benchmark_marlin.py b/benchmarks/kernels/benchmark_marlin.py index 536c133bb3341..8fb44e3a3dbd8 100644 --- a/benchmarks/kernels/benchmark_marlin.py +++ b/benchmarks/kernels/benchmark_marlin.py @@ -131,7 +131,7 @@ def bench_run(results: List[benchmark.Measurement], model: str, results.append( benchmark.Timer( stmt= - "output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False)", # noqa: E501 + "output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False, False)", # noqa: E501 globals=globals, label=label, sub_label=sub_label, @@ -141,7 +141,7 @@ def bench_run(results: List[benchmark.Measurement], model: str, results.append( benchmark.Timer( stmt= - "output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True)", # noqa: E501 + "output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True, False)", # noqa: E501 globals=globals, label=label, sub_label=sub_label, diff --git a/csrc/quantization/gptq_marlin/gptq_marlin.cu b/csrc/quantization/gptq_marlin/gptq_marlin.cu index 6dbf9594e8492..0c698ced7713d 100644 --- a/csrc/quantization/gptq_marlin/gptq_marlin.cu +++ b/csrc/quantization/gptq_marlin/gptq_marlin.cu @@ -54,9 +54,10 @@ template shared // fetch pipeline - const bool has_act_order, // whether act_order is enabled - const int group_blocks = -1 // number of consecutive 16x16 
blocks - // with a separate quantization scale + const bool has_act_order, // whether act_order is enabled + const int group_blocks = -1, // number of consecutive 16x16 blocks + // with a separate quantization scale + const bool is_zp_float // is zero point of float16 type? > __global__ void Marlin( const int4* __restrict__ A, // fp16 input matrix of shape mxk @@ -82,7 +83,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, torch::Tensor& workspace, vllm::ScalarTypeId const b_q_type_id, int64_t size_m, int64_t size_n, int64_t size_k, - bool is_k_full, bool has_zp) { + bool is_k_full, bool has_zp, bool is_zp_float) { TORCH_CHECK_NOT_IMPLEMENTED(false, "marlin_gemm(..) requires CUDA_ARCH >= 8.0"); return torch::empty({1, 1}); @@ -516,10 +517,11 @@ template shared // fetch pipeline - const bool has_act_order, // whether act_order is enabled - const bool has_zp, // whether zero-points are enabled - const int group_blocks = -1 // number of consecutive 16x16 blocks - // with a separate quantization scale + const bool has_act_order, // whether act_order is enabled + const bool has_zp, // whether zero-points are enabled + const int group_blocks = -1, // number of consecutive 16x16 blocks + // with a separate quantization scale + const bool is_zp_float // is zero point of float16 type? > __global__ void Marlin( const int4* __restrict__ A, // fp16 input matrix of shape mxk @@ -692,8 +694,10 @@ __global__ void Marlin( int act_s_col_tb_stride = act_s_col_warp_stride * tb_n_warps; // Zero-points sizes/strides - int zp_gl_stride = (prob_n / pack_factor) / 4; - constexpr int zp_sh_stride = ((16 * thread_n_blocks) / pack_factor) / 4; + int zp_gl_stride = is_zp_float ? prob_n / 8 : (prob_n / pack_factor) / 4; + constexpr int zp_sh_stride = is_zp_float + ? 16 * thread_n_blocks / 8 + : ((16 * thread_n_blocks) / pack_factor) / 4; constexpr int zp_tb_groups = s_tb_groups; constexpr int zp_sh_stage = has_zp ? zp_tb_groups * zp_sh_stride : 0; int zp_gl_rd_delta = zp_gl_stride; @@ -768,9 +772,16 @@ __global__ void Marlin( constexpr int num_ints_per_thread = 8 / pack_factor; int zp_sh_rd; if constexpr (has_zp) { - zp_sh_rd = num_ints_per_thread * num_col_threads * - ((threadIdx.x / 32) % (thread_n_blocks / 4)) + - num_ints_per_thread * ((threadIdx.x % 32) / num_row_threads); + if constexpr (is_zp_float) { + if constexpr (group_blocks != -1) { + zp_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + (threadIdx.x % 32) / 4; + } + } else { + zp_sh_rd = num_ints_per_thread * num_col_threads * + ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + num_ints_per_thread * ((threadIdx.x % 32) / num_row_threads); + } } // Precompute which thread should not read memory in which iterations; this is @@ -832,6 +843,7 @@ __global__ void Marlin( FragS act_frag_s[2][4][4]; // For act-order int frag_qzp[2][num_ints_per_thread]; // Zero-points FragZP frag_zp; // Zero-points in fp16 + FragZP frag_zpf[2]; // Zero-points in fp16 in HQQ // Zero accumulators. 
auto zero_accums = [&]() { @@ -1126,7 +1138,7 @@ __global__ void Marlin( // has_zp implies AWQ, which doesn't have act_order, static_assert(!has_zp || group_blocks != 0); - if constexpr (has_zp) { + if constexpr (has_zp && !is_zp_float) { int pipe = full_pipe % stages; if constexpr (group_blocks == -1) { @@ -1170,11 +1182,44 @@ __global__ void Marlin( } } } + + else if constexpr (has_zp && is_zp_float) { + int pipe = full_pipe % stages; + + if constexpr (group_blocks != -1) { + if constexpr (group_blocks >= thread_k_blocks) { + int4* sh_zp_stage = + sh_zp + zp_sh_stage * ((group_blocks / thread_k_blocks) * + (pipe / (group_blocks / thread_k_blocks))); + reinterpret_cast(&frag_zpf[k % 2])[0] = sh_zp_stage[zp_sh_rd]; + } else { + int warp_id = threadIdx.x / 32; + int n_warps = thread_n_blocks / 4; + + int warp_row = warp_id / n_warps; + + int cur_k = warp_row * 16; + cur_k += k_iter_size * (k % b_sh_wr_iters); + + int k_blocks = cur_k / 16; + // Suppress bogus and persistent divide-by-zero warning + #pragma nv_diagnostic push + #pragma nv_diag_suppress divide_by_zero + int cur_group_id = k_blocks / group_blocks; + #pragma nv_diagnostic pop + + int4* sh_zp_stage = sh_zp + zp_sh_stage * pipe; + + reinterpret_cast(&frag_zpf[k % 2])[0] = + sh_zp_stage[zp_sh_rd + cur_group_id * zp_sh_stride]; + } + } + } }; // Execute the actual tensor core matmul of a sub-tile. auto matmul = [&](int k) { - if constexpr (has_zp) { + if constexpr (has_zp && !is_zp_float) { FragB frag_zp_0; FragB frag_zp_1; int zp_quant_0, zp_quant_1; @@ -1219,10 +1264,14 @@ __global__ void Marlin( frag_b1 = dequant(b_quant_1); // Apply zero-point to frag_b0 - if constexpr (has_zp) { + if constexpr (has_zp && !is_zp_float) { sub_zp(frag_b0, frag_zp[j], 0); } + else if constexpr (has_zp && is_zp_float && group_blocks != -1) { + sub_zp(frag_b0, frag_zpf[k % 2][j], 0); + } + // Apply scale to frag_b0 if constexpr (has_act_order) { scale4(frag_b0, act_frag_s[k % 2][0][j], @@ -1235,10 +1284,14 @@ __global__ void Marlin( } // Apply zero-point to frag_b1 - if constexpr (has_zp) { + if constexpr (has_zp && !is_zp_float) { sub_zp(frag_b1, frag_zp[j], 1); } + else if constexpr (has_zp && is_zp_float && group_blocks != -1) { + sub_zp(frag_b1, frag_zpf[k % 2][j], 1); + } + // Apply scale to frag_b1 if constexpr (has_act_order) { scale4(frag_b1, act_frag_s[k % 2][0][j], @@ -1510,7 +1563,7 @@ __global__ void Marlin( fetch_scales_to_shared(true, g_idx[slice_k_start], g_idx[last_g_idx]); } - if constexpr (has_zp && group_blocks == -1) { + if constexpr (has_zp && !is_zp_float && group_blocks == -1) { if (i == 0) { fetch_zp_to_shared(); } @@ -1697,23 +1750,27 @@ __global__ void Marlin( } #define __CALL_IF(W_TYPE, THREAD_M_BLOCKS, THREAD_N_BLOCKS, THREAD_K_BLOCKS, \ - HAS_ACT_ORDER, HAS_ZP, GROUP_BLOCKS, NUM_THREADS) \ + HAS_ACT_ORDER, HAS_ZP, GROUP_BLOCKS, NUM_THREADS, \ + IS_ZP_FLOAT) \ else if (q_type == W_TYPE && thread_m_blocks == THREAD_M_BLOCKS && \ thread_n_blocks == THREAD_N_BLOCKS && \ thread_k_blocks == THREAD_K_BLOCKS && \ has_act_order == HAS_ACT_ORDER && has_zp == HAS_ZP && \ - group_blocks == GROUP_BLOCKS && num_threads == NUM_THREADS) { \ - cudaFuncSetAttribute( \ - Marlin, \ - cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem); \ - Marlin \ - <<>>( \ - A_ptr, B_ptr, C_ptr, C_tmp_ptr, s_ptr, zp_ptr, g_idx_ptr, \ - num_groups, prob_m, prob_n, prob_k, locks, use_fp32_reduce); \ + group_blocks == GROUP_BLOCKS && num_threads == NUM_THREADS && \ + is_zp_float == IS_ZP_FLOAT) { \ + if constexpr (!IS_ZP_FLOAT || 
std::is_same::value) { \ + cudaFuncSetAttribute( \ + Marlin, \ + cudaFuncAttributeMaxDynamicSharedMemorySize, max_shared_mem); \ + Marlin \ + <<>>( \ + A_ptr, B_ptr, C_ptr, C_tmp_ptr, s_ptr, zp_ptr, g_idx_ptr, \ + num_groups, prob_m, prob_n, prob_k, locks, use_fp32_reduce); \ + } \ } typedef struct { @@ -1905,51 +1962,96 @@ exec_config_t determine_thread_config(int prob_m, int prob_n, int prob_k, } #define GPTQ_CALL_IF(W_TYPE, N_BLOCKS, K_BLOCKS, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, true, false, 0, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS) + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, 
K_BLOCKS, false, false, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, false, 8, NUM_THREADS, \ + false) #define AWQ_CALL_IF(W_TYPE, N_BLOCKS, K_BLOCKS, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS, \ + false) \ \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS) \ - __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS) + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, -1, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 2, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + false) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 8, NUM_THREADS, false) + + // We currently have 4-bit models only with group_blocks == 4 + #define HQQ_CALL_IF(W_TYPE, N_BLOCKS, K_BLOCKS, NUM_THREADS) \ + __CALL_IF(W_TYPE, 1, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + true) \ + __CALL_IF(W_TYPE, 2, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + true) \ + __CALL_IF(W_TYPE, 3, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, \ + true) \ + __CALL_IF(W_TYPE, 4, N_BLOCKS, K_BLOCKS, false, true, 4, NUM_THREADS, true) template void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* s, @@ -1958,7 +2060,7 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* s, vllm::ScalarType const& q_type, bool has_act_order, bool is_k_full, bool has_zp, int num_groups, int group_size, int dev, cudaStream_t stream, int thread_k, int 
thread_n, - int sms, int max_par, bool use_fp32_reduce) { + int sms, int max_par, bool use_fp32_reduce, bool is_zp_float) { if (has_zp) { TORCH_CHECK( q_type == vllm::kU4 || q_type == vllm::kU8, @@ -2111,6 +2213,11 @@ void marlin_mm(const void* A, const void* B, void* C, void* C_tmp, void* s, AWQ_CALL_IF(vllm::kU8, 8, 8, 256) AWQ_CALL_IF(vllm::kU8, 8, 4, 128) AWQ_CALL_IF(vllm::kU8, 4, 8, 128) + + HQQ_CALL_IF(vllm::kU4, 16, 4, 256) + HQQ_CALL_IF(vllm::kU4, 8, 8, 256) + HQQ_CALL_IF(vllm::kU4, 8, 4, 128) + HQQ_CALL_IF(vllm::kU4, 4, 8, 128) else { TORCH_CHECK(false, "Unsupported shapes: MNK = [", prob_m, ", ", prob_n, ", ", prob_k, "]", ", has_act_order = ", has_act_order, @@ -2135,7 +2242,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, vllm::ScalarTypeId const& b_q_type_id, int64_t size_m, int64_t size_n, int64_t size_k, bool is_k_full, bool has_zp, - bool use_fp32_reduce) { + bool use_fp32_reduce, bool is_zp_float) { vllm::ScalarType const b_q_type = vllm::ScalarType::from_id(b_q_type_id); if (has_zp) { TORCH_CHECK( @@ -2148,6 +2255,12 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, b_q_type.str()); } + if (has_zp && is_zp_float) { + TORCH_CHECK(a.scalar_type() == at::ScalarType::Half, + "Computation type must be float16 (half) when using float zero " + "points."); + } + int pack_factor = 32 / b_q_type.size_bits(); // Verify A @@ -2257,12 +2370,22 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, if (has_zp) { int rank = b_zeros.sizes().size(); TORCH_CHECK(rank == 2, "b_zeros rank = ", rank, " is not 2"); - TORCH_CHECK(b_zeros.size(0) == num_groups, - "b_zeros dim 0 = ", b_zeros.size(0), - " is not num_groups = ", num_groups); - TORCH_CHECK(b_zeros.size(1) == size_n / pack_factor, - "b_zeros dim 1 = ", b_zeros.size(1), - " is not size_n / pack_factor = ", size_n / pack_factor); + if (is_zp_float) { + TORCH_CHECK(b_zeros.size(1) == size_n, + "b_zeros dim 1 = ", b_zeros.size(1), + " is not size_n = ", size_n); + TORCH_CHECK(num_groups == b_zeros.size(0), + "b_zeros dim 0 = ", b_zeros.size(0), + " is not num_groups = ", num_groups); + TORCH_CHECK(num_groups != -1, "num_groups must be != -1"); + } else { + TORCH_CHECK(b_zeros.size(0) == num_groups, + "b_zeros dim 0 = ", b_zeros.size(0), + " is not num_groups = ", num_groups); + TORCH_CHECK(b_zeros.size(1) == size_n / pack_factor, + "b_zeros dim 1 = ", b_zeros.size(1), + " is not size_n / pack_factor = ", size_n / pack_factor); + } } // Verify workspace size @@ -2282,7 +2405,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, a_tmp.data_ptr(), size_m, size_n, size_k, workspace.data_ptr(), b_q_type, has_act_order, is_k_full, has_zp, num_groups, group_size, dev, at::cuda::getCurrentCUDAStream(dev), - thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce); + thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce, is_zp_float); } else if (a.scalar_type() == at::ScalarType::BFloat16) { marlin::marlin_mm( a.data_ptr(), b_q_weight.data_ptr(), @@ -2291,7 +2414,7 @@ torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, perm.data_ptr(), a_tmp.data_ptr(), size_m, size_n, size_k, workspace.data_ptr(), b_q_type, has_act_order, is_k_full, has_zp, num_groups, group_size, dev, at::cuda::getCurrentCUDAStream(dev), - thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce); + thread_k, thread_n, sms, marlin::max_par, use_fp32_reduce, is_zp_float); } else { TORCH_CHECK(false, "gpt_marlin_gemm only supports bfloat16 
and float16"); } diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index e4cc7ec951848..3dccdf61abf3b 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -244,7 +244,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { "Tensor b_zeros, Tensor g_idx, Tensor perm, Tensor workspace, " "int b_q_type, " "SymInt size_m, SymInt size_n, SymInt size_k, bool is_k_full, " - "bool has_zp, bool use_fp32_reduce) -> Tensor"); + "bool has_zp, bool use_fp32_reduce, bool is_zp_float) -> Tensor"); // conditionally compiled so impl registration is in source file // gptq_marlin repack from GPTQ. diff --git a/tests/kernels/test_marlin_gemm.py b/tests/kernels/test_marlin_gemm.py index b6dd68cc51a9f..3899ad1a325cf 100644 --- a/tests/kernels/test_marlin_gemm.py +++ b/tests/kernels/test_marlin_gemm.py @@ -29,6 +29,7 @@ marlin_qqq_quantize) from vllm.model_executor.layers.quantization.utils.quant_utils import ( awq_pack, gptq_pack, gptq_quantize_weights, quantize_weights, sort_weights) +from vllm.scalar_type import scalar_types ACT_ORDER_OPTS = [False, True] K_FULL_OPTS = [False, True] @@ -40,6 +41,8 @@ MARLIN_24_K_CHUNKS = [128] MARLIN_24_N_CHUNKS = [512] +HQQ_SUPPORTED_GROUP_SIZES = [64] + MNK_FACTORS = [ (1, 1, 1), (1, 4, 8), @@ -226,7 +229,7 @@ def test_gptq_marlin_gemm( torch.ops._C.gptq_marlin_gemm, (a_input, marlin_q_w, marlin_s, marlin_zp, g_idx, sort_indices, workspace.scratch, quant_type.id, a_input.shape[0], b_weight.shape[1], - a_input.shape[1], is_k_full, False, use_fp32_reduce), + a_input.shape[1], is_k_full, False, use_fp32_reduce, False), test_utils=DEFAULT_OPCHECK_TEST_UTILS) output = ops.gptq_marlin_gemm( @@ -244,6 +247,7 @@ def test_gptq_marlin_gemm( is_k_full=is_k_full, has_zp=False, use_fp32_reduce=use_fp32_reduce, + is_zp_float=False, ) output_ref = torch.matmul(a_input, w_ref) @@ -441,6 +445,7 @@ def test_awq_marlin_gemm( is_k_full=is_k_full, has_zp=has_zp, use_fp32_reduce=use_fp32_reduce, + is_zp_float=False, ) output_ref = torch.matmul(a_input, w_ref) @@ -451,6 +456,87 @@ def test_awq_marlin_gemm( assert max_diff < 0.04 +@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"), + reason="Marlin is not supported on this GPU type.") +@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS) +@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS) +@pytest.mark.parametrize("group_size", HQQ_SUPPORTED_GROUP_SIZES) +@pytest.mark.parametrize("mnk_factors", MNK_FACTORS) +@pytest.mark.parametrize("use_fp32_reduce", USE_FP32_REDUCE_OPTS) +def test_hqq_marlin_gemm( + k_chunk, + n_chunk, + group_size, + mnk_factors, + use_fp32_reduce, +): + m_factor, n_factor, k_factor = mnk_factors + + size_m = m_factor + size_k = k_chunk * k_factor + size_n = n_chunk * n_factor + + quant_type = scalar_types.uint4 + + a_input = rand_data((size_m, size_k)) + dev = a_input.device + + b_weight = torch.randint(0, + 10, (size_n, size_k), + dtype=torch.uint8, + device=dev) + scale = rand_data((size_n, size_k // group_size)) + zero = rand_data((size_n, size_k // group_size)) + + gptq_w_q = gptq_pack(b_weight.transpose(1, 0), 4, size_k, size_n) + + sort_indices = torch.empty(0, dtype=torch.int, device=dev) + marlin_w_q = ops.gptq_marlin_repack(gptq_w_q, sort_indices, size_k, size_n, + 4).to(dev) + marlin_s = marlin_permute_scales(scale.transpose(1, 0), size_k, size_n, + group_size).to(dev) + marlin_zp = marlin_permute_scales(zero.transpose(1, 0), size_k, size_n, + group_size).to(dev) + + g_idx = marlin_make_empty_g_idx(dev) + g_idx_sort_indices = marlin_make_empty_g_idx(dev) + + workspace = 
MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N, + GPTQ_MARLIN_MAX_PARALLEL) + + output = ops.gptq_marlin_gemm( + a_input, + marlin_w_q, + marlin_s, + marlin_zp, + g_idx, + g_idx_sort_indices, + workspace.scratch, + quant_type, + a_input.shape[0], + b_weight.shape[0], + a_input.shape[1], + is_k_full=True, + has_zp=True, + use_fp32_reduce=use_fp32_reduce, + is_zp_float=True, + ) + + b_flat = b_weight.reshape(-1, group_size) + zp_flat = zero.reshape(-1, 1) + s_flat = scale.reshape(-1, 1) + dequant = (b_flat - zp_flat) * s_flat + + output_ref = torch.matmul(a_input, + dequant.reshape(b_weight.shape).transpose(1, 0)) + + torch.cuda.synchronize() + + max_diff = compute_max_diff(output, output_ref) + + assert max_diff < 0.04 + + @pytest.mark.skipif(not is_quant_method_supported("qqq"), reason="Marlin is not supported on this GPU type.") @pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS) diff --git a/tests/weight_loading/models.txt b/tests/weight_loading/models.txt index a4ee9538d646b..2afffb5b9d1c8 100644 --- a/tests/weight_loading/models.txt +++ b/tests/weight_loading/models.txt @@ -27,4 +27,5 @@ fp8, neuralmagic/Meta-Llama-3-8B-Instruct-FP8-KV, main marlin, nm-testing/zephyr-beta-7b-marlin-g128, main marlin, robertgshaw2/zephyr-7b-beta-channelwise-marlin, main qqq, HandH1998/QQQ-Llama-3-8b-g128, main -qqq, HandH1998/QQQ-Llama-3-8b, main \ No newline at end of file +qqq, HandH1998/QQQ-Llama-3-8b, main +hqq, nm-testing/Llama-3.2-1B-Instruct-HQQ, main \ No newline at end of file diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index aa89010ca8ecd..782dc6aed1b8c 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -343,7 +343,8 @@ def _gptq_marlin_gemm_fake(a: torch.Tensor, size_k: torch.SymInt, is_k_full: bool, has_zp: bool = False, - use_fp32_reduce: bool = False) -> torch.Tensor: + use_fp32_reduce: bool = False, + is_zp_float: bool = False) -> torch.Tensor: return torch.empty((size_m, size_n), device=a.device, dtype=a.dtype) @register_fake("_C::ggml_dequantize") @@ -601,11 +602,12 @@ def gptq_marlin_gemm(a: torch.Tensor, size_k: int, is_k_full: bool, has_zp: bool = False, - use_fp32_reduce: bool = False) -> torch.Tensor: + use_fp32_reduce: bool = False, + is_zp_float: bool = False) -> torch.Tensor: return torch.ops._C.gptq_marlin_gemm(a, b_q_weight, b_scales, b_zeros, g_idx, perm, workspace, b_q_type.id, size_m, size_n, size_k, is_k_full, - has_zp, use_fp32_reduce) + has_zp, use_fp32_reduce, is_zp_float) # fp8 marlin diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index 9da38d4857d6d..2471c160d66b7 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -27,7 +27,8 @@ "AWQLinearMethod", "GPTQMarlinLinearMethod", "Fp8LinearMethod", "MarlinLinearMethod", "QQQLinearMethod", "GPTQMarlin24LinearMethod", "TPUInt8LinearMethod", "GPTQLinearMethod", "FBGEMMFp8LinearMethod", - "ModelOptFp8LinearMethod", "IPEXAWQLinearMethod", "IPEXGPTQLinearMethod" + "ModelOptFp8LinearMethod", "IPEXAWQLinearMethod", "IPEXGPTQLinearMethod", + "HQQMarlinMethod" ] diff --git a/vllm/model_executor/layers/quantization/__init__.py b/vllm/model_executor/layers/quantization/__init__.py index da841d052d728..ff342c4f9479e 100644 --- a/vllm/model_executor/layers/quantization/__init__.py +++ b/vllm/model_executor/layers/quantization/__init__.py @@ -21,6 +21,7 @@ GPTQMarlinConfig) from vllm.model_executor.layers.quantization.gptq_marlin_24 import ( GPTQMarlin24Config) +from vllm.model_executor.layers.quantization.hqq_marlin import 
HQQMarlinConfig from vllm.model_executor.layers.quantization.ipex_quant import IPEXConfig from vllm.model_executor.layers.quantization.marlin import MarlinConfig from vllm.model_executor.layers.quantization.modelopt import ModelOptFp8Config @@ -48,6 +49,7 @@ "compressed-tensors": CompressedTensorsConfig, "bitsandbytes": BitsAndBytesConfig, "qqq": QQQConfig, + "hqq": HQQMarlinConfig, "experts_int8": ExpertsInt8Config, "neuron_quant": NeuronQuantConfig, "ipex": IPEXConfig, diff --git a/vllm/model_executor/layers/quantization/hqq_marlin.py b/vllm/model_executor/layers/quantization/hqq_marlin.py new file mode 100644 index 0000000000000..28538d2993355 --- /dev/null +++ b/vllm/model_executor/layers/quantization/hqq_marlin.py @@ -0,0 +1,325 @@ +from typing import Any, Dict, List, Optional + +import torch + +from vllm import _custom_ops as ops +from vllm.logger import init_logger +from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase, + UnquantizedLinearMethod) +from vllm.model_executor.layers.quantization.base_config import ( + QuantizationConfig, QuantizeMethodBase) +from vllm.model_executor.layers.quantization.utils.marlin_utils import ( + GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N, + marlin_make_empty_g_idx, marlin_permute_scales) +from vllm.model_executor.layers.quantization.utils.marlin_utils_test import ( + MarlinWorkspace) +from vllm.model_executor.layers.quantization.utils.quant_utils import gptq_pack +from vllm.model_executor.parameter import (BasevLLMParameter, + GroupQuantScaleParameter, + PackedvLLMParameter) +from vllm.scalar_type import scalar_types + +logger = init_logger(__name__) + + +class HQQMarlinConfig(QuantizationConfig): + """Config class for HQQ Marlin""" + + def __init__( + self, + weight_bits: int, + group_size: int, + skip_modules: Optional[List[str]] = None, + ) -> None: + assert group_size == 64, ("The only supported HQQ group size is " + "currently 64.") + assert weight_bits == 4, ("The only supported HQQ quantization " + "bitsize is currently 4.") + + self.weight_bits = weight_bits + self.group_size = group_size + self.pack_factor = 32 // weight_bits # packed into int32 in GPTQ format + self.quant_type = scalar_types.uint4 + self.skip_modules = skip_modules + + def __repr__(self) -> str: + return (f"HQQMarlinConfig(quant_type={self.quant_type}, " + f"group_size={self.group_size})") + + @classmethod + def get_name(cls) -> str: + return "hqq" + + @classmethod + def get_supported_act_dtypes(cls) -> List[torch.dtype]: + return [torch.half, torch.bfloat16] + + @classmethod + def get_min_capability(cls) -> int: + return 80 + + @classmethod + def get_config_filenames(cls) -> List[str]: + return ["quantize_config.json"] + + @classmethod + def from_config(cls, config: Dict[str, Any]) -> "HQQMarlinConfig": + wq_params = (config["quant_config"]["weight_quant_params"]) + weight_bits = cls.get_from_keys(wq_params, ["nbits"]) + group_size = cls.get_from_keys(wq_params, ["group_size"]) + skip_modules = config["skip_modules"] + return cls(weight_bits, group_size, skip_modules) + + def is_layer_skipped(self, prefix: str) -> bool: + # Split the prefix into its dot-separated components + components = prefix.split('.') + + # Check if any of the skip modules exactly matches any component + return self.skip_modules is not None and any( + module_name in components for module_name in self.skip_modules) + + def get_quant_method(self, layer: torch.nn.Module, + prefix: str) -> Optional["QuantizeMethodBase"]: + if isinstance(layer, LinearBase): + if 
self.is_layer_skipped(prefix): + return UnquantizedLinearMethod() + return HQQMarlinMethod(self) + return None + + +# Empty HQQ parameter, will be ignored during loading +class HQQEmptyParameter(BasevLLMParameter): + + def load_merged_column_weight(self, loaded_weight: torch.Tensor, **kwargs): + pass + + def load_row_parallel_weight(self, loaded_weight: torch.Tensor): + pass + + def load_qkv_weight(self, loaded_weight: torch.Tensor, **kwargs): + pass + + +def error_loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None: + raise ValueError("No loader provided for HQQ parameter!") + + +# HQQ packing creates issues with sharding - therefore, prior to loading, we +# repack to GPTQ. We also reshape the weights to their proper GPTQ shape. +class HQQweightParameter(PackedvLLMParameter): + + # unpack function from https://github.com/mobiusml/hqq + def unpack_4bit_u8(self, + W_q: torch.Tensor) -> torch.Tensor: # uint8/2 > uint8 + assert self.weight_bits == 4, "Unsupported quant bitsize (must be 4)" + + dtype = torch.uint8 + step = W_q.shape[0] + tmp = torch.empty([2 * step, W_q.shape[1]], + dtype=dtype, + device=W_q.device) + tmp[:step] = (W_q & 0b11110000) >> 4 + tmp[step:] = W_q & 0b00001111 + return tmp + + def __init__(self, packed_factor: int, packed_dim: int, weight_bits: int, + **kwargs): + super().__init__(packed_factor, packed_dim, None, **kwargs) + self.weight_bits = weight_bits + self.input_shape = self.shape[self.input_dim] * self.packed_factor + self.output_shape = self.shape[self.output_dim] + + def load_merged_column_weight(self, loaded_weight: torch.Tensor, **kwargs): + loaded_weight = self.unpack_4bit_u8(loaded_weight) + loaded_weight = loaded_weight.reshape(-1, self.input_shape).transpose( + 1, 0) + loaded_weight = gptq_pack(loaded_weight, self.weight_bits, + loaded_weight.shape[0], + loaded_weight.shape[1]) + super().load_merged_column_weight(loaded_weight, **kwargs) + + def load_row_parallel_weight(self, loaded_weight: torch.Tensor): + loaded_weight = self.unpack_4bit_u8(loaded_weight) + loaded_weight = loaded_weight.reshape(self.output_shape, + -1).transpose(1, 0) + loaded_weight = gptq_pack(loaded_weight, self.weight_bits, + loaded_weight.shape[0], + loaded_weight.shape[1]) + super().load_row_parallel_weight(loaded_weight) + + def load_qkv_weight(self, loaded_weight: torch.Tensor, **kwargs): + loaded_weight = self.unpack_4bit_u8(loaded_weight) + loaded_weight = loaded_weight.reshape(-1, self.input_shape).transpose( + 1, 0) + loaded_weight = gptq_pack(loaded_weight, self.weight_bits, + loaded_weight.shape[0], + loaded_weight.shape[1]) + super().load_qkv_weight(loaded_weight, **kwargs) + + +# Zero points and scales in HQQ must also be reshaped to correspond to W_q's +# GPTQ shape (transposed - we transpose them too when processing weights). +class HQQZeroScaleParameter(GroupQuantScaleParameter): + + def load_merged_column_weight(self, loaded_weight: torch.Tensor, **kwargs): + loaded_weight = loaded_weight.reshape(-1, self.shape[1]) + super().load_merged_column_weight(loaded_weight, **kwargs) + + def load_row_parallel_weight(self, loaded_weight: torch.Tensor): + loaded_weight = loaded_weight.reshape(self.shape[0], -1) + super().load_row_parallel_weight(loaded_weight) + + def load_qkv_weight(self, loaded_weight: torch.Tensor, **kwargs): + loaded_weight = loaded_weight.reshape(-1, self.shape[1]) + super().load_qkv_weight(loaded_weight, **kwargs) + + +class HQQMarlinMethod(LinearMethodBase): + """Linear method for HQQ Marlin. 
+ """ + + def __init__( + self, + quant_config: HQQMarlinConfig, + ): + self.quant_config = quant_config + + def create_weights( + self, + layer: torch.nn.Module, + input_size_per_partition: int, + output_partition_sizes: List[int], + input_size: int, + output_size: int, + params_dtype: torch.dtype, + **extra_weight_attrs, + ) -> None: + self.output_size_per_partition = sum(output_partition_sizes) + self.input_size_per_partition = input_size_per_partition + + weight_loader = extra_weight_attrs.get("weight_loader", error_loader) + + self.scales_and_zp_size = (input_size_per_partition // + self.quant_config.group_size) + + qweight = HQQweightParameter( + data=torch.empty( + self.input_size_per_partition // self.quant_config.pack_factor, + self.output_size_per_partition, + dtype=torch.int32, + ), + input_dim=0, + output_dim=1, + packed_dim=0, + packed_factor=self.quant_config.pack_factor, + weight_bits=self.quant_config.weight_bits, + weight_loader=weight_loader) + + zeros = HQQZeroScaleParameter(data=torch.empty( + self.output_size_per_partition, + self.scales_and_zp_size, + dtype=params_dtype, + ), + input_dim=1, + output_dim=0, + weight_loader=weight_loader) + + scales = HQQZeroScaleParameter(data=torch.empty( + self.output_size_per_partition, + self.scales_and_zp_size, + dtype=params_dtype, + ), + input_dim=1, + output_dim=0, + weight_loader=weight_loader) + + layer.register_parameter("W_q", qweight) + layer.register_parameter("zero", zeros) + layer.register_parameter("scale", scales) + + # Ignore extra parameters in the HQQ model. + # To be added as needed. + ignore_parameters = ("axis", "channel_wise", "compute_dtype", + "encoded_state_dict", "group_size", "nbits", + "offload_meta", "optimize", "packing", + "quant_scale", "quant_zero", "round_zero", + "shape", "stores_quant_config", + "unpack_view_dtype", "view_as_float") + for name in ignore_parameters: + layer.register_parameter( + name, + HQQEmptyParameter(data=torch.empty(0), + weight_loader=weight_loader)) + + def process_weights_after_loading(self, layer: torch.nn.Module) -> None: + dev = layer.W_q.device + + # Repack to Marlin + sort_indices = torch.empty(0, dtype=torch.int, device=dev) + marlin_w_q = ops.gptq_marlin_repack( + layer.W_q, + sort_indices, + self.input_size_per_partition, + self.output_size_per_partition, + self.quant_config.weight_bits, + ).to(dev) + marlin_s = marlin_permute_scales(layer.scale.transpose(1, 0), + self.input_size_per_partition, + self.output_size_per_partition, + self.quant_config.group_size).to(dev) + marlin_zp = marlin_permute_scales(layer.zero.transpose(1, 0), + self.input_size_per_partition, + self.output_size_per_partition, + self.quant_config.group_size).to(dev) + + layer.g_idx = marlin_make_empty_g_idx(dev) + layer.g_idx_sort_indices = marlin_make_empty_g_idx(dev) + + layer.marlin_qweight = marlin_w_q + layer.marlin_zeros = marlin_zp + layer.marlin_scales = marlin_s + + def apply( + self, + layer: torch.nn.Module, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + workspace = MarlinWorkspace(self.output_size_per_partition, + GPTQ_MARLIN_MIN_THREAD_N, + GPTQ_MARLIN_MAX_PARALLEL) + + scales = layer.marlin_scales + zeros = layer.marlin_zeros + orig_type = x.dtype + + if orig_type != torch.float16: + x = x.to(torch.float16) + scales = scales.to(torch.float16) + zeros = zeros.to(torch.float16) + + marlin_out = ops.gptq_marlin_gemm( + x, + layer.marlin_qweight, + scales, + zeros, + layer.g_idx, + layer.g_idx_sort_indices, + workspace.scratch, + scalar_types.uint4, + 
x.shape[0], + self.output_size_per_partition, + self.input_size_per_partition, + True, # is_k_full + True, # has_zp + True, # use 32-bit reduce + True, # use float zp + ) + + if orig_type != torch.float16: + marlin_out = marlin_out.to(orig_type) + + if bias is not None: + marlin_out.add_(bias) + + return marlin_out diff --git a/vllm/model_executor/layers/quantization/utils/marlin_utils.py b/vllm/model_executor/layers/quantization/utils/marlin_utils.py index 9a1defa409714..c9366ca97d149 100644 --- a/vllm/model_executor/layers/quantization/utils/marlin_utils.py +++ b/vllm/model_executor/layers/quantization/utils/marlin_utils.py @@ -303,7 +303,8 @@ def apply_gptq_marlin_linear( size_k=input_size_per_partition, is_k_full=is_k_full, has_zp=False, - use_fp32_reduce=use_fp32_reduce) + use_fp32_reduce=use_fp32_reduce, + is_zp_float=False) if bias is not None: output.add_(bias) # In-place add @@ -340,7 +341,8 @@ def apply_awq_marlin_linear( size_k=input_size_per_partition, is_k_full=True, has_zp=True, - use_fp32_reduce=use_fp32_reduce) + use_fp32_reduce=use_fp32_reduce, + is_zp_float=False) if bias is not None: output.add_(bias) # In-place add From a324d3a1a74ab0a3fafc0f2d19860bd1d1301a85 Mon Sep 17 00:00:00 2001 From: Maximilien de Bayser Date: Tue, 19 Nov 2024 22:16:54 -0300 Subject: [PATCH 055/397] Change granite chat template to keep json list formatting for tool calls (#10452) Signed-off-by: Max de Bayser --- examples/tool_chat_template_granite.jinja | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/examples/tool_chat_template_granite.jinja b/examples/tool_chat_template_granite.jinja index 2cc19e77188dc..467dcb2d10237 100644 --- a/examples/tool_chat_template_granite.jinja +++ b/examples/tool_chat_template_granite.jinja @@ -21,11 +21,7 @@ {{- '<|start_of_role|>user<|end_of_role|>' + message['content'] + '<|end_of_text|> ' }} {%- elif message['role'] == 'assistant_tool_call' or (message['role'] == 'assistant' and message.tool_calls is defined) %} - {{- '<|start_of_role|>assistant<|end_of_role|>' }} - {% for tc in message.tool_calls %} - {{- '<|tool_call|> ' + {'name': tc.function.name, 'arguments': tc.function.arguments}|tojson }} - {% endfor %} - {{- '<|end_of_text|> + {{- '<|start_of_role|>assistant<|end_of_role|><|tool_call|>' + message.tool_calls|map(attribute='function')|list|tojson(indent=4) + '<|end_of_text|> ' }} {%- elif message['role'] == 'assistant' %} {{- '<|start_of_role|>assistant<|end_of_role|>' + message['content'] + '<|end_of_text|> From d5b68aba2ff6dd17060a62c0cb799c0acedb524f Mon Sep 17 00:00:00 2001 From: Alexei-V-Ivanov-AMD <156011006+Alexei-V-Ivanov-AMD@users.noreply.github.com> Date: Tue, 19 Nov 2024 19:19:59 -0600 Subject: [PATCH 056/397] [CI/Build] Update Dockerfile.rocm (#10434) Signed-off-by: Alexei V. 
Ivanov --- Dockerfile.rocm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.rocm b/Dockerfile.rocm index 8fb79afaebe97..62d4a9b4909c3 100644 --- a/Dockerfile.rocm +++ b/Dockerfile.rocm @@ -51,9 +51,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \ *"rocm-6.2"*) \ python3 -m pip uninstall -y torch torchvision \ && python3 -m pip install --pre \ - torch==2.6.0.dev20240918 \ + torch==2.6.0.dev20241113+rocm6.2 \ 'setuptools-scm>=8' \ - torchvision==0.20.0.dev20240918 \ + torchvision==0.20.0.dev20241113+rocm6.2 \ --extra-index-url https://download.pytorch.org/whl/nightly/rocm6.2;; \ *) ;; esac From d200972e7f4969da50f533b46c856c5ff5a9d27d Mon Sep 17 00:00:00 2001 From: Lucas Wilkinson Date: Tue, 19 Nov 2024 22:40:33 -0500 Subject: [PATCH 057/397] [Bugfix] Marlin 2:4 temp fix for large M dim (>256) (#10464) Signed-off-by: Lucas Wilkinson --- .../marlin/sparse/marlin_24_cuda_kernel.cu | 15 +++++++++++---- tests/kernels/test_marlin_gemm.py | 2 ++ 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu b/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu index a33e2660d760e..8fce76eb52f9b 100644 --- a/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu +++ b/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu @@ -910,13 +910,16 @@ void marlin_cuda_2_4(const void* A, const void* B, const void* meta, void* C, // than better compute utilization thread_k = 128; thread_m = 128; - } else if (prob_n <= 256) { + } else { thread_k = 64; thread_m = 256; - } else { - thread_k = 32; - thread_m = 512; } + // Also had + // if prob_n > 256 + // thread_k = 32; + // thread_m = 512; + // but this is broken, + // TODO(Lucas, Alex M): figure out why } int thread_k_blocks = thread_k / 32; // 2:4 version with m16n8k32 instruction @@ -1079,6 +1082,8 @@ torch::Tensor gptq_marlin_24_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, // Verify A device and strides TORCH_CHECK(a.device().is_cuda(), "A is not on GPU"); TORCH_CHECK(a.is_contiguous(), "A is not contiguous"); + TORCH_CHECK(a.dtype() == torch::kFloat16, + "A is not float16, currently only float16 is supported"); // Verify B device and strides TORCH_CHECK(b_q_weight.device().is_cuda(), "b_q_weight is not on GPU"); @@ -1091,6 +1096,8 @@ torch::Tensor gptq_marlin_24_gemm(torch::Tensor& a, torch::Tensor& b_q_weight, // Verify scales device and strides TORCH_CHECK(b_scales.device().is_cuda(), "b_scales is not on GPU"); TORCH_CHECK(b_scales.is_contiguous(), "b_scales is not contiguous"); + TORCH_CHECK(b_scales.dtype() == torch::kFloat16, + "A is not float16, currently only float16 is supported"); // Alloc C matrix const at::cuda::OptionalCUDAGuard device_guard(device_of(a)); diff --git a/tests/kernels/test_marlin_gemm.py b/tests/kernels/test_marlin_gemm.py index 3899ad1a325cf..5e047f4b099f1 100644 --- a/tests/kernels/test_marlin_gemm.py +++ b/tests/kernels/test_marlin_gemm.py @@ -50,6 +50,8 @@ (13, 17, 67), (26, 37, 13), (67, 13, 11), + (257, 13, 11), + (658, 13, 11), ] DTYPES = [torch.float16, torch.bfloat16] From 9e05252b46a92a5d14e4e6fd02b75383c5cf243b Mon Sep 17 00:00:00 2001 From: Yanyi Liu Date: Wed, 20 Nov 2024 12:44:57 +0800 Subject: [PATCH 058/397] [Misc] Add __setitem__ for LazyDict (#10469) Signed-off-by: Yanyi Liu --- vllm/utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/vllm/utils.py b/vllm/utils.py index 5d0514cd9d168..2bbdc8d1ebde8 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1491,6 +1491,9 @@ def __getitem__(self, key) -> 
T: self._dict[key] = self._factory[key]() return self._dict[key] + def __setitem__(self, key: str, value: Callable[[], T]): + self._factory[key] = value + def __iter__(self): return iter(self._factory) From ad44437ba33e8d31962d272be238eeed4a1b4f84 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Wed, 20 Nov 2024 13:04:05 +0800 Subject: [PATCH 059/397] [Bugfix] Fix Mamba model initialization and MLP Speculator weights loading (#10456) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/models/mamba.py | 8 ++------ vllm/model_executor/models/mlp_speculator.py | 3 ++- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index 405b8f7787ba8..ac0d265a961f0 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -1,5 +1,5 @@ """PyTorch MAMBA model.""" -from typing import Iterable, List, Optional, Set, Tuple +from typing import Iterable, List, Optional, Tuple import torch from torch import nn @@ -243,10 +243,8 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, - torch.Tensor]]) -> Set[str]: + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() for name, loaded_weight in weights: if "A_log" in name: name = name.replace("A_log", "A") @@ -258,5 +256,3 @@ def load_weights(self, weights: Iterable[Tuple[str, weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params diff --git a/vllm/model_executor/models/mlp_speculator.py b/vllm/model_executor/models/mlp_speculator.py index f2aa2653c4f5c..d49da5f29aa14 100644 --- a/vllm/model_executor/models/mlp_speculator.py +++ b/vllm/model_executor/models/mlp_speculator.py @@ -193,7 +193,8 @@ def load_weights(self, weights: Iterable[Tuple[str, params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() for name, loaded_weight in weights: - param = params_dict.get(name.replace("speculator.", "")) + name = name.replace("speculator.", "") + param = params_dict.get(name) if param is not None: weight_loader = getattr(param, "weight_loader", default_weight_loader) From b4be5a8adba95020187ae3cb43a7db7eef20c0ff Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 20 Nov 2024 13:12:51 +0800 Subject: [PATCH 060/397] [Bugfix] Enforce no chunked prefill for embedding models (#10470) Signed-off-by: DarkLight1337 --- docs/source/serving/compatibility_matrix.rst | 69 ++++++++++++++++---- vllm/engine/arg_utils.py | 6 +- 2 files changed, 60 insertions(+), 15 deletions(-) diff --git a/docs/source/serving/compatibility_matrix.rst b/docs/source/serving/compatibility_matrix.rst index f629b3ca78318..5fc86ab0a11d5 100644 --- a/docs/source/serving/compatibility_matrix.rst +++ b/docs/source/serving/compatibility_matrix.rst @@ -39,12 +39,13 @@ Feature x Feature - :abbr:`prmpt adptr (Prompt Adapter)` - :ref:`SD ` - CUDA graph + - :abbr:`emd (Embedding Models)` - :abbr:`enc-dec (Encoder-Decoder Models)` - :abbr:`logP (Logprobs)` - :abbr:`prmpt logP (Prompt Logprobs)` - :abbr:`async output (Async Output Processing)` - multi-step - - :abbr:`MM (Multimodal)` + - :abbr:`mm (Multimodal)` - best-of - beam-search - :abbr:`guided dec (Guided Decoding)` @@ -64,6 +65,7 @@ Feature x Feature - - - + - * - :ref:`APC ` - ✅ - @@ -80,6 +82,7 @@ Feature x Feature 
- - - + - * - :ref:`LoRA ` - `✗ `__ - ✅ @@ -96,6 +99,7 @@ Feature x Feature - - - + - * - :abbr:`prmpt adptr (Prompt Adapter)` - ✅ - ✅ @@ -112,6 +116,7 @@ Feature x Feature - - - + - * - :ref:`SD ` - ✗ - ✅ @@ -128,6 +133,7 @@ Feature x Feature - - - + - * - CUDA graph - ✅ - ✅ @@ -144,6 +150,24 @@ Feature x Feature - - - + - + * - :abbr:`emd (Embedding Models)` + - ✗ + - ✗ + - ✗ + - ✗ + - ✗ + - ✗ + - + - + - + - + - + - + - + - + - + - * - :abbr:`enc-dec (Encoder-Decoder Models)` - ✗ - `✗ `__ @@ -151,6 +175,7 @@ Feature x Feature - ✗ - `✗ `__ - ✅ + - ✅ - - - @@ -166,7 +191,8 @@ Feature x Feature - ✅ - ✅ - ✅ - - ✅ + - ✅ + - ✗ - ✅ - - @@ -183,7 +209,8 @@ Feature x Feature - ✅ - `✗ `__ - ✅ - - ✅ + - ✗ + - ✅ - ✅ - - @@ -199,6 +226,7 @@ Feature x Feature - ✅ - ✗ - ✅ + - ✗ - ✗ - ✅ - ✅ @@ -215,6 +243,7 @@ Feature x Feature - ✅ - ✗ - ✅ + - ✗ - ✗ - ✅ - `✗ `__ @@ -224,14 +253,15 @@ Feature x Feature - - - - * - :abbr:`MM (Multimodal)` - - `✗ `__ + * - :abbr:`mm (Multimodal)` + - ✅ - `✗ `__ - `✗ `__ - ? - ? - ✅ - - ✗ + - ✅ + - ✅ - ✅ - ✅ - ✅ @@ -247,6 +277,7 @@ Feature x Feature - ✅ - `✗ `__ - ✅ + - ✗ - ✅ - ✅ - ✅ @@ -263,6 +294,7 @@ Feature x Feature - ✅ - `✗ `__ - ✅ + - ✗ - ✅ - ✅ - ✅ @@ -279,6 +311,7 @@ Feature x Feature - ? - ✅ - ✅ + - ✗ - ? - ✅ - ✅ @@ -353,6 +386,14 @@ Feature x Hardware - ✅ - ✗ - ✅ + * - :abbr:`emd (Embedding Models)` + - ✅ + - ✅ + - ✅ + - ✅ + - ✅ + - ✅ + - ✗ * - :abbr:`enc-dec (Encoder-Decoder Models)` - ✅ - ✅ @@ -361,7 +402,7 @@ Feature x Hardware - ✅ - ✅ - ✗ - * - :abbr:`logP (Logprobs)` + * - :abbr:`mm (Multimodal)` - ✅ - ✅ - ✅ @@ -369,7 +410,7 @@ Feature x Hardware - ✅ - ✅ - ✅ - * - :abbr:`prmpt logP (Prompt Logprobs)` + * - :abbr:`logP (Logprobs)` - ✅ - ✅ - ✅ @@ -377,29 +418,29 @@ Feature x Hardware - ✅ - ✅ - ✅ - * - :abbr:`async output (Async Output Processing)` + * - :abbr:`prmpt logP (Prompt Logprobs)` - ✅ - ✅ - ✅ - ✅ - ✅ - - ✗ - - ✗ - * - multi-step - ✅ - ✅ + * - :abbr:`async output (Async Output Processing)` - ✅ - ✅ - ✅ - - `✗ `__ - ✅ - * - :abbr:`MM (Multimodal)` - ✅ + - ✗ + - ✗ + * - multi-step - ✅ - ✅ - ✅ - ✅ - ✅ + - `✗ `__ - ✅ * - best-of - ✅ diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index a3ae1889774f3..9288cd22c0036 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1014,7 +1014,8 @@ def create_engine_config(self) -> VllmConfig: use_spec_decode = self.speculative_model is not None if (is_gpu and not use_sliding_window and not use_spec_decode and not self.enable_lora - and not self.enable_prompt_adapter): + and not self.enable_prompt_adapter + and model_config.task != "embedding"): self.enable_chunked_prefill = True logger.warning( "Chunked prefill is enabled by default for models with " @@ -1031,6 +1032,9 @@ def create_engine_config(self) -> VllmConfig: "errors during the initial memory profiling phase, or result " "in low performance due to small KV cache space. 
Consider " "setting --max-model-len to a smaller value.", max_model_len) + elif self.enable_chunked_prefill and model_config.task == "embedding": + msg = "Chunked prefill is not supported for embedding models" + raise ValueError(msg) speculative_config = SpeculativeConfig.maybe_create_spec_config( target_model_config=model_config, From 709c9f1f257fd15545ad19b89ed5019cb5ea338b Mon Sep 17 00:00:00 2001 From: Rafael Vasquez Date: Wed, 20 Nov 2024 00:35:31 -0500 Subject: [PATCH 061/397] [CI/Build] Add sphinx/rst linter for docs (#10366) --- .github/workflows/sphinx-lint.yml | 32 +++++++++++++++++++++++++++++++ format.sh | 6 ++++++ requirements-lint.txt | 1 + tools/sphinx-lint.sh | 3 +++ 4 files changed, 42 insertions(+) create mode 100644 .github/workflows/sphinx-lint.yml create mode 100755 tools/sphinx-lint.sh diff --git a/.github/workflows/sphinx-lint.yml b/.github/workflows/sphinx-lint.yml new file mode 100644 index 0000000000000..e0bb24276a653 --- /dev/null +++ b/.github/workflows/sphinx-lint.yml @@ -0,0 +1,32 @@ +name: Lint documentation + +on: + push: + branches: + - main + paths: + - "docs/**" + pull_request: + branches: + - main + paths: + - "docs/**" + +jobs: + sphinx-lint: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.12"] + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements-lint.txt + - name: Linting docs + run: tools/sphinx-lint.sh diff --git a/format.sh b/format.sh index b3dcdc15bf948..0b196de9d0773 100755 --- a/format.sh +++ b/format.sh @@ -41,6 +41,7 @@ MYPY_VERSION=$(mypy --version | awk '{print $2}') CODESPELL_VERSION=$(codespell --version) ISORT_VERSION=$(isort --vn) CLANGFORMAT_VERSION=$(clang-format --version | awk '{print $3}') +SPHINX_LINT_VERSION=$(sphinx-lint --version | awk '{print $2}') # # params: tool name, tool version, required version tool_version_check() { @@ -57,6 +58,7 @@ tool_version_check "mypy" "$MYPY_VERSION" tool_version_check "isort" "$ISORT_VERSION" tool_version_check "codespell" "$CODESPELL_VERSION" tool_version_check "clang-format" "$CLANGFORMAT_VERSION" +tool_version_check "sphinx-lint" "$SPHINX_LINT_VERSION" YAPF_FLAGS=( '--recursive' @@ -313,3 +315,7 @@ if ! git diff --quiet &>/dev/null; then else echo "✨🎉 Format check passed! Congratulations! 
🎉✨" fi + +echo 'vLLM sphinx-lint:' +tools/sphinx-lint.sh +echo 'vLLM sphinx-lint: Done' diff --git a/requirements-lint.txt b/requirements-lint.txt index f9132bbf96437..711bb50a0e936 100644 --- a/requirements-lint.txt +++ b/requirements-lint.txt @@ -6,6 +6,7 @@ ruff==0.6.5 codespell==2.3.0 isort==5.13.2 clang-format==18.1.5 +sphinx-lint==1.0.0 # type checking mypy==1.11.1 diff --git a/tools/sphinx-lint.sh b/tools/sphinx-lint.sh new file mode 100755 index 0000000000000..04f8075c5527f --- /dev/null +++ b/tools/sphinx-lint.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +sphinx-lint --disable trailing-whitespace,missing-final-newline docs From 7629a9c6e5e29d60be9ef60e4afb9842effcdc73 Mon Sep 17 00:00:00 2001 From: wchen61 Date: Wed, 20 Nov 2024 13:35:50 +0800 Subject: [PATCH 062/397] [CI/Build] Support compilation with local cutlass path (#10423) (#10424) --- CMakeLists.txt | 17 +++++++++++++++-- docs/source/getting_started/installation.rst | 12 ++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 5acbd762ee957..bfe435937e3bb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -206,7 +206,19 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # Set CUTLASS_REVISION manually -- its revision detection doesn't work in this case. set(CUTLASS_REVISION "v3.5.1" CACHE STRING "CUTLASS revision to use") - FetchContent_Declare( + # Use the specified CUTLASS source directory for compilation if VLLM_CUTLASS_SRC_DIR is provided + if (DEFINED ENV{VLLM_CUTLASS_SRC_DIR}) + set(VLLM_CUTLASS_SRC_DIR $ENV{VLLM_CUTLASS_SRC_DIR}) + endif() + + if(VLLM_CUTLASS_SRC_DIR) + if(NOT IS_ABSOLUTE VLLM_CUTLASS_SRC_DIR) + get_filename_component(VLLM_CUTLASS_SRC_DIR "${VLLM_CUTLASS_SRC_DIR}" ABSOLUTE) + endif() + message(STATUS "The VLLM_CUTLASS_SRC_DIR is set, using ${VLLM_CUTLASS_SRC_DIR} for compilation") + FetchContent_Declare(cutlass SOURCE_DIR ${VLLM_CUTLASS_SRC_DIR}) + else() + FetchContent_Declare( cutlass GIT_REPOSITORY https://github.com/nvidia/cutlass.git GIT_TAG v3.5.1 @@ -216,7 +228,8 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # Important: If GIT_SHALLOW is enabled then GIT_TAG works only with branch names and tags. # So if the GIT_TAG above is updated to a commit hash, GIT_SHALLOW must be set to FALSE GIT_SHALLOW TRUE - ) + ) + endif() FetchContent_MakeAvailable(cutlass) list(APPEND VLLM_EXT_SRC diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst index f02626bda4c64..e3dbbc9affe66 100644 --- a/docs/source/getting_started/installation.rst +++ b/docs/source/getting_started/installation.rst @@ -170,6 +170,18 @@ To build vLLM using an existing PyTorch installation: $ pip install -e . --no-build-isolation +Use the local cutlass for compilation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Currently, before starting the build process, vLLM fetches cutlass code from GitHub. However, there may be scenarios where you want to use a local version of cutlass instead. +To achieve this, you can set the environment variable VLLM_CUTLASS_SRC_DIR to point to your local cutlass directory. + +.. code-block:: console + + $ git clone https://github.com/vllm-project/vllm.git + $ cd vllm + $ VLLM_CUTLASS_SRC_DIR=/path/to/cutlass pip install -e . + + Troubleshooting ~~~~~~~~~~~~~~~ From ed701ca9637306a44ba8403ba9e85be024e0dafd Mon Sep 17 00:00:00 2001 From: "Kevin H. 
Luu" Date: Tue, 19 Nov 2024 19:36:03 -1000 Subject: [PATCH 063/397] [ci/build] Combine nightly and optional (#10465) --- .buildkite/test-pipeline.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 24bf223fb12c0..501743c887596 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -9,8 +9,7 @@ # label(str): the name of the test. emoji allowed. # fast_check(bool): whether to run this on each commit on fastcheck pipeline. # fast_check_only(bool): run this test on fastcheck pipeline only -# nightly(bool): run this test in nightly pipeline only -# optional(bool): never run this test by default (i.e. need to unblock manually) +# optional(bool): never run this test by default (i.e. need to unblock manually) unless it's scheduled nightly run. # command(str): the single command to run for tests. incompatible with commands. # commands(list): the list of commands to run for test. incompatbile with command. # mirror_hardwares(list): the list of hardwares to run the test on as well. currently only supports [amd] @@ -336,7 +335,7 @@ steps: - pytest -v -s models/embedding/vision_language -m core_model - label: Language Models Test (Extended) # 50min - nightly: true + optional: true source_file_dependencies: - vllm/ - tests/models/decoder_only/language @@ -362,7 +361,7 @@ steps: - pytest -v -s models/encoder_decoder/vision_language -m core_model - label: Multi-Modal Models Test (Extended) # 1h15m - nightly: true + optional: true source_file_dependencies: - vllm/ - tests/models/decoder_only/audio_language @@ -513,6 +512,7 @@ steps: - label: Distributed Tests (A100) # optional gpu: a100 + optional: true num_gpus: 4 source_file_dependencies: - vllm/ @@ -526,6 +526,7 @@ steps: - label: LM Eval Large Models # optional gpu: a100 + optional: true num_gpus: 4 working_dir: "/vllm-workspace/.buildkite/lm-eval-harness" source_file_dependencies: From 343041c4c4db93b4693ba437df7ae8bea485d18e Mon Sep 17 00:00:00 2001 From: Sky Lee <46676799+skylee-01@users.noreply.github.com> Date: Wed, 20 Nov 2024 14:05:55 +0800 Subject: [PATCH 064/397] [model] Reduce medusa weight (#10454) Signed-off-by: skylee-01 <497627264@qq.com> --- vllm/model_executor/models/medusa.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/models/medusa.py b/vllm/model_executor/models/medusa.py index b4ed6538bddac..66bdcb89a0213 100644 --- a/vllm/model_executor/models/medusa.py +++ b/vllm/model_executor/models/medusa.py @@ -61,14 +61,25 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.truncated_vocab_size = config.truncated_vocab_size self.unpadded_vocab_size = self.truncated_vocab_size - self.lm_heads = nn.ModuleList([ - ParallelLMHead( + if getattr(config, "original_lm_head", False): + self.lm_head = ParallelLMHead( self.unpadded_vocab_size, config.hidden_size, org_num_embeddings=self.truncated_vocab_size, padding_size=DEFAULT_VOCAB_PADDING_SIZE, - ) for _ in range(self.config.num_heads) - ]) + ) + self.lm_heads = [ + self.lm_head for _ in range(self.config.num_heads) + ] + else: + self.lm_heads = nn.ModuleList([ + ParallelLMHead( + self.unpadded_vocab_size, + config.hidden_size, + org_num_embeddings=self.truncated_vocab_size, + padding_size=DEFAULT_VOCAB_PADDING_SIZE, + ) for _ in range(self.config.num_heads) + ]) logit_scale = getattr(config, "logit_scale", 1.0) self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, @@ 
-172,6 +183,9 @@ def load_weights(self, weights: Iterable[Tuple[str, requires_grad=False) elif name in params_dict: weights_map[name] = loaded_weight + elif (getattr(self.config, "original_lm_head", False) + and name == "lm_heads.0.weight"): + weights_map["lm_head.weight"] = loaded_weight for name, loaded_weight in weights_map.items(): if "lm_head" in name and self.token_map is not None and\ From 09dbf9ff16410d0f83adcc9705764ea1c7f5f017 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 20 Nov 2024 14:45:08 +0800 Subject: [PATCH 065/397] [Bugfix] Handle conflicts between modern and legacy fields (#10471) Signed-off-by: DarkLight1337 --- vllm/transformers_utils/config.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 054845584c2ef..59096753c395d 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -107,6 +107,15 @@ def patch_rope_scaling(config: PretrainedConfig) -> None: def patch_rope_scaling_dict(rope_scaling: Dict[str, Any]) -> None: + if "rope_type" in rope_scaling and "type" in rope_scaling: + rope_type = rope_scaling["rope_type"] + rope_type_legacy = rope_scaling["type"] + if rope_type != rope_type_legacy: + raise ValueError( + f"Found conflicts between 'rope_type={rope_type}' (modern " + f"field) and 'type={rope_type_legacy}' (legacy field). " + "You should only specify one of them.") + if "rope_type" not in rope_scaling and "type" in rope_scaling: rope_scaling["rope_type"] = rope_scaling["type"] logger.info("Replacing legacy 'type' key with 'rope_type'") From d5b28447e005a79dec417a706900db0dad4e1a47 Mon Sep 17 00:00:00 2001 From: Mengqing Cao Date: Wed, 20 Nov 2024 14:52:13 +0800 Subject: [PATCH 066/397] [Platforms] Refactor xpu code (#10468) Signed-off-by: MengqingCao --- vllm/executor/xpu_executor.py | 27 --------------------------- vllm/platforms/xpu.py | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/vllm/executor/xpu_executor.py b/vllm/executor/xpu_executor.py index 36b7e2265efab..ba6177e51a453 100644 --- a/vllm/executor/xpu_executor.py +++ b/vllm/executor/xpu_executor.py @@ -1,8 +1,5 @@ from typing import Callable, List, Optional, Tuple, Type, Union -import torch - -from vllm.config import ModelConfig, ParallelConfig from vllm.executor.executor_base import ExecutorAsyncBase from vllm.executor.gpu_executor import GPUExecutor from vllm.logger import init_logger @@ -23,7 +20,6 @@ def _init_executor(self) -> None: assert self.speculative_config is None, ( "Speculative decoding not yet supported for XPU backend") - self.model_config = _verify_and_get_model_config(self.model_config) GPUExecutor._init_executor(self) def _get_worker_module_and_class( @@ -53,26 +49,3 @@ async def execute_model_async( output = await make_async(self.driver_worker.execute_model )(execute_model_req=execute_model_req) return output - - -def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig: - if config.dtype == torch.bfloat16: - logger.warning( - "bfloat16 is not fully supported on XPU, casting to float16.") - config.dtype = torch.float16 - if not config.enforce_eager: - logger.warning( - "CUDA graph is not supported on XPU, fallback to the eager " - "mode.") - config.enforce_eager = True - return config - - -def _verify_and_get_parallel_config(config: ParallelConfig) -> ParallelConfig: - if (config.distributed_executor_backend is not None - and config.distributed_executor_backend != "ray"): - logger.warning( - "%s is not 
supported on XPU, fallback to ray distributed executor " - "backend.", config.distributed_executor_backend) - config.distributed_executor_backend = "ray" - return config diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index d0b3dca9a4195..62db285f6696a 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,9 +1,16 @@ +from typing import TYPE_CHECKING + import torch from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum, _Backend +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + logger = init_logger(__name__) @@ -34,3 +41,17 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: @staticmethod def inference_mode(): return torch.no_grad() + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + # check and update model config + model_config = vllm_config.model_config + if model_config.dtype == torch.bfloat16: + logger.warning( + "bfloat16 is not fully supported on XPU, casting to float16.") + model_config.dtype = torch.float16 + if not model_config.enforce_eager: + logger.warning( + "CUDA graph is not supported on XPU, fallback to the eager " + "mode.") + model_config.enforce_eager = True From 63f1fde277d063fbd36ccf43cb709fafca754ed5 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Wed, 20 Nov 2024 18:57:39 +0800 Subject: [PATCH 067/397] [Hardware][CPU] Support chunked-prefill and prefix-caching on CPU (#10355) Signed-off-by: jiang1.li --- .buildkite/run-cpu-test.sh | 9 +- .../getting_started/cpu-installation.rst | 10 +- docs/source/serving/compatibility_matrix.rst | 4 +- .../basic_correctness/test_chunked_prefill.py | 63 ++- vllm/attention/backends/torch_sdpa.py | 189 +++++-- vllm/attention/ops/ipex_attn.py | 150 ++++-- vllm/platforms/cpu.py | 15 +- vllm/worker/cpu_model_runner.py | 488 ++++++++---------- 8 files changed, 559 insertions(+), 369 deletions(-) diff --git a/.buildkite/run-cpu-test.sh b/.buildkite/run-cpu-test.sh index f0128f091b742..4f1729d46dae2 100644 --- a/.buildkite/run-cpu-test.sh +++ b/.buildkite/run-cpu-test.sh @@ -25,6 +25,7 @@ docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/hugg function cpu_tests() { set -e + export NUMA_NODE=$2 # offline inference docker exec cpu-test-avx2-"$NUMA_NODE" bash -c " @@ -57,6 +58,12 @@ function cpu_tests() { pytest -s -v \ tests/quantization/test_ipex_quant.py" + # Run chunked-prefill and prefix-cache test + docker exec cpu-test-"$NUMA_NODE" bash -c " + set -e + pytest -s -v -k cpu_model \ + tests/basic_correctness/test_chunked_prefill.py" + # online inference docker exec cpu-test-"$NUMA_NODE" bash -c " set -e @@ -75,4 +82,4 @@ function cpu_tests() { # All of CPU tests are expected to be finished less than 25 mins. export -f cpu_tests -timeout 25m bash -c "cpu_tests $CORE_RANGE" +timeout 30m bash -c "cpu_tests $CORE_RANGE $NUMA_NODE" diff --git a/docs/source/getting_started/cpu-installation.rst b/docs/source/getting_started/cpu-installation.rst index 69530fd778c55..649de1cd9b53c 100644 --- a/docs/source/getting_started/cpu-installation.rst +++ b/docs/source/getting_started/cpu-installation.rst @@ -5,11 +5,11 @@ Installation with CPU vLLM initially supports basic model inferencing and serving on x86 CPU platform, with data types FP32, FP16 and BF16. vLLM CPU backend supports the following vLLM features: -- Tensor Parallel (``-tp = N``) -- Quantization (``INT8 W8A8, AWQ``) - -.. 
note:: - More advanced features on `chunked-prefill`, `prefix-caching` and `FP8 KV cache` are under development and will be available soon. +- Tensor Parallel +- Model Quantization (``INT8 W8A8, AWQ``) +- Chunked-prefill +- Prefix-caching +- FP8-E5M2 KV-Caching (TODO) Table of contents: diff --git a/docs/source/serving/compatibility_matrix.rst b/docs/source/serving/compatibility_matrix.rst index 5fc86ab0a11d5..a4300761d2635 100644 --- a/docs/source/serving/compatibility_matrix.rst +++ b/docs/source/serving/compatibility_matrix.rst @@ -344,7 +344,7 @@ Feature x Hardware - ✅ - ✅ - ✅ - - ✗ + - ✅ - ✅ * - :ref:`APC ` - `✗ `__ @@ -352,7 +352,7 @@ Feature x Hardware - ✅ - ✅ - ✅ - - ✗ + - ✅ - ✅ * - :ref:`LoRA ` - ✅ diff --git a/tests/basic_correctness/test_chunked_prefill.py b/tests/basic_correctness/test_chunked_prefill.py index cc5bc2aca27c9..469d18a4dd7af 100644 --- a/tests/basic_correctness/test_chunked_prefill.py +++ b/tests/basic_correctness/test_chunked_prefill.py @@ -12,6 +12,7 @@ import pytest from tests.kernels.utils import override_backend_env_variable +from vllm.platforms import current_platform from ..models.utils import check_logprobs_close, check_outputs_equal from ..utils import multi_gpu_test @@ -206,12 +207,14 @@ def test_models_with_fp8_kv_cache( # NOTE: Increasing this in this suite will fail CI because we currently cannot # reset distributed env properly. Use a value > 1 just when you test. @pytest.mark.parametrize("tensor_parallel_size", [1]) +@pytest.mark.parametrize("dtype", ["half"]) def test_with_prefix_caching( vllm_runner, max_tokens: int, enforce_eager: bool, chunk_size: int, tensor_parallel_size: int, + dtype: str, ) -> None: """ Checks exact match decode with and without prefix caching @@ -233,7 +236,7 @@ def test_with_prefix_caching( for enable in (True, False): with vllm_runner( model, - dtype="half", + dtype=dtype, max_num_batched_tokens=max_num_batched_tokens, enable_chunked_prefill=True, enable_prefix_caching=enable, @@ -260,3 +263,61 @@ def test_with_prefix_caching( name_0="w/o prefix caching", name_1="with prefix caching", ) + + +@pytest.mark.parametrize("model", ["facebook/opt-125m"]) +@pytest.mark.parametrize("dtype", ["bfloat16"]) +@pytest.mark.parametrize("max_tokens", [32]) +@pytest.mark.parametrize("chunked_prefill_token_size", [1, 4, 16]) +@pytest.mark.parametrize("enforce_eager", [False]) +@pytest.mark.parametrize("attention_backend", ["TORCH_SDPA"]) +@pytest.mark.cpu_model +@pytest.mark.skipif(not current_platform.is_cpu(), reason="CPU only") +def test_models_cpu( + hf_runner, + vllm_runner, + example_prompts, + model: str, + dtype: str, + max_tokens: int, + chunked_prefill_token_size: int, + enforce_eager: bool, + attention_backend: str, + monkeypatch, +) -> None: + test_models( + hf_runner, + vllm_runner, + example_prompts, + model, + dtype, + max_tokens, + chunked_prefill_token_size, + enforce_eager, + 1, + attention_backend, + monkeypatch, + ) + + +@pytest.mark.parametrize("max_tokens", [16]) +@pytest.mark.parametrize("enforce_eager", [False]) +@pytest.mark.parametrize("chunk_size", [30, 32]) +@pytest.mark.parametrize("dtype", ["bfloat16"]) +@pytest.mark.cpu_model +@pytest.mark.skipif(not current_platform.is_cpu(), reason="CPU only") +def test_with_prefix_caching_cpu( + vllm_runner, + max_tokens: int, + enforce_eager: bool, + chunk_size: int, + dtype: str, +) -> None: + test_with_prefix_caching( + vllm_runner, + max_tokens, + enforce_eager, + chunk_size, + 1, + dtype, + ) diff --git a/vllm/attention/backends/torch_sdpa.py 
b/vllm/attention/backends/torch_sdpa.py index 563178d3ab60d..3d025df26a7a1 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -7,18 +7,14 @@ from torch.nn.functional import scaled_dot_product_attention from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, - AttentionMetadata, AttentionType) + AttentionMetadata, + AttentionMetadataBuilder, + AttentionType) from vllm.attention.backends.utils import CommonAttentionState +from vllm.attention.ops.ipex_attn import PagedAttention from vllm.attention.ops.paged_attn import PagedAttentionMetadata -from vllm.platforms import current_platform - -if current_platform.is_cpu(): - try: - from vllm.attention.ops.ipex_attn import PagedAttention - except ImportError: - from vllm.attention.ops.paged_attn import PagedAttention -else: - from vllm.attention.ops.paged_attn import PagedAttention +from vllm.utils import make_tensor_with_pad +from vllm.worker.cpu_model_runner import ModelInputForCPUBuilder class TorchSDPABackend(AttentionBackend): @@ -39,6 +35,10 @@ def get_metadata_cls() -> Type["AttentionMetadata"]: def get_state_cls() -> Type["CommonAttentionState"]: return CommonAttentionState + @staticmethod + def get_builder_cls() -> Type["TorchSDPAMetadataBuilder"]: + return TorchSDPAMetadataBuilder + @staticmethod def get_kv_cache_shape( num_blocks: int, @@ -71,9 +71,15 @@ class TorchSDPAMetadata(AttentionMetadata, PagedAttentionMetadata): """ # Currently, input sequences can only contain all prompts # or all decoding. True if all sequences are prompts. - is_prompt: bool - slot_mapping: torch.Tensor - seq_lens: Optional[List[int]] + chunked_prefill: bool + seq_lens: Optional[List[int]] = None # For non-chunked prefill + + # For chunked prefill only + max_query_len: Optional[int] = None + max_kv_len: Optional[int] = None + query_start_loc: Optional[torch.Tensor] = None + kv_start_loc: Optional[torch.Tensor] = None + prefill_block_tables: Optional[torch.Tensor] = None # Begin encoder attn & enc/dec cross-attn fields... 
# Encoder sequence lengths representation @@ -123,20 +129,14 @@ def is_all_cross_attn_metadata_set(self): @property def prefill_metadata(self) -> Optional["TorchSDPAMetadata"]: - # Currently chunked prefill is not supported - if self.num_decode_tokens == 0: - assert self.num_prefills > 0 - return self - - return None + if self.num_prefill_tokens == 0: + return None + return self @property def decode_metadata(self) -> Optional["TorchSDPAMetadata"]: - # Currently chunked prefill is not supported - if self.num_prefills > 0: - assert self.num_decode_tokens == 0 + if self.num_decode_tokens == 0: return None - return self def get_seq_lens( @@ -274,6 +274,105 @@ def get_seq_len_block_table_args( raise AttributeError(f"Invalid attention type {str(attn_type)}") +class TorchSDPAMetadataBuilder(AttentionMetadataBuilder[TorchSDPAMetadata]): + + def __init__(self, input_builder: ModelInputForCPUBuilder) -> None: + self.chunked_prefill = input_builder.chunked_prefill + self.input_data = input_builder.input_data + + def build(self, seq_lens: List[int], query_lens: List[int], + cuda_graph_pad_size: int, batch_size: int) -> TorchSDPAMetadata: + input_data = self.input_data + prefill_seq_lens = seq_lens[0:input_data.num_prefills] + prefill_query_lens = query_lens[0:input_data.num_prefills] + slot_mapping = torch.tensor(input_data.slot_mapping, + dtype=torch.long, + device="cpu") + + # For chunked-prefill + if self.chunked_prefill and input_data.num_prefill_tokens != 0: + prefill_block_tables = make_tensor_with_pad( + self.input_data.prefill_block_tables, + pad=0, + dtype=torch.int32, + device="cpu", + ) + query_lens_tensor = torch.tensor(prefill_query_lens, + dtype=torch.int32, + device="cpu") + kv_lens_tensor = torch.tensor(prefill_seq_lens, + dtype=torch.int32, + device="cpu") + query_start_loc = torch.zeros(input_data.num_prefills + 1, + dtype=torch.int32, + device="cpu") + kv_start_loc = torch.zeros(input_data.num_prefills + 1, + dtype=torch.int32, + device="cpu") + torch.cumsum(query_lens_tensor, + dim=0, + dtype=torch.int32, + out=query_start_loc[1:]) + torch.cumsum(kv_lens_tensor, + dim=0, + dtype=torch.int32, + out=kv_start_loc[1:]) + max_query_len = max(prefill_query_lens) + max_kv_len = max(prefill_seq_lens) + else: + prefill_block_tables = None + query_start_loc = None + kv_start_loc = None + max_query_len = None + max_kv_len = None + + # For paged attention + if input_data.num_decode_tokens != 0: + seq_lens_tensor = torch.tensor( + input_data.seq_lens[input_data.num_prefills:], + dtype=torch.int32, + device="cpu", + ) + block_tables = make_tensor_with_pad( + self.input_data.decode_block_tables, + pad=0, + dtype=torch.int32, + device="cpu", + ) + else: + block_tables = torch.tensor([]) + seq_lens_tensor = torch.tensor([]) + + # For multi-modal models + placeholder_index_maps = None + if len(input_data.multi_modal_inputs_list) != 0: + placeholder_index_maps = { + modality: placeholder_map.index_map() + for modality, placeholder_map in + input_data.multi_modal_placeholder_maps.items() + } + + attn_metadata = TorchSDPAMetadata( + chunked_prefill=self.chunked_prefill, + seq_lens=prefill_seq_lens, + seq_lens_tensor=seq_lens_tensor, + max_query_len=max_query_len, + max_kv_len=max_kv_len, + query_start_loc=query_start_loc, + kv_start_loc=kv_start_loc, + max_decode_seq_len=input_data.max_decode_seq_len, + num_prefills=input_data.num_prefills, + num_prefill_tokens=input_data.num_prefill_tokens, + num_decode_tokens=input_data.num_decode_tokens, + block_tables=block_tables, + 
prefill_block_tables=prefill_block_tables, + slot_mapping=slot_mapping, + multi_modal_placeholder_index_maps=placeholder_index_maps, + ) + + return attn_metadata + + class TorchSDPABackendImpl(AttentionImpl[TorchSDPAMetadata]): def __init__( @@ -409,19 +508,35 @@ def forward( assert key.shape[0] == num_prefill_tokens + num_decode_tokens assert value.shape[0] == num_prefill_tokens + num_decode_tokens + output = torch.empty_like(query) if prefill_meta := attn_metadata.prefill_metadata: assert attn_metadata.seq_lens is not None - if (kv_cache.numel() == 0 - or prefill_meta.block_tables.numel() == 0): - output = self._run_sdpa_forward(query, - key, - value, - prefill_meta, - attn_type=attn_type) + if not prefill_meta.prefill_metadata.chunked_prefill: # type: ignore + self._run_sdpa_forward(output, + query, + key, + value, + prefill_meta, + attn_type=attn_type) else: # prefix-enabled attention - raise RuntimeError( - "Torch SDPA backend doesn't support prefix decoding.") + assert not self.need_mask + import intel_extension_for_pytorch.llm.modules as ipex_modules + output = torch.empty_like(query) + ipex_modules.PagedAttention.flash_attn_varlen_func( + output[:prefill_meta.num_prefill_tokens, :, :], + query[:prefill_meta.num_prefill_tokens, :, :], + key_cache, + value_cache, + prefill_meta.query_start_loc, + prefill_meta.kv_start_loc, + prefill_meta.max_query_len, + prefill_meta.max_kv_len, + self.scale, + True, + prefill_meta.prefill_block_tables, + self.alibi_slopes, + ) if decode_meta := attn_metadata.decode_metadata: assert attn_type != AttentionType.ENCODER_ONLY, ( @@ -433,8 +548,9 @@ def forward( block_tables_arg, ) = decode_meta.get_seq_len_block_table_args(attn_type) - output = PagedAttention.forward_decode( - query, + PagedAttention.forward_decode( + output[attn_metadata.num_prefill_tokens:, :, :], + query[attn_metadata.num_prefill_tokens:, :, :], key_cache, value_cache, block_tables_arg, @@ -453,12 +569,13 @@ def forward( def _run_sdpa_forward( self, + output: torch.Tensor, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_metadata: TorchSDPAMetadata, attn_type: AttentionType = AttentionType.DECODER, - ): + ) -> None: if self.num_kv_heads != self.num_heads: key = key.repeat_interleave(self.num_queries_per_kv, dim=1) value = value.repeat_interleave(self.num_queries_per_kv, dim=1) @@ -479,7 +596,6 @@ def _run_sdpa_forward( attn_masks = [None] * len(seq_lens) attn_metadata.set_attn_bias(attn_masks, attn_type) - output = torch.empty_like(query) query = query.movedim(0, query.dim() - 2) key = key.movedim(0, key.dim() - 2) value = value.movedim(0, value.dim() - 2) @@ -502,7 +618,6 @@ def _run_sdpa_forward( scale=self.scale).squeeze(0).movedim(query.dim() - 2, 0) output[start_q:end_q, :, :] = sub_out start_q, start_kv = end_q, end_kv - return output def _make_alibi_bias( diff --git a/vllm/attention/ops/ipex_attn.py b/vllm/attention/ops/ipex_attn.py index 8df6d4ced9dc6..cbc6c74acf09a 100644 --- a/vllm/attention/ops/ipex_attn.py +++ b/vllm/attention/ops/ipex_attn.py @@ -1,12 +1,17 @@ from typing import Dict, List, Optional, Tuple -import intel_extension_for_pytorch.llm.modules as ipex_modules +try: + import intel_extension_for_pytorch.llm.modules as ipex_modules + _use_ipex = True +except ImportError: + _use_ipex = False + import torch from vllm import _custom_ops as ops -class PagedAttention: +class _PagedAttention: @staticmethod def get_supported_head_sizes() -> List[int]: @@ -22,6 +27,105 @@ def get_kv_cache_shape( ) -> Tuple[int, ...]: return (2, num_blocks, block_size * 
num_kv_heads * head_size) + @staticmethod + def split_kv_cache( + kv_cache: torch.Tensor, + num_kv_heads: int, + head_size: int, + *args, + ) -> Tuple[torch.Tensor, torch.Tensor]: + x = 16 // kv_cache.element_size() + num_blocks = kv_cache.shape[1] + + key_cache = kv_cache[0] + key_cache = key_cache.view(num_blocks, num_kv_heads, head_size // x, + -1, x) + value_cache = kv_cache[1] + value_cache = value_cache.view(num_blocks, num_kv_heads, head_size, -1) + return key_cache, value_cache + + @staticmethod + def write_to_paged_cache( + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slot_mapping: torch.Tensor, + kv_cache_dtype: str, + k_scale: float, + v_scale: float, + *args, + ) -> None: + ops.reshape_and_cache( + key, + value, + key_cache, + value_cache, + slot_mapping.flatten(), + kv_cache_dtype, + k_scale, + v_scale, + ) + + @staticmethod + def forward_decode( + output: torch.Tensor, + query: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + block_tables: torch.Tensor, + context_lens: torch.Tensor, + max_context_len: int, + kv_cache_dtype: str, + num_kv_heads: int, + scale: float, + alibi_slopes: Optional[torch.Tensor], + k_scale: float, + v_scale: float, + *args, + ) -> None: + tp_rank: int = 0 + blocksparse_local_blocks: int = 0 + blocksparse_vert_stride: int = 0 + blocksparse_block_size: int = 64 + blocksparse_head_sliding_step: int = 0 + block_size = value_cache.shape[3] + + ops.paged_attention_v1( + output, + query, + key_cache, + value_cache, + num_kv_heads, + scale, + block_tables, + context_lens, + block_size, + max_context_len, + alibi_slopes, + kv_cache_dtype, + k_scale, + v_scale, + tp_rank, + blocksparse_local_blocks, + blocksparse_vert_stride, + blocksparse_block_size, + blocksparse_head_sliding_step, + ) + + @staticmethod + def copy_blocks( + kv_caches: List[torch.Tensor], + src_to_dists: Dict[int, List[int]], + *args, + ) -> None: + key_caches = [kv_cache[0] for kv_cache in kv_caches] + value_caches = [kv_cache[1] for kv_cache in kv_caches] + ops.copy_blocks(key_caches, value_caches, src_to_dists) + + +class _IPEXPagedAttention(_PagedAttention): + @staticmethod def split_kv_cache( kv_cache: torch.Tensor, @@ -55,6 +159,7 @@ def write_to_paged_cache( @staticmethod def forward_decode( + output: torch.Tensor, query: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, @@ -68,8 +173,7 @@ def forward_decode( k_scale: float, v_scale: float, *args, - ) -> torch.Tensor: - output = torch.empty_like(query) + ) -> None: block_size = value_cache.shape[2] head_mapping = torch.arange( 0, @@ -83,41 +187,5 @@ def forward_decode( scale, block_tables, context_lens, block_size, max_context_len, alibi_slopes) - return output - - @staticmethod - def forward_prefix( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - kv_cache_dtype: str, - key_cache: torch.Tensor, - value_cache: torch.Tensor, - block_tables: torch.Tensor, - subquery_start_loc: torch.Tensor, - prompt_lens_tensor: torch.Tensor, - context_lens: torch.Tensor, - max_subquery_len: int, - alibi_slopes: Optional[torch.Tensor], - *args, - ) -> torch.Tensor: - raise NotImplementedError - - @staticmethod - def swap_blocks( - src_kv_cache: torch.Tensor, - dst_kv_cache: torch.Tensor, - src_to_dst: Dict[int, int], - *args, - ) -> None: - raise NotImplementedError - @staticmethod - def copy_blocks( - kv_caches: List[torch.Tensor], - src_to_dists: Dict[int, List[int]], - *args, - ) -> None: - key_caches = [kv_cache[0] for kv_cache in 
kv_caches] - value_caches = [kv_cache[1] for kv_cache in kv_caches] - ops.copy_blocks(key_caches, value_caches, src_to_dists) +PagedAttention = _IPEXPagedAttention if _use_ipex else _PagedAttention diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index f9a34a47959ec..43cbafe709d84 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -53,11 +53,6 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: cache_config = vllm_config.cache_config - if cache_config.enable_prefix_caching: - logger.warning( - "Prefix caching is not supported on CPU, disable it.") - cache_config.enable_prefix_caching = False - kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE if kv_cache_space >= 0: @@ -74,10 +69,12 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: f" {kv_cache_space}, expect a positive integer value.") scheduler_config = vllm_config.scheduler_config - if scheduler_config.chunked_prefill_enabled: - logger.warning( - "Chunked prefill is not supported on CPU, disable it.") - scheduler_config.chunked_prefill_enabled = False + if ((scheduler_config.chunked_prefill_enabled + or cache_config.enable_prefix_caching) + and model_config.dtype == torch.half): + logger.warning("Chunked-prefill on the CPU backend only does not" + " support fp16 for now, cast to bf16.") + model_config.dtype = torch.bfloat16 parallel_config = vllm_config.parallel_config if (parallel_config.distributed_executor_backend is not None diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index d3e1202c15e61..66bd844c94901 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -2,8 +2,8 @@ import weakref from collections import defaultdict from dataclasses import dataclass -from typing import (TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, - TypeVar, Union) +from typing import (TYPE_CHECKING, Any, Dict, List, Optional, Type, TypeVar, + Union) import torch from torch import nn @@ -19,7 +19,6 @@ MultiModalKwargs, MultiModalPlaceholderMap) from vllm.sequence import (IntermediateTensors, SequenceData, SequenceGroupMetadata) -from vllm.utils import make_tensor_with_pad from vllm.worker.model_runner_base import ( ModelRunnerBase, ModelRunnerInputBase, ModelRunnerInputBuilderBase, _add_attn_metadata_broadcastable_dict, @@ -104,65 +103,223 @@ def from_broadcasted_tensor_dict( class ModelInputForCPUBuilder(ModelRunnerInputBuilderBase[ModelInputForCPU]): + class ModelInputData: + + def __init__(self, use_mrope: bool): + self.use_mrope = use_mrope + self.input_tokens: List[int] = [] + self.input_positions: Optional[ + List[int]] = [] if not self.use_mrope else None + self.seq_lens: List[int] = [] + self.query_lens: List[int] = [] + self.prefill_block_tables: List[List[int]] = [] + self.decode_block_tables: List[List[int]] = [] + self.max_decode_seq_len: int = 0 + self.num_prefills: int = 0 + self.num_prefill_tokens: int = 0 + self.num_decode_tokens: int = 0 + self.slot_mapping: List[int] = [] + self.multi_modal_inputs_list: List[MultiModalKwargs] = [] + self.multi_modal_placeholder_maps: Dict[ + str, MultiModalPlaceholderMap] = defaultdict( + MultiModalPlaceholderMap) + self.input_mrope_positions: Optional[List[List[int]]] = [ + [] for _ in range(3) + ] if self.use_mrope else None + def __init__(self, runner: "CPUModelRunner", finished_requests_ids: Optional[List[str]] = None) -> None: super().__init__() self.seq_group_metadata_list: List[SequenceGroupMetadata] = [] self.runner = runner + + self.chunked_prefill = 
(runner.scheduler_config.chunked_prefill_enabled + or runner.cache_config.enable_prefix_caching) self.model_input_cls = self.runner._model_input_cls self.attn_backend = self.runner.attn_backend - self.sliding_window = self.runner.sliding_window - self.block_size = self.runner.block_size - self.device = self.runner.device self.multi_modal_input_mapper = self.runner.multi_modal_input_mapper + self.input_data = ModelInputForCPUBuilder.ModelInputData( + self.runner.model_config.uses_mrope) + self.att_metadata_builder = self.runner.attn_backend.get_builder_cls()( + self) def add_seq_group(self, seq_group_metadata: SequenceGroupMetadata): self.seq_group_metadata_list.append(seq_group_metadata) + def set_seq_group_list( + self, seq_group_metadata_list: List[SequenceGroupMetadata]): + self.seq_group_metadata_list = seq_group_metadata_list + def build(self) -> ModelInputForCPU: + self._build_input_data() + + input_data = self.input_data + input_tokens = torch.tensor(input_data.input_tokens, + dtype=torch.long, + device="cpu") + input_positions = torch.tensor( + input_data.input_positions + if not input_data.use_mrope else input_data.input_mrope_positions, + dtype=torch.long, + device="cpu") + + # For multi-modal models multi_modal_kwargs = None - # NOTE: We assume that all sequences in the group are all prompts or - # all decodes. - is_prompt = self.seq_group_metadata_list[0].is_prompt - # Prepare input tensors. - if is_prompt: - (input_tokens, input_positions, attn_metadata, seq_lens, - multi_modal_kwargs) = self._prepare_prompt( - self.seq_group_metadata_list) - else: - (input_tokens, input_positions, - attn_metadata) = self._prepare_decode( - self.seq_group_metadata_list) - seq_lens = None + if len(input_data.multi_modal_inputs_list) != 0: + multi_modal_kwargs = MultiModalKwargs.batch( + input_data.multi_modal_inputs_list) + + attn_metadata = self.att_metadata_builder.build( + input_data.seq_lens, input_data.query_lens, -1, -1) return self.model_input_cls( input_tokens=input_tokens, input_positions=input_positions, + seq_lens=input_data.seq_lens, + query_lens=input_data.query_lens, attn_metadata=attn_metadata, multi_modal_kwargs=multi_modal_kwargs, - # query_lens is not needed if chunked prefill is not - # supported. Since CPU worker doesn't support chunked prefill - # just use seq_lens instead. - seq_lens=seq_lens, - query_lens=seq_lens, ) - def _compute_multi_modal_input( - self, - seq_data: SequenceData, - computed_len: int, - seq_group_metadata: SequenceGroupMetadata, - ): + def _build_input_data(self): + for seq_group_metadata in self.seq_group_metadata_list: + for seq_id, seq_data in seq_group_metadata.seq_data.items(): + if seq_group_metadata.is_prompt: + self._compute_prompt_input_tokens(self.input_data, + seq_group_metadata, + seq_data, seq_id) + if seq_group_metadata.multi_modal_data: + self._compute_multi_modal_input( + seq_group_metadata, seq_data) + else: + self._compute_decode_input_tokens(self.input_data, + seq_group_metadata, + seq_data, seq_id) + + def _compute_decode_input_tokens(self, data: ModelInputData, + seq_group_metadata: SequenceGroupMetadata, + seq_data: SequenceData, seq_id: int): + """ + Compute decode input tokens, positions, block table and slot mapping. 
+ """ + block_size = self.runner.block_size + + block_table = seq_group_metadata.block_tables[seq_id] + seq_len = seq_data.get_len() + context_len = seq_data.get_num_computed_tokens() + + tokens = seq_data.get_last_token_id() + token_positions = seq_len - 1 + block_number = block_table[token_positions // block_size] + block_offset = token_positions % block_size + slot = block_number * block_size + block_offset + + # For paged_attention kernel + if self.runner.sliding_window: + start_idx = max(0, seq_len - self.runner.sliding_window) + start_block = start_idx // block_size + start_idx = start_block * block_size + seq_len = seq_len - start_idx + block_table = block_table[start_block:] + + # For MRotaryEmbedding + if data.input_positions is None: + next_pos = MRotaryEmbedding.get_next_input_positions( + seq_data.mrope_position_delta, + context_len, + seq_len, + ) + for idx in range(3): + data.input_mrope_positions[idx].extend( # type: ignore + next_pos[idx]) + else: + data.input_positions.append(token_positions) # type: ignore + + # Update fields + data.input_tokens.append(tokens) + data.max_decode_seq_len = max(data.max_decode_seq_len, seq_len) + data.num_decode_tokens += 1 + data.slot_mapping.append(slot) + data.decode_block_tables.append(block_table) + data.query_lens.append(1) + data.seq_lens.append(seq_len) + + def _compute_prompt_input_tokens(self, data: ModelInputData, + seq_group_metadata: SequenceGroupMetadata, + seq_data: SequenceData, seq_id: int): + """ + Compute prompt input tokens, positions, block table and slot mapping. + """ + token_chunk_size = seq_group_metadata.token_chunk_size + block_size = self.runner.block_size + + block_table = seq_group_metadata.block_tables[seq_id] + seq_len = seq_data.get_len() + context_len = seq_data.get_num_computed_tokens() + seq_len = min(seq_len, context_len + token_chunk_size) + + # For prefix caching + prefix_cache_block_num = len(seq_group_metadata.computed_block_nums) + if prefix_cache_block_num > 0: + prefix_cache_len = (prefix_cache_block_num * + self.runner.block_size) + if prefix_cache_len <= context_len: + # We already passed the cache hit region, + # so do normal computation. + pass + elif context_len < prefix_cache_len < seq_len: + # Partial hit. Compute the missing part. + context_len = prefix_cache_len + token_chunk_size = seq_len - context_len + elif seq_len <= prefix_cache_len: + # Full hit. Only compute the last token to avoid + # erroneous behavior. FIXME: Ideally we should directly + # mark all tokens as computed in the scheduler and do not + # schedule this sequence, so this case should not happen. + context_len = seq_len - 1 + token_chunk_size = 1 + + tokens = seq_data.get_token_ids() + tokens = tokens[context_len:seq_len] + token_positions = range(context_len, seq_len) + + # For encoder-only models, the block_table is None, + # and there is no need to initialize the slot_mapping. 
+ if block_table is not None: + slot_mapping = [_PAD_SLOT_ID] * len(token_positions) + for i, pos in enumerate(token_positions): + block_number = block_table[pos // block_size] + block_offset = pos % block_size + slot = block_number * block_size + block_offset + slot_mapping[i] = slot + data.slot_mapping.extend(slot_mapping) + + # The MROPE positions are prepared in _compute_multi_modal_input + if data.input_positions is not None: + data.input_positions.extend(token_positions) + + # Update fields + data.input_tokens.extend(tokens) + data.num_prefills += 1 + data.num_prefill_tokens += len(tokens) + data.query_lens.append(len(tokens)) + data.prefill_block_tables.append(block_table) + data.seq_lens.append(seq_len) + + def _compute_multi_modal_input(self, + seq_group_metadata: SequenceGroupMetadata, + seq_data: SequenceData): + computed_len = seq_data.get_num_computed_tokens() + seq_len = self.input_data.seq_lens[-1] + # NOTE: mm_data only includes the subset of multi-modal items that # intersect with the current prefill positions. mm_data, placeholder_maps = MultiModalPlaceholderMap.from_seq_group( - seq_group_metadata, - range(computed_len, len(seq_data.get_token_ids())), - ) + seq_group_metadata, range(computed_len, seq_len)) if not mm_data: - return None, None, None + return if self.runner.mm_registry.has_processor(self.runner.model_config): mm_kwargs = mm_data @@ -173,8 +330,10 @@ def _compute_multi_modal_input( ) # special processing for mrope position deltas. - mrope_positions = None if self.runner.model_config.uses_mrope: + assert not self.chunked_prefill, \ + "MROPE on CPU does not support chunked-prefill." + image_grid_thw = mm_kwargs.get("image_grid_thw", None) video_grid_thw = mm_kwargs.get("video_grid_thw", None) assert image_grid_thw is not None or video_grid_thw is not None, ( @@ -198,226 +357,15 @@ def _compute_multi_modal_input( context_len=computed_len, ) seq_data.mrope_position_delta = mrope_position_delta - return mm_kwargs, placeholder_maps, mrope_positions - def _prepare_prompt( - self, - seq_group_metadata_list: List[SequenceGroupMetadata], - ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, List[int], - BatchedTensorInputs]: - assert len(seq_group_metadata_list) > 0 - input_tokens: List[int] = [] - input_positions: List[int] = [] - input_mrope_positions: List[List[int]] = [[] for _ in range(3)] - - slot_mapping: List[int] = [] - seq_lens: List[int] = [] - multi_modal_kwargs_list: List[MultiModalKwargs] = [] - multi_modal_placeholder_maps: Dict[ - str, - MultiModalPlaceholderMap] = defaultdict(MultiModalPlaceholderMap) - - for seq_group_metadata in seq_group_metadata_list: - assert seq_group_metadata.is_prompt - seq_ids = list(seq_group_metadata.seq_data.keys()) - assert len(seq_ids) == 1 - seq_id = seq_ids[0] - - seq_data = seq_group_metadata.seq_data[seq_id] - prompt_tokens = seq_data.get_token_ids() - computed_len = seq_data.get_num_computed_tokens() - seq_len = len(prompt_tokens) - - seq_lens.append(seq_len) # Prompt token num - input_tokens.extend(prompt_tokens) # Token ids - - mrope_positions = None - if seq_group_metadata.multi_modal_data: - ( - mm_kwargs, - placeholder_maps, - mrope_positions, - ) = self._compute_multi_modal_input(seq_data, computed_len, - seq_group_metadata) - - multi_modal_kwargs_list.append(mm_kwargs) - for modality, placeholder_map in placeholder_maps.items(): - multi_modal_placeholder_maps[modality].extend( - placeholder_map) - - # Token position ids - # NOTE(woosuk): Here we assume that the first token in the prompt - # is always 
the first token in the sequence. - if mrope_positions: - for idx in range(3): - input_mrope_positions[idx].extend(mrope_positions[idx]) - else: - input_positions.extend(list(range(computed_len, seq_len))) - - # Compute the slot mapping. - block_table = seq_group_metadata.block_tables[seq_id] - # Mask the [0, start_idx) tokens of the prompt with _PAD_SLOT_ID, - # where start_idx is max(0, seq_len - sliding_window). - # For example, if the prompt len is 10, sliding window is 8, and - # block size is 4, the first two tokens are masked and the slot - # mapping will be [-1, -1, 2, 3, 4, 5, 6, 7, 0, 1]. - start_idx = 0 - if self.sliding_window is not None: - start_idx = max(0, seq_len - self.sliding_window) - - for i in range(computed_len, seq_len): - if i < start_idx: - slot_mapping.append(_PAD_SLOT_ID) - continue - - # For encoder-only models, the block_table is None, - # and there is no need to initialize the slot_mapping. - if block_table is not None: - block_number = block_table[i // - self.block_size] # type: ignore - block_offset = i % self.block_size # type: ignore - slot = block_number * self.block_size + block_offset - slot_mapping.append(slot) - - if any(input_mrope_positions): - input_positions = None # type: ignore - else: - input_mrope_positions = None # type: ignore + for i in range(3): + self.input_data.input_mrope_positions[ # type: ignore + i].extend(mrope_positions[i]) - num_prompt_tokens = len(input_tokens) - - input_tokens = torch.tensor(input_tokens, - dtype=torch.long, - device=self.device) # type: ignore - input_positions = torch.tensor(input_positions - or input_mrope_positions, - dtype=torch.long, - device=self.device) # type: ignore - slot_mapping = torch.tensor(slot_mapping, - dtype=torch.long, - device=self.device) # type: ignore - placeholder_index_maps = { - modality: placeholder_map.index_map() - for modality, placeholder_map in - multi_modal_placeholder_maps.items() - } - - attn_metadata = self.attn_backend.make_metadata( - is_prompt=True, - seq_lens=seq_lens, - seq_lens_tensor=torch.tensor([]), - max_decode_seq_len=0, - num_prefills=len(seq_lens), - num_prefill_tokens=num_prompt_tokens, - num_decode_tokens=0, - block_tables=torch.tensor([]), - slot_mapping=slot_mapping, - multi_modal_placeholder_index_maps=placeholder_index_maps, - ) - - multi_modal_kwargs = MultiModalKwargs.batch(multi_modal_kwargs_list) - - return (input_tokens, input_positions, attn_metadata, seq_lens, - multi_modal_kwargs) - - def _prepare_decode( - self, - seq_group_metadata_list: List[SequenceGroupMetadata], - ) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata]: - assert len(seq_group_metadata_list) > 0 - input_tokens: List[int] = [] - input_positions: List[int] = [] - input_mrope_positions: List[List[int]] = [[] for _ in range(3)] - slot_mapping: List[int] = [] - seq_lens: List[int] = [] - block_tables: List[List[int]] = [] - - for seq_group_metadata in seq_group_metadata_list: - assert not seq_group_metadata.is_prompt - assert seq_group_metadata.token_chunk_size == 1 - - seq_ids = list(seq_group_metadata.seq_data.keys()) - - for seq_id in seq_ids: - seq_data = seq_group_metadata.seq_data[seq_id] - generation_token = seq_data.get_last_token_id() - input_tokens.append(generation_token) - - seq_len = seq_data.get_len() - position = seq_len - 1 - if seq_data.mrope_position_delta is not None: - context_len = seq_data.get_num_computed_tokens() - next_pos = MRotaryEmbedding.get_next_input_positions( - seq_data.mrope_position_delta, - context_len, - seq_len, - ) - for idx in range(3): - 
input_mrope_positions[idx].extend(next_pos[idx]) - else: - input_positions.append(position) - - seq_len = seq_len if self.sliding_window is None else min( - seq_len, self.sliding_window) - seq_lens.append(seq_len) - - block_table = seq_group_metadata.block_tables[seq_id] - block_number = block_table[position // self.block_size] - block_offset = position % self.block_size - slot = block_number * self.block_size + block_offset - slot_mapping.append(slot) - - if self.sliding_window is not None: - sliding_window_blocks = (self.sliding_window // - self.block_size) - block_table = block_table[-sliding_window_blocks:] - block_tables.append(block_table) - - if any(input_mrope_positions): - input_positions = None # type: ignore - else: - input_mrope_positions = None # type: ignore - - max_decode_seq_len = max(seq_lens) - - input_tokens = torch.tensor(input_tokens, - dtype=torch.long, - device=self.device) - input_positions = torch.tensor(input_positions - or input_mrope_positions, - dtype=torch.long, - device=self.device) - slot_mapping = torch.tensor(slot_mapping, - dtype=torch.long, - device=self.device) - seq_lens_tensor = torch.tensor(seq_lens, - dtype=torch.int, - device=self.device) - - block_tables = make_tensor_with_pad( - block_tables, - pad=0, - dtype=torch.int, - device=self.device, - ) - - attn_metadata = self.attn_backend.make_metadata( - is_prompt=False, - slot_mapping=slot_mapping, - multi_modal_placeholder_index_maps=None, - seq_lens=seq_lens, - seq_lens_tensor=seq_lens_tensor, - max_decode_seq_len=max_decode_seq_len, - num_prefill_tokens=0, - num_decode_tokens=len(input_tokens), - num_prefills=0, - block_tables=block_tables, - ) - return ( - input_tokens, - input_positions, - attn_metadata, - ) + self.input_data.multi_modal_inputs_list.append(mm_kwargs) + for modality, placeholder_map in placeholder_maps.items(): + self.input_data.multi_modal_placeholder_maps[modality].extend( + placeholder_map) class CPUModelRunnerBase(ModelRunnerBase[TModelInputForCPU]): @@ -436,8 +384,6 @@ def __init__( **kwargs, ): ModelRunnerBase.__init__(self, vllm_config) - # Currently, CPU worker doesn't support chunked prefill. 
- assert self.scheduler_config.chunked_prefill_enabled is False model_config = self.model_config cache_config = self.cache_config @@ -479,8 +425,7 @@ def _prepare_model_input_tensors( """ builder = self._builder_cls(weakref.proxy(self), finished_requests_ids) - for seq_group_metadata in seq_group_metadata_list: - builder.add_seq_group(seq_group_metadata) + builder.set_seq_group_list(seq_group_metadata_list) return builder.build() # type: ignore @@ -537,22 +482,19 @@ def execute_model( "CPU worker does not support multi-step execution.") model_executable = self.model - execute_model_kwargs = { - "input_ids": - model_input.input_tokens, - "positions": - model_input.input_positions, - "kv_caches": - kv_caches, - "attn_metadata": - model_input.attn_metadata, - **MultiModalKwargs.as_kwargs(model_input.multi_modal_kwargs or {}, - device=self.device), - "intermediate_tensors": - intermediate_tensors, - } - - hidden_states = model_executable(**execute_model_kwargs) + multimodal_kwargs = {} + if model_input.multi_modal_kwargs is not None: + multimodal_kwargs = MultiModalKwargs.as_kwargs( + model_input.multi_modal_kwargs, device=self.device) + + hidden_states = model_executable( + input_ids=model_input.input_tokens, + positions=model_input.input_positions, + kv_caches=kv_caches, + attn_metadata=model_input.attn_metadata, + intermediate_tensors=intermediate_tensors, + **multimodal_kwargs, + ) # Compute the logits. logits = self.model.compute_logits(hidden_states, From 772a66732d0ff58a43dbd1ae79c0d165659aa96d Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 20 Nov 2024 09:13:28 -0800 Subject: [PATCH 068/397] [platforms] restore xpu check for parallel config (#10479) Signed-off-by: youkaichao --- vllm/platforms/xpu.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 62db285f6696a..c3c4746d3cc25 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -55,3 +55,13 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: "CUDA graph is not supported on XPU, fallback to the eager " "mode.") model_config.enforce_eager = True + + # check and update parallel config + parallel_config = vllm_config.parallel_config + if (parallel_config.distributed_executor_backend is not None + and parallel_config.distributed_executor_backend != "ray"): + logger.warning( + "%s is not supported on XPU, fallback to ray distributed" + " executor backend.", + parallel_config.distributed_executor_backend) + parallel_config.distributed_executor_backend = "ray" From 5f1d6af2b619b07b2af3151d6aa59f9adc17e1eb Mon Sep 17 00:00:00 2001 From: Simon Mo Date: Wed, 20 Nov 2024 11:06:56 -0800 Subject: [PATCH 069/397] [perf bench] H200 development (#9768) Signed-off-by: simon-mo --- .../benchmark-pipeline.yaml | 23 +++++++++++++++++++ .../convert-results-json-to-markdown.py | 5 ++++ .../scripts/run-performance-benchmarks.sh | 11 ++++----- 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml index eec2a51e2f8fd..5c069b38b2d7d 100644 --- a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml +++ b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml @@ -9,7 +9,9 @@ steps: - image: badouralix/curl-jq command: - sh .buildkite/nightly-benchmarks/scripts/wait-for-image.sh + - wait + - label: "A100" agents: queue: A100 @@ -41,6 +43,27 @@ steps: - name: devshm emptyDir: medium: Memory + + - label: "H200" + agents: + queue: H200 + plugins: + - 
docker#v5.12.0: + image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT + command: + - bash + - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh + mount-buildkite-agent: true + propagate-environment: true + ipc: host + gpus: 4,5,6,7 + volumes: + - /data/benchmark-hf-cache:/root/.cache/huggingface + environment: + - VLLM_USAGE_SOURCE + - HF_TOKEN + + # - label: "H100" # agents: # queue: H100 diff --git a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py index 7cf05610b9953..d640563252a0c 100644 --- a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py +++ b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py @@ -157,6 +157,11 @@ def results_to_json(latency, throughput, serving): throughput_results, serving_results) + # Sort all dataframes by their respective "Test name" columns + for df in [latency_results, serving_results, throughput_results]: + if not df.empty: + df.sort_values(by="Test name", inplace=True) + # get markdown tables latency_md_table = tabulate(latency_results, headers='keys', diff --git a/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh b/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh index d397b05cdff23..0d16a83781ab2 100644 --- a/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh +++ b/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh @@ -6,6 +6,7 @@ # Do not set -e, as the mixtral 8x22B model tends to crash occasionally # and we still want to see other benchmarking results even when mixtral crashes. +set -x set -o pipefail check_gpus() { @@ -85,11 +86,7 @@ kill_gpu_processes() { ps -aux lsof -t -i:8000 | xargs -r kill -9 - pkill -f pt_main_thread - # this line doesn't work now - # ps aux | grep python | grep openai | awk '{print $2}' | xargs -r kill -9 - pkill -f python3 - pkill -f /usr/bin/python3 + pgrep python3 | xargs -r kill -9 # wait until GPU memory usage smaller than 1GB @@ -289,7 +286,7 @@ run_serving_tests() { # run the server echo "Running test case $test_name" echo "Server command: $server_command" - eval "$server_command" & + bash -c "$server_command" & server_pid=$! 
# wait until the server is alive @@ -322,7 +319,7 @@ run_serving_tests() { echo "Running test case $test_name with qps $qps" echo "Client command: $client_command" - eval "$client_command" + bash -c "$client_command" # record the benchmarking commands jq_output=$(jq -n \ From 0cd3d9717e38c7a122ed01fe2a8fddd8b37dff4b Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 20 Nov 2024 11:20:38 -0800 Subject: [PATCH 070/397] [7/N] torch.compile, reduce compilation time (#10460) Signed-off-by: youkaichao --- tests/compile/piecewise/test_simple.py | 2 +- tests/compile/piecewise/test_toy_llama.py | 4 ++-- vllm/compilation/backends.py | 2 +- vllm/config.py | 17 ++++++++++------- vllm/worker/worker.py | 18 +++++++++++++----- 5 files changed, 27 insertions(+), 16 deletions(-) diff --git a/tests/compile/piecewise/test_simple.py b/tests/compile/piecewise/test_simple.py index 0e40e3b4ebc96..0db12d6b6a43c 100644 --- a/tests/compile/piecewise/test_simple.py +++ b/tests/compile/piecewise/test_simple.py @@ -79,7 +79,7 @@ def test_simple_piecewise_compile(): vllm_config = VllmConfig(compilation_config=CompilationConfig( level=CompilationLevel.PIECEWISE, use_cudagraph=True, - non_cudagraph_ops=["silly.attention"], + splitting_ops=["silly.attention"], cudagraph_copy_inputs=True, )) with set_current_vllm_config(vllm_config): diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index 356d119a40334..cfe661b8871e0 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -258,7 +258,7 @@ def run_model(llama_config, use_cudagraph=True, ) if split_attn: - compilation_config.non_cudagraph_ops = ["silly.attention"] + compilation_config.splitting_ops = ["silly.attention"] else: compilation_config = CompilationConfig( level=CompilationLevel.NO_COMPILATION, ) @@ -378,7 +378,7 @@ def benchmark(): compilation_config = CompilationConfig( level=CompilationLevel.PIECEWISE, use_cudagraph=True, - non_cudagraph_ops=["silly.attention"], + splitting_ops=["silly.attention"], ) else: compilation_config = CompilationConfig( diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 0cf1e3a95fcba..416cffd326489 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -447,7 +447,7 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: self.add_passes_to_config() self.split_gm, self.piecewise_graphs = split_graph( - graph, self.compilation_configs.non_cudagraph_ops) + graph, self.compilation_configs.splitting_ops) from torch._dynamo.utils import lazy_format_graph_code logger.debug("%s", lazy_format_graph_code("before split", self.graph)) diff --git a/vllm/config.py b/vllm/config.py index e69cbd3eb402a..3d0c616868225 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2089,13 +2089,15 @@ class CompilationConfig(BaseModel): - 'none,+op1,+op2' to enable only op1 and op2 By default, all custom ops are enabled when running without Inductor and disabled when running with Inductor (compile_level >= Inductor). + - splitting_ops: a list of ops to split the full graph into subgraphs, used in piecewise compilation. - CudaGraph capture: - use_cudagraph: whether to use cudagraph inside compilation. - False: cudagraph inside compilation is not used. - True: cudagraph inside compilation is used. It requires - that all input buffers have fixed addresses. - Note that this is orthogonal to the cudagraph capture out - side of compilation. 
+ that all input buffers have fixed addresses, and all + splitting ops write their outputs to input buffers. + Note that this is orthogonal to the cudagraph capture logic + outside of compilation. TODO: move outside cudagraph logic into compilation. torch.compile will handle cudagraph capture logic in the future. - cudagraph_capture_sizes: sizes to capture cudagraph. @@ -2149,6 +2151,11 @@ class CompilationConfig(BaseModel): level: int = 0 backend: str = "" custom_ops: List[str] = Field(default_factory=list) + splitting_ops: List[str] = Field(default_factory=lambda: [ + "vllm.unified_flash_attention", + "vllm.unified_flash_infer", + "vllm.unified_v1_flash_attention", + ]) use_inductor: bool = True inductor_specialize_for_cudagraph_no_more_than: Optional[int] = None @@ -2157,7 +2164,6 @@ class CompilationConfig(BaseModel): inductor_passes: Dict[str, str] = Field(default_factory=dict) use_cudagraph: bool = False - non_cudagraph_ops: List[str] = Field(default_factory=list) cudagraph_num_of_warmups: int = 0 cudagraph_capture_sizes: Optional[List[int]] = None cudagraph_copy_inputs: bool = False @@ -2348,9 +2354,6 @@ def __post_init__(self): # and avoid any potential issues with the inductor. self.compilation_config.custom_ops = ["none"] self.compilation_config.use_cudagraph = True - self.compilation_config.non_cudagraph_ops = [ - "vllm.unified_v1_flash_attention" - ] self.compilation_config.use_inductor = True self.compilation_config.enable_fusion = False diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index d3ca6d9d0b17e..80fd7bc3b67cc 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -1,6 +1,7 @@ """A GPU worker class.""" import gc import os +import time from typing import Dict, List, Optional, Set, Tuple, Type, Union import torch @@ -189,6 +190,7 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: torch.cuda.reset_peak_memory_stats() free_memory_pre_profile, total_gpu_memory = torch.cuda.mem_get_info() + start_time = time.time() # Execute a forward pass with dummy inputs to profile the memory usage # of the model. 
@@ -229,12 +231,18 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: num_gpu_blocks = max(num_gpu_blocks, 0) num_cpu_blocks = max(num_cpu_blocks, 0) + end_time = time.time() logger.info( - "Memory profiling results: total_gpu_memory=%.2fGiB" - " initial_memory_usage=%.2fGiB peak_torch_memory=%.2fGiB" - " memory_usage_post_profile=%.2fGiB" - " non_torch_memory=%.2fGiB kv_cache_size=%.2fGiB" - " gpu_memory_utilization=%.2f", total_gpu_memory / (1024**3), + "Memory profiling results: " + "duration=%.2f seconds, " + "total_gpu_memory=%.2fGiB, " + "initial_memory_usage=%.2fGiB, " + "peak_torch_memory=%.2fGiB, " + "memory_usage_post_profile=%.2fGiB, " + "non_torch_memory=%.2fGiB, " + "kv_cache_size=%.2fGiB, " + "gpu_memory_utilization=%.2f.", end_time - start_time, + total_gpu_memory / (1024**3), (total_gpu_memory - free_memory_pre_profile) / (1024**3), (peak_memory - non_torch_allocations) / (1024**3), total_allocated_bytes / (1024**3), From c68f7ede6a4aef0cd31f531b5d7ec22ab224de95 Mon Sep 17 00:00:00 2001 From: Guillaume Calmettes Date: Wed, 20 Nov 2024 22:42:21 +0100 Subject: [PATCH 071/397] [Bugfix]: allow extra fields in requests to openai compatible server (#10463) Signed-off-by: Guillaume Calmettes --- tests/entrypoints/openai/test_chat.py | 26 +++++++++++++------------- vllm/entrypoints/openai/protocol.py | 18 ++++++++++++++++-- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/tests/entrypoints/openai/test_chat.py b/tests/entrypoints/openai/test_chat.py index 8d13f64dce01c..843d15e768093 100644 --- a/tests/entrypoints/openai/test_chat.py +++ b/tests/entrypoints/openai/test_chat.py @@ -899,19 +899,19 @@ async def test_response_format_json_schema(client: openai.AsyncOpenAI): @pytest.mark.asyncio -async def test_extra_fields(client: openai.AsyncOpenAI): - with pytest.raises(BadRequestError) as exc_info: - await client.chat.completions.create( - model=MODEL_NAME, - messages=[{ - "role": "system", - "content": "You are a helpful assistant.", - "extra_field": "0", - }], # type: ignore - temperature=0, - seed=0) - - assert "extra_forbidden" in exc_info.value.message +async def test_extra_fields_allowed(client: openai.AsyncOpenAI): + resp = await client.chat.completions.create( + model=MODEL_NAME, + messages=[{ + "role": "user", + "content": "what is 1+1?", + "extra_field": "0", + }], # type: ignore + temperature=0, + seed=0) + + content = resp.choices[0].message.content + assert content is not None @pytest.mark.asyncio diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index b7b064ae01f05..a82212677f63a 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -9,12 +9,15 @@ from typing_extensions import Annotated from vllm.entrypoints.chat_utils import ChatCompletionMessageParam +from vllm.logger import init_logger from vllm.pooling_params import PoolingParams from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams, RequestOutputKind, SamplingParams) from vllm.sequence import Logprob from vllm.utils import random_uuid +logger = init_logger(__name__) + # torch is mocked during docs generation, # so we have to provide the values as literals _MOCK_LONG_INFO = Namespace(min=-9223372036854775808, max=9223372036854775807) @@ -35,8 +38,19 @@ class OpenAIBaseModel(BaseModel): - # OpenAI API does not allow extra fields - model_config = ConfigDict(extra="forbid") + # OpenAI API does allow extra fields + model_config = ConfigDict(extra="allow") + + @model_validator(mode="before") + 
@classmethod + def __log_extra_fields__(cls, data): + if isinstance(data, dict): + extra_fields = data.keys() - cls.model_fields.keys() + if extra_fields: + logger.warning( + "The following fields were present in the request " + "but ignored: %s", extra_fields) + return data class ErrorResponse(OpenAIBaseModel): From 2f77b6cfec32c8054f996aee4b021f511630ea6f Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Wed, 20 Nov 2024 13:54:15 -0800 Subject: [PATCH 072/397] [TPU] Implement prefix caching for TPUs (#10307) Signed-off-by: Woosuk Kwon --- requirements-tpu.txt | 6 +- vllm/attention/backends/pallas.py | 66 ++++++---- vllm/worker/tpu_model_runner.py | 211 +++++++++++++++++++----------- vllm/worker/tpu_worker.py | 4 +- 4 files changed, 182 insertions(+), 105 deletions(-) diff --git a/requirements-tpu.txt b/requirements-tpu.txt index f9a0770804e55..3d1e80f6be620 100644 --- a/requirements-tpu.txt +++ b/requirements-tpu.txt @@ -16,8 +16,8 @@ ray[default] --find-links https://storage.googleapis.com/libtpu-releases/index.html --find-links https://storage.googleapis.com/jax-releases/jax_nightly_releases.html --find-links https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html -torch==2.6.0.dev20241028+cpu -torchvision==0.20.0.dev20241028+cpu -torch_xla[tpu] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.6.0.dev20241028-cp310-cp310-linux_x86_64.whl +torch==2.6.0.dev20241114+cpu +torchvision==0.20.0.dev20241114+cpu +torch_xla[tpu] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.6.0.dev20241114-cp310-cp310-linux_x86_64.whl jaxlib==0.4.32.dev20240829 jax==0.4.32.dev20240829 diff --git a/vllm/attention/backends/pallas.py b/vllm/attention/backends/pallas.py index 6fee81de14420..eeab8731a2c39 100644 --- a/vllm/attention/backends/pallas.py +++ b/vllm/attention/backends/pallas.py @@ -65,6 +65,7 @@ class PallasMetadata(AttentionMetadata): # or all decoding. block_tables: Optional[torch.Tensor] = None context_lens: Optional[torch.Tensor] = None + effective_query_lens: Optional[torch.Tensor] = None @property def prefill_metadata(self) -> Optional["PallasMetadata"]: @@ -72,8 +73,6 @@ def prefill_metadata(self) -> Optional["PallasMetadata"]: return None assert self.num_decode_tokens == 0 - assert self.block_tables is None - assert self.context_lens is None return self @property @@ -186,29 +185,50 @@ def forward( query = query * self.scale if attn_metadata.num_prefills > 0: - assert seq_len % 16 == 0, ( - "Pallas FlashAttention kernel requires seq_len to be a " - f"multiple of 16 but got {seq_len}") - - # Handle GQA/MQA. - if self.num_kv_heads != self.num_heads: - key = key.repeat_interleave(self.num_queries_per_kv, dim=-2) - key = key.view(batch_size, seq_len, self.num_heads, - self.head_size) - value = value.repeat_interleave(self.num_queries_per_kv, + if attn_metadata.block_tables is None: + # Prefill without paged KV cache. + assert seq_len % 16 == 0, ( + "Pallas FlashAttention kernel requires seq_len to be a " + f"multiple of 16 but got {seq_len}") + + # Handle GQA/MQA. + if self.num_kv_heads != self.num_heads: + key = key.repeat_interleave(self.num_queries_per_kv, dim=-2) - value = value.view(batch_size, seq_len, self.num_heads, + key = key.view(batch_size, seq_len, self.num_heads, self.head_size) - # FlashAttention requires [batch_size, num_heads, seq_len, d_model] - # while the input is [batch_size, seq_len, num_heads, d_model]. - # Permute the input to match the required format. 
- output = torch.ops.xla.flash_attention( - query.permute(0, 2, 1, 3), - key.permute(0, 2, 1, 3), - value.permute(0, 2, 1, 3), - True, - ) - output = output.permute(0, 2, 1, 3) + value = value.repeat_interleave(self.num_queries_per_kv, + dim=-2) + value = value.view(batch_size, seq_len, self.num_heads, + self.head_size) + # FlashAttention kernel requires the input shape to be + # [batch_size, num_heads, seq_len, d_model] + # while the input is [batch_size, seq_len, num_heads, d_model]. + # Permute the input to match the required format. + output = torch.ops.xla.flash_attention( + query.permute(0, 2, 1, 3), + key.permute(0, 2, 1, 3), + value.permute(0, 2, 1, 3), + True, + ) + output = output.permute(0, 2, 1, 3) + else: + # Prefill with paged KV cache. + # TODO(woosuk): Tune the below knobs. + num_kv_pages_per_compute_block = 16 + num_queries_per_compute_block = 16 + assert seq_len % num_queries_per_compute_block == 0 + output = torch.ops.xla.multi_queries_paged_attention( + query, + key_cache, + value_cache, + attn_metadata.context_lens, + attn_metadata.block_tables, + attn_metadata.effective_query_lens, + num_kv_pages_per_compute_block, + num_queries_per_compute_block, + use_kernel=True, + ) else: # Decoding run. assert kv_cache[0].numel() > 0 diff --git a/vllm/worker/tpu_model_runner.py b/vllm/worker/tpu_model_runner.py index d7a641857a613..9a054eb8a4cf7 100644 --- a/vllm/worker/tpu_model_runner.py +++ b/vllm/worker/tpu_model_runner.py @@ -1,3 +1,4 @@ +import enum import time from dataclasses import dataclass from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, @@ -11,7 +12,6 @@ import torch_xla.runtime as xr from vllm.attention import AttentionMetadata, get_attn_backend -from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.model_executor.layers.sampler import SamplerOutput @@ -39,6 +39,15 @@ _MAX_NUM_SAMPLES = 128 +class ExecutionMode(enum.Enum): + PREFILL = enum.auto() + DECODE = enum.auto() + PREFIX_PREFILL = enum.auto() + + def is_prefill(self) -> bool: + return self in (ExecutionMode.PREFILL, ExecutionMode.PREFIX_PREFILL) + + @dataclass(frozen=True) class ModelInputForTPU(ModelRunnerInputBase): token_ids: torch.Tensor @@ -140,16 +149,21 @@ def load_model(self) -> None: model = get_model(vllm_config=self.vllm_config) model = model.eval() xm.wait_device_ops() - self.model = ModelWrapper(model, self.vllm_config) + model = ModelWrapper(model) + self.model = torch.compile(model, + backend="openxla", + fullgraph=True, + dynamic=False) def _dummy_run( self, batch_size: int, seq_len: int, kv_caches: List[Tuple[torch.Tensor, torch.Tensor]], - is_prompt: bool, + exec_mode: ExecutionMode, ) -> None: - if is_prompt: + exec_mode = ExecutionMode(exec_mode) + if exec_mode.is_prefill(): seq_len = (seq_len + 15) // 16 * 16 token_ids = torch.zeros((batch_size, seq_len), dtype=torch.int32, @@ -160,18 +174,38 @@ def _dummy_run( slot_mapping = torch.zeros((batch_size, seq_len), dtype=torch.int64, device=self.device) - attn_metadata = self.attn_backend.make_metadata( - num_prefills=batch_size, - num_prefill_tokens=batch_size * seq_len, - num_decode_tokens=0, - slot_mapping=slot_mapping, - multi_modal_placeholder_index_maps=None, - block_tables=None, - context_lens=None, - ) input_lens = torch.ones((batch_size, ), dtype=torch.int32, device=self.device) + if exec_mode == ExecutionMode.PREFILL: + attn_metadata = self.attn_backend.make_metadata( + num_prefills=batch_size, + 
num_prefill_tokens=batch_size * seq_len, + num_decode_tokens=0, + slot_mapping=slot_mapping, + multi_modal_placeholder_index_maps=None, + block_tables=None, + context_lens=None, + effective_query_lens=None, + ) + else: + context_lens = torch.ones((batch_size, ), + dtype=torch.int32, + device=self.device) + block_tables = torch.tensor(self.block_tables[:batch_size], + dtype=torch.int32, + device=self.device) + effective_query_lens = torch.ones_like(context_lens) + attn_metadata = self.attn_backend.make_metadata( + num_prefills=batch_size, + num_prefill_tokens=batch_size * seq_len, + num_decode_tokens=0, + slot_mapping=slot_mapping, + multi_modal_placeholder_index_maps=None, + block_tables=block_tables, + context_lens=context_lens, + effective_query_lens=effective_query_lens, + ) else: assert seq_len == 1 token_ids = torch.zeros((batch_size, seq_len), @@ -204,7 +238,7 @@ def _dummy_run( ) t = torch.ones((batch_size, ), dtype=torch.float32, device=self.device) p = torch.ones((batch_size, ), dtype=torch.float32, device=self.device) - num_samples = _MAX_NUM_SAMPLES if is_prompt else 1 + num_samples = _MAX_NUM_SAMPLES if exec_mode.is_prefill() else 1 # NOTE(woosuk): There are two stages of compilation: torch.compile and # XLA compilation. Using `mark_dynamic` can reduce the torch.compile @@ -213,7 +247,7 @@ def _dummy_run( # be re-compiled for every different shapes. This overhead is inevitable # in the first run, but can be skipped afterwards as we cache the XLA # graphs in the disk (VLLM_XLA_CACHE_PATH). - if is_prompt: + if exec_mode.is_prefill(): # Prefll torch._dynamo.mark_dynamic(token_ids, 1) torch._dynamo.mark_dynamic(position_ids, 1) @@ -229,15 +263,8 @@ def _dummy_run( torch._dynamo.mark_dynamic(t, 0) torch._dynamo.mark_dynamic(p, 0) # Dummy run. 
- self.model(token_ids, - position_ids, - attn_metadata, - input_lens, - t, - p, - num_samples, - kv_caches, - is_prompt=is_prompt) + self.model(token_ids, position_ids, attn_metadata, input_lens, t, p, + num_samples, kv_caches) def warmup_model( self, @@ -248,13 +275,13 @@ def warmup_model( start = time.time() for batch_size in [1]: seq_len = 16 - while True: - self._dummy_run(batch_size, seq_len, kv_caches, is_prompt=True) + while seq_len <= self.model_config.max_model_len: + self._dummy_run(batch_size, + seq_len, + kv_caches, + exec_mode=ExecutionMode.PREFILL) xm.wait_device_ops() logger.info("batch_size: %d, seq_len: %d", batch_size, seq_len) - - if seq_len >= self.model_config.max_model_len: - break num_tokens = batch_size * seq_len if num_tokens >= self.scheduler_config.max_num_batched_tokens: break @@ -263,12 +290,39 @@ def warmup_model( end = time.time() logger.info("Compilation for prefill done in %.2f s.", end - start) + # Prefix prefill + if self.cache_config.enable_prefix_caching: + logger.info("Compiling the model with different input shapes for " + "prefix prefill...") + start = time.time() + for batch_size in [1]: + seq_len = 16 + while seq_len <= self.model_config.max_model_len: + self._dummy_run(batch_size, + seq_len, + kv_caches, + exec_mode=ExecutionMode.PREFIX_PREFILL) + xm.wait_device_ops() + logger.info("batch_size: %d, seq_len: %d", batch_size, + seq_len) + num_tokens = batch_size * seq_len + if (num_tokens >= + self.scheduler_config.max_num_batched_tokens): + break + seq_len = seq_len * 2 + end = time.time() + logger.info("Compilation for prefix prefill done in %.2f s.", + end - start) + # Decode start = time.time() seq_len = 1 batch_size = 8 # Must be in sync with _get_padded_batch_size() while True: - self._dummy_run(batch_size, seq_len, kv_caches, is_prompt=False) + self._dummy_run(batch_size, + seq_len, + kv_caches, + exec_mode=ExecutionMode.DECODE) xm.wait_device_ops() logger.info("batch_size: %d, seq_len: %d", batch_size, seq_len) @@ -287,9 +341,11 @@ def _prepare_prompt( input_tokens: List[int] = [] input_positions: List[int] = [] prompt_lens: List[int] = [] + context_lens: List[int] = [] slot_mapping: List[int] = [] - for seq_group_metadata in seq_group_metadata_list: + for batch_idx, seq_group_metadata in enumerate( + seq_group_metadata_list): assert seq_group_metadata.is_prompt seq_ids = list(seq_group_metadata.seq_data.keys()) assert len(seq_ids) == 1 @@ -298,19 +354,31 @@ def _prepare_prompt( seq_data = seq_group_metadata.seq_data[seq_id] # Could include output tokens when a request is preempted. 
prompt_tokens = seq_data.get_token_ids() + seq_len = len(prompt_tokens) + + num_computed_blocks = len(seq_group_metadata.computed_block_nums) + num_computed_tokens = num_computed_blocks * self.block_size + if num_computed_tokens > 0: + prompt_tokens = prompt_tokens[num_computed_tokens:] + context_lens.append(seq_len) + else: + context_lens.append(0) + prompt_len = len(prompt_tokens) prompt_lens.append(prompt_len) input_tokens.extend(prompt_tokens) - input_positions.extend(list(range(prompt_len))) + input_positions.extend(range(num_computed_tokens, seq_len)) assert seq_group_metadata.block_tables is not None block_table = seq_group_metadata.block_tables[seq_id] - for i in range(prompt_len): + for i in range(num_computed_tokens, seq_len): block_number = block_table[i // self.block_size] block_offset = i % self.block_size slot = block_number * self.block_size + block_offset slot_mapping.append(slot) + if num_computed_tokens > 0: + self.block_tables[batch_idx, :len(block_table)] = block_table # Add paddings to EACH prompt to the smallest power of 2 that is # greater than or equal to the prompt length. @@ -338,14 +406,21 @@ def _prepare_prompt( prompt_lens = torch.tensor(prompt_lens, dtype=torch.int32, device="cpu") + context_lens = torch.tensor(context_lens, + dtype=torch.int32, + device="cpu") + block_tables = torch.tensor(self.block_tables[:num_prefills], + dtype=torch.int32, + device="cpu") attn_metadata = self.attn_backend.make_metadata( num_prefills=num_prefills, num_prefill_tokens=0, # NOTE: This is not used. num_decode_tokens=0, slot_mapping=slot_mapping, multi_modal_placeholder_index_maps=None, - block_tables=None, - context_lens=None, + block_tables=block_tables, + context_lens=context_lens, + effective_query_lens=prompt_lens, ) return input_tokens, input_positions, attn_metadata, prompt_lens @@ -550,6 +625,10 @@ def execute_model( # process them separately. This is a temporary hack that should be # optimized by using SplashAttention. 
orig_slot_mapping = model_input.attn_metadata.slot_mapping + orig_block_tables = model_input.attn_metadata.block_tables + orig_context_lens = model_input.attn_metadata.context_lens + orig_effective_query_lens = \ + model_input.attn_metadata.effective_query_lens batch_size = model_input.input_lens.shape[0] start_idx = 0 next_token_ids = [] @@ -568,18 +647,24 @@ def execute_model( attn_metadata.num_prefills = 1 attn_metadata.slot_mapping = orig_slot_mapping[ None, start_idx:end_idx].to(self.device) + if orig_context_lens[i].item() > 0: + attn_metadata.context_lens = orig_context_lens[i:i + 1].to( + self.device) + attn_metadata.block_tables = orig_block_tables[ + i].unsqueeze(0).to(self.device) + attn_metadata.effective_query_lens = \ + orig_effective_query_lens[i:i + 1].to(self.device) + else: + attn_metadata.context_lens = None + attn_metadata.block_tables = None + attn_metadata.effective_query_lens = None input_lens = model_input.input_lens[i:i + 1].to(self.device) t = model_input.t[i:i + 1].to(self.device) p = model_input.p[i:i + 1].to(self.device) - output_token_ids = self.model(token_ids, - position_ids, - attn_metadata, - input_lens, - t, - p, + output_token_ids = self.model(token_ids, position_ids, + attn_metadata, input_lens, t, p, model_input.num_samples, - kv_caches, - is_prompt=True) + kv_caches) next_token_ids.append(output_token_ids[0]) start_idx = end_idx @@ -624,15 +709,10 @@ def execute_model( input_lens = model_input.input_lens.to(self.device) for i in range(num_steps): slot_mapping = attn_metadata.slot_mapping - output_token_ids = self.model(token_ids, - position_ids, - attn_metadata, - input_lens, - t, - p, + output_token_ids = self.model(token_ids, position_ids, + attn_metadata, input_lens, t, p, model_input.num_samples, - kv_caches, - is_prompt=False) + kv_caches) self.cached_step_outputs.append(output_token_ids) if i < num_steps - 1: @@ -667,34 +747,11 @@ def execute_model( return [sampler_output] -class ModelWrapper(TorchCompileWrapperWithCustomDispatcher): +class ModelWrapper(nn.Module): - def __init__(self, model: nn.Module, vllm_config: VllmConfig): + def __init__(self, model: nn.Module): + super().__init__() self.model = model - compiled_callable = torch.compile(self.forward, - backend="openxla", - fullgraph=True, - dynamic=False) - super().__init__( - compiled_callable, - compilation_level=vllm_config.compilation_config.level) - - def __call__(self, *args, is_prompt: bool, **kwargs): - if len(self.compiled_codes) < 3 or not self.use_custom_dispatcher: - # not fully compiled yet, or not using the custom dispatcher, - # let PyTorch handle it - return self.compiled_callable(*args, **kwargs) - # the 3 compiled codes are: - # 0: for profiling - # 1: for prompt - # 2: for decode - # dispatch to the compiled code directly, skip PyTorch - if is_prompt: - with self.dispatch_to_code(1): - return self.forward(*args, **kwargs) - else: - with self.dispatch_to_code(2): - return self.forward(*args, **kwargs) def forward( self, diff --git a/vllm/worker/tpu_worker.py b/vllm/worker/tpu_worker.py index 096cb23416909..8754f7538f251 100644 --- a/vllm/worker/tpu_worker.py +++ b/vllm/worker/tpu_worker.py @@ -13,7 +13,7 @@ from vllm.model_executor import set_random_seed from vllm.sequence import ExecuteModelRequest from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size -from vllm.worker.tpu_model_runner import TPUModelRunner +from vllm.worker.tpu_model_runner import ExecutionMode, TPUModelRunner from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, 
LoraNotSupportedWorkerBase, WorkerBase, WorkerInput) @@ -112,7 +112,7 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: batch_size=1, seq_len=self.scheduler_config.max_num_batched_tokens, kv_caches=kv_caches, - is_prompt=True, + exec_mode=ExecutionMode.PREFILL, ) # Synchronize before measuring the memory usage. xm.wait_device_ops() From 388ee3de665c3055fbe610b66ebeef096a23cfe1 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 20 Nov 2024 18:36:33 -0800 Subject: [PATCH 073/397] [torch.compile] limit inductor threads and lazy import quant (#10482) Signed-off-by: youkaichao --- .buildkite/test-pipeline.yaml | 2 + tests/quantization/utils.py | 4 +- tests/test_lazy_torch_compile.py | 68 ++++++++++ vllm/_custom_ops.py | 3 - vllm/config.py | 8 +- .../layers/quantization/__init__.py | 124 +++++++++++------- vllm/model_executor/models/internvl.py | 4 +- vllm/model_executor/models/qwen2_vl.py | 7 +- vllm/platforms/cuda.py | 2 + vllm/platforms/rocm.py | 11 ++ vllm/plugins/__init__.py | 9 ++ 11 files changed, 178 insertions(+), 64 deletions(-) create mode 100644 tests/test_lazy_torch_compile.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 501743c887596..c436d2b48d20f 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -50,7 +50,9 @@ steps: - tests/multimodal - tests/test_utils - tests/worker + - tests/test_lazy_torch_compile.py commands: + - python3 test_lazy_torch_compile.py - pytest -v -s mq_llm_engine # MQLLMEngine - pytest -v -s async_engine # AsyncLLMEngine - NUM_SCHEDULER_STEPS=4 pytest -v -s async_engine/test_async_llm_engine.py diff --git a/tests/quantization/utils.py b/tests/quantization/utils.py index 061a077592e80..8ebd8dd2be0d5 100644 --- a/tests/quantization/utils.py +++ b/tests/quantization/utils.py @@ -1,4 +1,4 @@ -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from vllm.model_executor.layers.quantization import get_quantization_config from vllm.platforms import current_platform @@ -10,6 +10,6 @@ def is_quant_method_supported(quant_method: str) -> bool: capability = current_platform.get_device_capability() assert capability is not None - min_capability = QUANTIZATION_METHODS[quant_method].get_min_capability() + min_capability = get_quantization_config(quant_method).get_min_capability() return capability.to_int() >= min_capability diff --git a/tests/test_lazy_torch_compile.py b/tests/test_lazy_torch_compile.py new file mode 100644 index 0000000000000..b8ac4dd93732b --- /dev/null +++ b/tests/test_lazy_torch_compile.py @@ -0,0 +1,68 @@ +# Description: Test the lazy import module +# The utility function cannot be placed in `vllm.utils` +# this needs to be a standalone script + +import contextlib +import dataclasses +import sys +import traceback +from typing import Callable, Generator + + +@dataclasses.dataclass +class BlameResult: + found: bool = False + trace_stack: str = "" + + +@contextlib.contextmanager +def blame(func: Callable) -> Generator[BlameResult, None, None]: + """ + Trace the function calls to find the first function that satisfies the + condition. The trace stack will be stored in the result. 
+ + Usage: + + ```python + with blame(lambda: some_condition()) as result: + # do something + + if result.found: + print(result.trace_stack) + """ + result = BlameResult() + + def _trace_calls(frame, event, arg=None): + nonlocal result + if event in ['call', 'return']: + # for every function call or return + try: + # Temporarily disable the trace function + sys.settrace(None) + # check condition here + if not result.found and func(): + result.found = True + result.trace_stack = "".join(traceback.format_stack()) + # Re-enable the trace function + sys.settrace(_trace_calls) + except NameError: + # modules are deleted during shutdown + pass + return _trace_calls + + sys.settrace(_trace_calls) + + yield result + + sys.settrace(None) + + +module_name = "torch._inductor.async_compile" + +with blame(lambda: module_name in sys.modules) as result: + import vllm # noqa + +assert not result.found, (f"Module {module_name} is already imported, the" + f" first import location is:\n{result.trace_stack}") + +print(f"Module {module_name} is not imported yet") diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 782dc6aed1b8c..41892e4dddf7e 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -19,9 +19,6 @@ except ImportError as e: logger.warning("Failed to import from vllm._C with %r", e) -if current_platform.is_rocm(): - import vllm._rocm_C # noqa: F401 - supports_moe_ops = False with contextlib.suppress(ImportError): import vllm._moe_C # noqa: F401 diff --git a/vllm/config.py b/vllm/config.py index 3d0c616868225..7522486782cc9 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -14,7 +14,8 @@ import vllm.envs as envs from vllm.logger import init_logger -from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS +from vllm.model_executor.layers.quantization import (QUANTIZATION_METHODS, + get_quantization_config) from vllm.model_executor.models import ModelRegistry from vllm.platforms import current_platform from vllm.tracing import is_otel_available, otel_import_error_traceback @@ -370,7 +371,7 @@ def _parse_quant_hf_config(self): return quant_cfg def _verify_quantization(self) -> None: - supported_quantization = [*QUANTIZATION_METHODS] + supported_quantization = QUANTIZATION_METHODS rocm_supported_quantization = [ "awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors", "fbgemm_fp8" @@ -392,7 +393,8 @@ def _verify_quantization(self) -> None: quant_method = quant_cfg.get("quant_method", "").lower() # Detect which checkpoint is it - for _, method in QUANTIZATION_METHODS.items(): + for name in QUANTIZATION_METHODS: + method = get_quantization_config(name) quantization_override = method.override_quantization_method( quant_cfg, self.quantization) if quantization_override: diff --git a/vllm/model_executor/layers/quantization/__init__.py b/vllm/model_executor/layers/quantization/__init__.py index ff342c4f9479e..dd10c434f0752 100644 --- a/vllm/model_executor/layers/quantization/__init__.py +++ b/vllm/model_executor/layers/quantization/__init__.py @@ -1,65 +1,87 @@ -from typing import Dict, Type +from typing import Dict, List, Type -from vllm.model_executor.layers.quantization.aqlm import AQLMConfig -from vllm.model_executor.layers.quantization.awq import AWQConfig -from vllm.model_executor.layers.quantization.awq_marlin import AWQMarlinConfig from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) -from vllm.model_executor.layers.quantization.bitsandbytes import ( - BitsAndBytesConfig) -from 
vllm.model_executor.layers.quantization.compressed_tensors.compressed_tensors import ( # noqa: E501 - CompressedTensorsConfig) -from vllm.model_executor.layers.quantization.deepspeedfp import ( - DeepSpeedFPConfig) -from vllm.model_executor.layers.quantization.experts_int8 import ( - ExpertsInt8Config) -from vllm.model_executor.layers.quantization.fbgemm_fp8 import FBGEMMFp8Config -from vllm.model_executor.layers.quantization.fp8 import Fp8Config -from vllm.model_executor.layers.quantization.gguf import GGUFConfig -from vllm.model_executor.layers.quantization.gptq import GPTQConfig -from vllm.model_executor.layers.quantization.gptq_marlin import ( - GPTQMarlinConfig) -from vllm.model_executor.layers.quantization.gptq_marlin_24 import ( - GPTQMarlin24Config) -from vllm.model_executor.layers.quantization.hqq_marlin import HQQMarlinConfig -from vllm.model_executor.layers.quantization.ipex_quant import IPEXConfig -from vllm.model_executor.layers.quantization.marlin import MarlinConfig -from vllm.model_executor.layers.quantization.modelopt import ModelOptFp8Config -from vllm.model_executor.layers.quantization.neuron_quant import ( - NeuronQuantConfig) -from vllm.model_executor.layers.quantization.qqq import QQQConfig -from vllm.model_executor.layers.quantization.tpu_int8 import Int8TpuConfig -QUANTIZATION_METHODS: Dict[str, Type[QuantizationConfig]] = { - "aqlm": AQLMConfig, - "awq": AWQConfig, - "deepspeedfp": DeepSpeedFPConfig, - "tpu_int8": Int8TpuConfig, - "fp8": Fp8Config, - "fbgemm_fp8": FBGEMMFp8Config, - "modelopt": ModelOptFp8Config, +QUANTIZATION_METHODS: List[str] = [ + "aqlm", + "awq", + "deepspeedfp", + "tpu_int8", + "fp8", + "fbgemm_fp8", + "modelopt", # The order of gptq methods is important for config.py iteration over # override_quantization_method(..) 
- "marlin": MarlinConfig, - "gguf": GGUFConfig, - "gptq_marlin_24": GPTQMarlin24Config, - "gptq_marlin": GPTQMarlinConfig, - "awq_marlin": AWQMarlinConfig, - "gptq": GPTQConfig, - "compressed-tensors": CompressedTensorsConfig, - "bitsandbytes": BitsAndBytesConfig, - "qqq": QQQConfig, - "hqq": HQQMarlinConfig, - "experts_int8": ExpertsInt8Config, - "neuron_quant": NeuronQuantConfig, - "ipex": IPEXConfig, -} + "marlin", + "gguf", + "gptq_marlin_24", + "gptq_marlin", + "awq_marlin", + "gptq", + "compressed-tensors", + "bitsandbytes", + "qqq", + "hqq", + "experts_int8", + "neuron_quant", + "ipex", +] def get_quantization_config(quantization: str) -> Type[QuantizationConfig]: if quantization not in QUANTIZATION_METHODS: raise ValueError(f"Invalid quantization method: {quantization}") - return QUANTIZATION_METHODS[quantization] + + # lazy import to avoid triggering `torch.compile` too early + from .aqlm import AQLMConfig + from .awq import AWQConfig + from .awq_marlin import AWQMarlinConfig + from .bitsandbytes import BitsAndBytesConfig + from .compressed_tensors.compressed_tensors import ( # noqa: E501 + CompressedTensorsConfig) + from .deepspeedfp import DeepSpeedFPConfig + from .experts_int8 import ExpertsInt8Config + from .fbgemm_fp8 import FBGEMMFp8Config + from .fp8 import Fp8Config + from .gguf import GGUFConfig + from .gptq import GPTQConfig + from .gptq_marlin import GPTQMarlinConfig + from .gptq_marlin_24 import GPTQMarlin24Config + from .hqq_marlin import HQQMarlinConfig + from .ipex_quant import IPEXConfig + from .marlin import MarlinConfig + from .modelopt import ModelOptFp8Config + from .neuron_quant import NeuronQuantConfig + from .qqq import QQQConfig + from .tpu_int8 import Int8TpuConfig + + method_to_config: Dict[str, Type[QuantizationConfig]] = { + "aqlm": AQLMConfig, + "awq": AWQConfig, + "deepspeedfp": DeepSpeedFPConfig, + "tpu_int8": Int8TpuConfig, + "fp8": Fp8Config, + "fbgemm_fp8": FBGEMMFp8Config, + "modelopt": ModelOptFp8Config, + # The order of gptq methods is important for config.py iteration over + # override_quantization_method(..) 
+ "marlin": MarlinConfig, + "gguf": GGUFConfig, + "gptq_marlin_24": GPTQMarlin24Config, + "gptq_marlin": GPTQMarlinConfig, + "awq_marlin": AWQMarlinConfig, + "gptq": GPTQConfig, + "compressed-tensors": CompressedTensorsConfig, + "bitsandbytes": BitsAndBytesConfig, + "qqq": QQQConfig, + "hqq": HQQMarlinConfig, + "experts_int8": ExpertsInt8Config, + "neuron_quant": NeuronQuantConfig, + "ipex": IPEXConfig, + } + + return method_to_config[quantization] __all__ = [ diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 7ea2f9be2191d..5d38b4b1ef14b 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -19,8 +19,8 @@ from vllm.config import VllmConfig from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) -from vllm.model_executor.layers.quantization import (AWQConfig, - QuantizationConfig) +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.quantization.awq import AWQConfig from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.models.intern_vit import (InternVisionModel, InternVisionPatchModel) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 0ac81387b1bd8..531608a877f2f 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -51,9 +51,10 @@ RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.pooler import Pooler, PoolingType -from vllm.model_executor.layers.quantization import (GPTQConfig, - GPTQMarlinConfig, - QuantizationConfig) +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.quantization.gptq import GPTQConfig +from vllm.model_executor.layers.quantization.gptq_marlin import ( + GPTQMarlinConfig) from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead from vllm.model_executor.model_loader.weight_utils import default_weight_loader diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 9c5212ace1346..d2911ef650743 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -10,6 +10,8 @@ import torch from typing_extensions import ParamSpec +# import custom ops, trigger op registration +import vllm._C # noqa from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 022256996f97b..bb3a49c8b73bc 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -9,6 +9,17 @@ logger = init_logger(__name__) +try: + import vllm._C # noqa: F401 +except ImportError as e: + logger.warning("Failed to import from vllm._C with %r", e) + +# import custom ops, trigger op registration +try: + import vllm._rocm_C # noqa: F401 +except ImportError as e: + logger.warning("Failed to import from vllm._rocm_C with %r", e) + if os.environ.get("VLLM_WORKER_MULTIPROC_METHOD", None) in ["fork", None]: logger.warning("`fork` method is not supported by ROCm. 
" "VLLM_WORKER_MULTIPROC_METHOD is overridden to" diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index dc183dbfc9b96..d5056b18fe968 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -1,4 +1,5 @@ import logging +import os from contextlib import contextmanager from typing import TYPE_CHECKING, Optional @@ -18,6 +19,14 @@ def load_general_plugins(): processes. They should be designed in a way that they can be loaded multiple times without causing issues. """ + + # all processes created by vllm will load plugins, + # and here we can inject some common environment variables + # for all processes. + + # see https://github.com/vllm-project/vllm/issues/10480 + os.environ['TORCHINDUCTOR_COMPILE_THREADS'] = '1' + global plugins_loaded if plugins_loaded: return From 6c1208d083fbaaf89c6d812f4d3424e15182f652 Mon Sep 17 00:00:00 2001 From: Pavani Majety Date: Wed, 20 Nov 2024 19:56:47 -0800 Subject: [PATCH 074/397] [Core] Add Sliding Window Support with Flashinfer (#10462) Signed-off-by: Pavani Majety --- .../block/e2e/test_correctness_sliding_window.py | 12 ++++++++++-- vllm/attention/backends/flashinfer.py | 13 ++++++++----- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/tests/core/block/e2e/test_correctness_sliding_window.py b/tests/core/block/e2e/test_correctness_sliding_window.py index 9320a9ef62314..415d0bd8237df 100644 --- a/tests/core/block/e2e/test_correctness_sliding_window.py +++ b/tests/core/block/e2e/test_correctness_sliding_window.py @@ -3,6 +3,7 @@ import pytest +from tests.kernels.utils import override_backend_env_variable from vllm import LLM, SamplingParams from .conftest import get_text_from_llm_generator @@ -28,8 +29,9 @@ @pytest.mark.parametrize("test_llm_kwargs", [{}]) @pytest.mark.parametrize("batch_size", [5]) @pytest.mark.parametrize("seed", [1]) +@pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER", "XFORMERS"]) def test_sliding_window_retrival(baseline_llm_generator, test_llm_generator, - batch_size, seed): + batch_size, seed, backend, monkeypatch): """ The test does a bunch of assignments "x1 = 10\nx2 = 33\n..." and then asks for value of one of them (which is outside the sliding window). @@ -38,6 +40,8 @@ def test_sliding_window_retrival(baseline_llm_generator, test_llm_generator, Additionally, we compare the results of the v1 and v2 managers. """ + override_backend_env_variable(monkeypatch, backend) + sampling_params = SamplingParams( max_tokens=1024, ignore_eos=True, @@ -84,7 +88,9 @@ def test_sliding_window_retrival(baseline_llm_generator, test_llm_generator, @pytest.mark.parametrize("test_llm_kwargs", [{"enable_chunked_prefill": True}]) @pytest.mark.parametrize("batch_size", [5]) @pytest.mark.parametrize("seed", [1]) -def test_sliding_window_chunked_prefill(test_llm_generator, batch_size, seed): +@pytest.mark.parametrize("backend", ["FLASH_ATTN", "FLASHINFER", "XFORMERS"]) +def test_sliding_window_chunked_prefill(test_llm_generator, batch_size, seed, + backend, monkeypatch): """ This is similar to test_sliding_window_retrival, however, it doesn't compare against the v1 block manager since v1 doesn't support @@ -93,6 +99,8 @@ def test_sliding_window_chunked_prefill(test_llm_generator, batch_size, seed): The results with and without chunked prefill are not the same due to numerical instabilities. 
""" + override_backend_env_variable(monkeypatch, backend) + sampling_params = SamplingParams( max_tokens=10, ignore_eos=True, diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py index 107e3bbf79666..b61c660e3e280 100644 --- a/vllm/attention/backends/flashinfer.py +++ b/vllm/attention/backends/flashinfer.py @@ -757,9 +757,8 @@ def __init__( if alibi_slopes is not None: alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32) self.alibi_slopes = alibi_slopes - if sliding_window is not None: - raise ValueError("Sliding window is not supported in FlashInfer.") - self.sliding_window = (-1, -1) + self.sliding_window = ((sliding_window - 1, + 0) if sliding_window is not None else (-1, -1)) self.kv_cache_dtype = kv_cache_dtype self.logits_soft_cap = logits_soft_cap @@ -865,6 +864,8 @@ def unified_flash_infer( assert query.shape[0] == num_prefill_tokens assert decode_query.shape[0] == num_decode_tokens + window_left = window_size[0] if window_size is not None else -1 + prefill_output: Optional[torch.Tensor] = None decode_output: Optional[torch.Tensor] = None if prefill_meta := attn_metadata.prefill_metadata: @@ -895,7 +896,8 @@ def unified_flash_infer( logits_soft_cap=logits_soft_cap, causal=True, k_scale=k_scale, - v_scale=v_scale) + v_scale=v_scale, + window_left=window_left) if decode_meta := attn_metadata.decode_metadata: assert attn_metadata.decode_metadata is not None assert attn_metadata.decode_metadata.decode_wrapper is not None @@ -905,7 +907,8 @@ def unified_flash_infer( sm_scale=softmax_scale, logits_soft_cap=logits_soft_cap, k_scale=k_scale, - v_scale=v_scale) + v_scale=v_scale, + window_left=window_left) if prefill_output is None and decode_output is not None: # Decode only batch. From 9d827170a3aa586dfb458bf28d18fd279bdbf580 Mon Sep 17 00:00:00 2001 From: Mengqing Cao Date: Thu, 21 Nov 2024 12:44:20 +0800 Subject: [PATCH 075/397] [Platforms] Add `device_type` in `Platform` (#10508) Signed-off-by: MengqingCao --- vllm/config.py | 17 ++--------------- vllm/platforms/cpu.py | 1 + vllm/platforms/cuda.py | 1 + vllm/platforms/hpu.py | 1 + vllm/platforms/interface.py | 1 + vllm/platforms/neuron.py | 1 + vllm/platforms/openvino.py | 1 + vllm/platforms/rocm.py | 1 + vllm/platforms/tpu.py | 1 + vllm/platforms/xpu.py | 1 + 10 files changed, 11 insertions(+), 15 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 7522486782cc9..0ed92f370cf50 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1193,21 +1193,8 @@ class DeviceConfig: def __init__(self, device: str = "auto") -> None: if device == "auto": # Automated device type detection - if current_platform.is_cuda_alike(): - self.device_type = "cuda" - elif current_platform.is_neuron(): - self.device_type = "neuron" - elif current_platform.is_hpu(): - self.device_type = "hpu" - elif current_platform.is_openvino(): - self.device_type = "openvino" - elif current_platform.is_tpu(): - self.device_type = "tpu" - elif current_platform.is_cpu(): - self.device_type = "cpu" - elif current_platform.is_xpu(): - self.device_type = "xpu" - else: + self.device_type = current_platform.device_type + if self.device_type is None: raise RuntimeError("Failed to infer device type") else: # Device type is assigned explicitly diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 43cbafe709d84..0c4c916406223 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -19,6 +19,7 @@ class CpuPlatform(Platform): _enum = PlatformEnum.CPU + device_type: str = "cpu" @classmethod def 
get_device_name(cls, device_id: int = 0) -> str: diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index d2911ef650743..07562a8c3d71e 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -109,6 +109,7 @@ def device_id_to_physical_device_id(device_id: int) -> int: class CudaPlatform(Platform): _enum = PlatformEnum.CUDA + device_type: str = "cuda" @classmethod def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 1e0888a30ba96..36d944b3f24b8 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -5,6 +5,7 @@ class HpuPlatform(Platform): _enum = PlatformEnum.HPU + device_type: str = "hpu" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index f4849fa2ccfb0..68abec28ad71e 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -56,6 +56,7 @@ def to_int(self) -> int: class Platform: _enum: PlatformEnum + device_type: str def is_cuda(self) -> bool: return self._enum == PlatformEnum.CUDA diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 07d8398eda525..57e3c0dfae84c 100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -3,6 +3,7 @@ class NeuronPlatform(Platform): _enum = PlatformEnum.NEURON + device_type: str = "neuron" @classmethod def get_device_name(cls, device_id: int = 0) -> str: diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index ad69ced5417b3..130b8eec1b386 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -10,6 +10,7 @@ class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO + device_type: str = "openvino" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index bb3a49c8b73bc..c62241d8bb47b 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -29,6 +29,7 @@ class RocmPlatform(Platform): _enum = PlatformEnum.ROCM + device_type: str = "cuda" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 2a7ca9fb8c576..863875ef5c2d6 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -16,6 +16,7 @@ class TpuPlatform(Platform): _enum = PlatformEnum.TPU + device_type: str = "tpu" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index c3c4746d3cc25..536e17a5f93e8 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -16,6 +16,7 @@ class XPUPlatform(Platform): _enum = PlatformEnum.XPU + device_type: str = "xpu" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: From 8b0fe06c890a202eba24d517cc77562e4a8b0d0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luka=20Govedi=C4=8D?= Date: Thu, 21 Nov 2024 00:44:57 -0500 Subject: [PATCH 076/397] [torch.compile] Inductor code caching fix (#10273) Signed-off-by: luka Signed-off-by: Luka Govedic --- tests/compile/backend.py | 16 +- tests/compile/test_functionalization.py | 95 +++++++++ tests/compile/test_fusion.py | 11 +- tests/compile/test_pass_manager.py | 35 ++++ vllm/compilation/backends.py | 236 ++-------------------- vllm/compilation/fix_functionalization.py | 177 ++++++++++++++++ vllm/compilation/fusion.py | 13 +- vllm/compilation/inductor_pass.py | 100 
++++++--- vllm/compilation/pass_manager.py | 77 +++++++ vllm/compilation/reshapes.py | 8 +- vllm/compilation/vllm_inductor_pass.py | 53 +++++ vllm/config.py | 60 ++++-- vllm/utils.py | 9 - vllm/v1/worker/gpu_model_runner.py | 2 +- 14 files changed, 604 insertions(+), 288 deletions(-) create mode 100644 tests/compile/test_functionalization.py create mode 100644 tests/compile/test_pass_manager.py create mode 100644 vllm/compilation/fix_functionalization.py create mode 100644 vllm/compilation/pass_manager.py create mode 100644 vllm/compilation/vllm_inductor_pass.py diff --git a/tests/compile/backend.py b/tests/compile/backend.py index 9d5c68274374e..8fa10e5bd1b37 100644 --- a/tests/compile/backend.py +++ b/tests/compile/backend.py @@ -1,7 +1,9 @@ from copy import deepcopy -from typing import Callable +from typing import Callable, Union -import torch +from torch import fx + +from vllm.compilation.inductor_pass import InductorPass class TestBackend: @@ -11,19 +13,21 @@ class TestBackend: It also saves the graph before and after the custom passes for inspection. """ - def __init__(self, *args: Callable[[torch.fx.Graph], None]): - self.custom_passes = args + def __init__(self, *passes: Union[InductorPass, Callable[[fx.Graph], + None]]): + self.custom_passes = list(passes) from torch._inductor import config self.current_config = config.shallow_copy_dict() + self.current_config['force_disable_caches'] = True self.current_config['post_grad_custom_post_pass'] = self.post_pass - def __call__(self, graph: torch.fx.GraphModule, example_inputs): + def __call__(self, graph: fx.GraphModule, example_inputs): from torch._inductor.compile_fx import compile_fx return compile_fx(graph, example_inputs, config_patches=self.current_config) - def post_pass(self, graph: torch.fx.Graph): + def post_pass(self, graph: fx.Graph): self.graph_pre_pass = deepcopy(graph) for pass_ in self.custom_passes: pass_(graph) diff --git a/tests/compile/test_functionalization.py b/tests/compile/test_functionalization.py new file mode 100644 index 0000000000000..5036189077be2 --- /dev/null +++ b/tests/compile/test_functionalization.py @@ -0,0 +1,95 @@ +import pytest +import torch + +import vllm.envs as envs +from vllm import LLM, SamplingParams +from vllm.compilation.fix_functionalization import FixFunctionalizationPass +from vllm.compilation.fusion import (FusionPass, find_auto_fn, + find_auto_fn_maybe) +from vllm.compilation.reshapes import RedundantReshapesPass +from vllm.compilation.vllm_inductor_pass import is_func +from vllm.config import CompilationConfig + +from .backend import TestBackend + +OPS_IN_MODEL = [ + torch.ops._C.rotary_embedding.default, + torch.ops._C.fused_add_rms_norm.default, + torch.ops._C.silu_and_mul.default, +] + +RMS_OP = torch.ops._C.rms_norm.default + +RMS_QUANT_OPS = { + "static_fp8": [ + torch.ops._C.rms_norm_static_fp8_quant.default, + torch.ops._C.fused_add_rms_norm_static_fp8_quant.default + ], +} + +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] + + +@pytest.mark.parametrize("model", + ["nm-testing/TinyLlama-1.1B-Chat-v1.0-FP8-e2e"]) +@pytest.mark.parametrize("do_fusion", [True, False]) +@pytest.mark.skipif(envs.VLLM_TARGET_DEVICE != "cuda", + reason="Only test on CUDA") +def test_fix_functionalization(model: str, do_fusion: bool): + torch.set_default_device("cuda") + + config = CompilationConfig.PassConfig(enable_fusion=do_fusion, + enable_reshape=True) + reshape_pass = RedundantReshapesPass(config) + 
fusion_pass = FusionPass.instance(config) + + passes = [reshape_pass, fusion_pass] if do_fusion else [reshape_pass] + func_pass = FixFunctionalizationPass(config) + backend_func = TestBackend(*passes, func_pass) + backend_no_func = TestBackend(*passes) + + # instantiate a full engine and manually compile the model 2x + # (with and without FixFunctionalizationPass) + llm = LLM(model=model, enforce_eager=True) + model_runner = llm.llm_engine.model_executor.driver_worker.model_runner + orig_model = model_runner.model + # TODO mark inputs dynamic? (currently torch.compile is triggered 4x) + # Can only do that by using the decorator but then we'd have to instantiate + # 2 LLM instances. + + sampling_params = SamplingParams(temperature=0.0, top_p=1.0) + model_runner.model = torch.compile(orig_model, + fullgraph=True, + backend=backend_func) + gen_func = llm.generate(prompts, sampling_params) + + model_runner.model = torch.compile(orig_model, + fullgraph=True, + backend=backend_no_func) + gen_no_func = llm.generate(prompts, sampling_params) + + for output_func, output_no_func in zip(gen_func, gen_no_func): + assert output_func.outputs[0].text == output_no_func.outputs[0].text + + # OPS_IN_MODEL always appear. RMS_OP is fused away if we run fusion, + # and replaced by fused quantized ops in RMS_QUANT_OPS. + ops = OPS_IN_MODEL + (RMS_QUANT_OPS["static_fp8"] + if do_fusion else [RMS_OP]) + + for op in ops: + find_auto_fn(backend_no_func.graph_post_pass.nodes, op) + assert find_auto_fn_maybe(backend_func.graph_post_pass.nodes, + op) is None # noqa: E501 + + # make sure the ops were all de-functionalized + found = dict() + for node in backend_func.graph_post_pass.nodes: + for op in ops: + if is_func(node, op): + found[op] = True + assert all(found[op] for op in ops) diff --git a/tests/compile/test_fusion.py b/tests/compile/test_fusion.py index 4db79b070fd8d..f92ec8d0de5f1 100644 --- a/tests/compile/test_fusion.py +++ b/tests/compile/test_fusion.py @@ -38,12 +38,6 @@ def forward(self, x): return y3 -# Init does pattern registration, which can only happen once -config = CompilationConfig(enable_fusion=True) -reshape_pass = RedundantReshapesPass(config) -fusion_pass = FusionPass.instance(config) - - @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16]) @pytest.mark.parametrize("hidden_size", [64, 3392, 4096]) @pytest.mark.parametrize("num_tokens", [7, 256, 533, 2048, 2049]) @@ -58,6 +52,11 @@ def test_fusion_rmsnorm_quant(dtype, hidden_size, num_tokens, eps): pytest.skip("Only test eps=1e-5 for now") # Reshape pass is needed for the fusion pass to work + config = CompilationConfig.PassConfig(enable_fusion=True, + enable_reshape=True) + reshape_pass = RedundantReshapesPass(config) + fusion_pass = FusionPass.instance(config) + backend = TestBackend(reshape_pass, fusion_pass) model = TestModel(hidden_size, eps) diff --git a/tests/compile/test_pass_manager.py b/tests/compile/test_pass_manager.py new file mode 100644 index 0000000000000..03e7535093c5d --- /dev/null +++ b/tests/compile/test_pass_manager.py @@ -0,0 +1,35 @@ +import pickle + +import pytest +import torch +from torch._inductor.codecache import BypassFxGraphCache + +from vllm.compilation.config import CompilationConfig +from vllm.compilation.inductor_pass import (CallableInductorPass, + as_inductor_pass) +from vllm.compilation.pass_manager import PostGradPassManager + + +def simple_callable(graph: torch.fx.Graph): + pass + + +@as_inductor_pass(files=(__file__, )) +def callable_decorated(graph: torch.fx.Graph): + pass + + 
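For context, the wrapper added in this patch (vllm/compilation/inductor_pass.py below) is what makes a plain callable usable here: wrapping it supplies the UUID that Inductor's code cache keys on. A minimal sketch, with an illustrative no-op pass body:

    import torch
    from vllm.compilation.inductor_pass import CallableInductorPass

    def noop_pass(graph: torch.fx.Graph) -> None:
        # Stand-in for a real custom pass; it intentionally does nothing.
        pass

    # With no explicit uuid, a hash of the callable's source is used instead,
    # so editing the pass body invalidates previously cached compilations.
    wrapped = CallableInductorPass(noop_pass, uuid="noop_pass-v1")
    assert wrapped.uuid() == "noop_pass-v1"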
+@pytest.mark.parametrize( + "works, callable", + [(False, simple_callable), (True, callable_decorated), + (True, CallableInductorPass(simple_callable, "simple_callable"))]) +def test_pass_manager(works: bool, callable): + config = CompilationConfig().pass_config + pass_manager = PostGradPassManager([callable]) + pass_manager.configure(config) # Adds default passes + + if works: + pickle.dumps(pass_manager) + else: + with pytest.raises(BypassFxGraphCache): + pickle.dumps(pass_manager) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 416cffd326489..464bc2af8fd6d 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -1,6 +1,5 @@ import copy import dataclasses -import operator from contextlib import ExitStack from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple from unittest.mock import patch @@ -11,205 +10,15 @@ import vllm.envs as envs from vllm.config import CompilationConfig from vllm.logger import init_logger -from vllm.utils import combine_fx_passes, weak_ref_tensors +from vllm.utils import weak_ref_tensors from .counter import compilation_counter -from .fusion import FusionPass -from .reshapes import RedundantReshapesPass +from .inductor_pass import InductorPass +from .pass_manager import PostGradPassManager logger = init_logger(__name__) -def fix_functionalization(graph: fx.Graph): - """ - Rewrite the graph module to replace the pattern involving - torch._higher_order_ops.auto_functionalize.auto_functionalized - with a direct call to the inplace custom op. - - # TODO: check if PyTorch nightly has fixed this issue - """ - - # debug code, if we want to see the graph before the transformation - # with open("before.py", "w") as f: - # print(graph.python_code(root_module="self", verbose=True).src, file=f) - - nodes_to_remove = [] - - for node in graph.nodes: - # Identify the auto_functionalized node - if node.op == 'call_function' and node.target == torch._higher_order_ops.auto_functionalize.auto_functionalized: # noqa - if node.args[0] == torch.ops._C.rotary_embedding.default: - # manual replace for rotary_embedding - - # Now, collect the arguments - kwargs = node.kwargs - - query = kwargs['query'] - mm_node = query.args[0].args[0] - - # Create a new call to torch.ops._C.rotary_embedding.default - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function(torch.ops._C.rotary_embedding.default, - kwargs=kwargs) - - # Remove the auto_functionalized node - # Since the node may have outputs, we need to handle its users - # Replace uses of the outputs (getitem nodes) with mm_node - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - # Remove the getitem node - for getitem_user in list(user.users): - if (getitem_user.op == 'call_function' - and getitem_user.target - == torch.ops.aten.slice_scatter.default): - # Replace the uses of slice_scatter node - # with mm_node - getitem_user.replace_all_uses_with(mm_node) - nodes_to_remove.append(getitem_user) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - elif node.args[0] == torch.ops._C.fused_add_rms_norm.default: - # manual replace for fused_add_rms_norm - # this is the most effective optimization for llama - # failing to do this will result in many unnecessary copies - - kwargs = node.kwargs - - input = kwargs['input'] - residual = kwargs['residual'] - - # Create a new call to 
torch.ops._C.rotary_embedding.default - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function( - torch.ops._C.fused_add_rms_norm.default, kwargs=kwargs) - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - # Remove the getitem node - if user.args[1] == 1: - replace_node = input - elif user.args[1] == 2: - replace_node = residual - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - elif (node.args[0] == - torch.ops._C.fused_add_rms_norm_static_fp8_quant.default): - # manual replace for fused_add_rms_norm_static_fp8_quant - # this is the most effective optimization for llama - # failing to do this will result in many unnecessary copies - - kwargs = node.kwargs - - result = kwargs['result'] - residual = kwargs['residual'] - - # Create a new call to - # torch.ops._C.fused_add_rms_norm_static_fp8_quant.default - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function( - torch.ops._C.fused_add_rms_norm_static_fp8_quant. - default, - kwargs=kwargs) - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - # Remove the getitem node - if user.args[1] == 1: - replace_node = result - elif user.args[1] == 2: - replace_node = residual - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - elif node.args[0] == torch.ops._C.rms_norm.default: - # manual replace for rms_norm - - kwargs = node.kwargs - - replace_node = kwargs['result'] - # Create a new call to torch.ops._C.rms_norm.default - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function(torch.ops._C.rms_norm.default, - kwargs=kwargs) - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - elif node.args[ - 0] == torch.ops._C.rms_norm_static_fp8_quant.default: # noqa - # manual replace for rms_norm_static_fp8_quant - - kwargs = node.kwargs - - replace_node = kwargs['result'] - # Create a new call to torch.ops._C.rms_norm_static_fp8_quant.default # noqa - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run dead code elimination, - # otherwise this op will be removed - graph.call_function( - torch.ops._C.rms_norm_static_fp8_quant.default, - kwargs=kwargs) - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - elif node.args[0] == torch.ops._C.silu_and_mul.default: - # manual replace for silu_and_mul - - kwargs = node.kwargs - - input = kwargs['input'] - out = kwargs['out'] - - # Create a new call to torch.ops._C.silu_and_mul.default - # cannot use kwargs, because we have an `out`, see https://github.com/pytorch/pytorch/blob/a00faf440888ffb724bad413f329a49e2b6388e7/torch/_inductor/lowering.py#L351 # noqa - with graph.inserting_before(node): - # just insert the call to the custom op - # NOTE: don't run 
dead code elimination, - # otherwise this op will be removed - graph.call_function( - torch.ops._C.silu_and_mul.default, - args=(out, input), - ) - replace_node = out - - for user in list(node.users): - if user.op == 'call_function' and user.target == operator.getitem: # noqa - user.replace_all_uses_with(replace_node) - nodes_to_remove.append(user) - nodes_to_remove.append(node) - - # Remove the nodes all at once - for node in nodes_to_remove: - graph.erase_node(node) - - # debug code, if we want to see the graph after the transformation - # with open("after.py", "w") as f: - # print(graph.python_code(root_module="self", verbose=True).src, file=f) - - def wrap_inductor(graph, example_inputs, additional_inductor_config, @@ -368,12 +177,8 @@ class VllmBackend: The major work of this backend is to split the graph into piecewise graphs, and pass them to the piecewise backend. - This backend also handles custom passes and adds them to Inductor config. - The order of the post-grad post-passes is: - 1. post_grad_passes (constructor parameter) - 2. config["post_grad_custom_post_pass"] - 3. fix_functionalization - This way, all passes operate on a functionalized graph. + This backend also adds the PostGradPassManager to Inductor config, + which handles the post-grad passes. """ compilation_configs: CompilationConfig @@ -402,7 +207,9 @@ def __init__( # streams, it might not be safe to share a global pool. # only investigate this when we use multiple streams self.graph_pool = global_graph_pool - self.post_grad_passes = [] + + # Passes to run on the graph post-grad. + self.post_grad_pass_manager = PostGradPassManager() self.sym_tensor_indices = [] self.input_buffers = [] @@ -412,24 +219,19 @@ def __init__( # `torch.compile` is JIT compiled, so we don't need to # do anything here - def add_passes_to_config(self): + def configure_post_pass(self): config = self.compilation_configs - passes = list(self.post_grad_passes) - - passes = passes + [RedundantReshapesPass(config)] - - if config.enable_fusion: - passes = passes + [FusionPass.instance(config)] + self.post_grad_pass_manager.configure(config.pass_config) + # Post-grad custom passes are run using the post_grad_custom_post_pass + # hook. If a pass for that hook exists, add it to the pass manager. 
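Anything placed in that hook ahead of time must already be an InductorPass (the assert just below enforces this); a bare function would have to be wrapped first. A hedged sketch of how a caller might register such a pass, assuming compilation_config is the CompilationConfig handed to the engine and strip_debug_nodes is an illustrative user pass:

    from vllm.compilation.inductor_pass import CallableInductorPass

    def strip_debug_nodes(graph):
        ...  # illustrative: mutate the torch.fx.Graph in place

    compilation_config.inductor_compile_config[
        "post_grad_custom_post_pass"] = CallableInductorPass(strip_debug_nodes)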
inductor_config = config.inductor_compile_config - if "post_grad_custom_post_pass" in inductor_config: - passes = passes + [inductor_config["post_grad_custom_post_pass"]] - - # add the fix_functionalization pass last, so that all other - # passes operate on a functionalized graph - passes = passes + [fix_functionalization] - combined_pass = combine_fx_passes(passes) - inductor_config["post_grad_custom_post_pass"] = combined_pass + PASS_KEY = "post_grad_custom_post_pass" + if PASS_KEY in inductor_config: + # Config should automatically wrap all inductor passes + assert isinstance(inductor_config[PASS_KEY], InductorPass) + self.post_grad_pass_manager.add(inductor_config[PASS_KEY]) + inductor_config[PASS_KEY] = self.post_grad_pass_manager def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: @@ -444,7 +246,7 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: # we get the sizes to capture for cudagraph # from compilation context self.compilation_configs.init_during_runtime() - self.add_passes_to_config() + self.configure_post_pass() self.split_gm, self.piecewise_graphs = split_graph( graph, self.compilation_configs.splitting_ops) diff --git a/vllm/compilation/fix_functionalization.py b/vllm/compilation/fix_functionalization.py new file mode 100644 index 0000000000000..3584cc3608caf --- /dev/null +++ b/vllm/compilation/fix_functionalization.py @@ -0,0 +1,177 @@ +import operator +from typing import Dict, Iterable, List, Optional, Tuple, Union + +import torch +from torch._higher_order_ops.auto_functionalize import auto_functionalized + +from vllm.logger import init_logger + +from .vllm_inductor_pass import VllmInductorPass, is_func + +logger = init_logger(__name__) + + +class FixFunctionalizationPass(VllmInductorPass): + """ + This pass defunctionalizes certain nodes to avoid redundant tensor copies. + After this pass, DCE (dead-code elimination) should never be run, + as de-functionalized nodes may appear as dead code. + + To add new nodes to defunctionalize, add to the if-elif chain in __call__. + """ + + def __call__(self, graph: torch.fx.Graph): + self.begin() + self.dump_graph(graph, "before_fix_functionalization") + + self.nodes_to_remove: List[torch.fx.Node] = [] + count = 0 + for node in graph.nodes: + if not is_func(node, auto_functionalized): + continue # Avoid deep if-elif nesting + + kwargs = node.kwargs + at_target = node.args[0] + + if at_target == torch.ops._C.rotary_embedding.default: + query = kwargs['query'] + mm_node = query.args[0].args[0] + + # rotary_embedding is a special case: the two mutating inputs + # are query and key, which are slices of mm_node. + # While functionalized, results at[1] and at[2] are scattered + # back into mm_node. After de-functionalization, we can just + # use mm_node directly. + for idx, user in self.getitem_users(node).items(): + for user_of_getitem in user.users: + if is_func(user_of_getitem, + torch.ops.aten.slice_scatter.default): + user_of_getitem.replace_all_uses_with(mm_node) + self._remove(user_of_getitem) + self._remove(user) + + self.insert_defunctionalized(graph, node) + self._remove(node) + + # These 2 replacements avoid the most copies for LLaMa. 
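For orientation, every branch in this chain performs the same shape of rewrite: an auto_functionalized node plus its getitem users collapses back into a single in-place call on the original buffers. Schematically, for the silu_and_mul case handled further down (illustrative FX-level pseudocode, not lines from this file):

    # before (functionalized):
    ret = auto_functionalized(torch.ops._C.silu_and_mul.default, out=buf, input=x)
    y = operator.getitem(ret, 1)   # materializes a fresh tensor -> extra copy
    # after this pass:
    torch.ops._C.silu_and_mul.default(buf, x)
    y = buf                        # users now read the mutated buffer directly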
+ elif at_target == torch.ops._C.fused_add_rms_norm.default: + mutated_args = {1: 'input', 2: 'residual'} + self.defunctionalize(graph, node, mutated_args) + elif at_target == torch.ops._C.fused_add_rms_norm_static_fp8_quant.default: # noqa: E501 + mutated_args = {1: 'result', 2: 'residual'} + self.defunctionalize(graph, node, mutated_args) + + elif at_target in [ + torch.ops._C.rms_norm.default, + torch.ops._C.rms_norm_static_fp8_quant.default + ]: + mutated_args = {1: 'result'} + self.defunctionalize(graph, node, mutated_args) + + elif at_target == torch.ops._C.silu_and_mul.default: + mutated_args = {1: 'out'} + # Because we have an 'out', need to specify args directly + self.defunctionalize(graph, + node, + mutated_args, + args=('out', 'input')) + else: + continue # skip the count + + count += 1 + + self.dump_graph(graph, "before_fix_functionalization_cleanup") + + # Remove the nodes all at once + count_removed = len(self.nodes_to_remove) + for node in self.nodes_to_remove: + graph.erase_node(node) + + logger.debug("De-functionalized %s nodes, removed %s nodes", count, + count_removed) + self.dump_graph(graph, "after_fix_functionalization") + self.end_and_log() + + def _remove(self, node_or_nodes: Union[torch.fx.Node, + Iterable[torch.fx.Node]]): + """ + Stage a node (or nodes) for removal at the end of the pass. + """ + if isinstance(node_or_nodes, torch.fx.Node): + self.nodes_to_remove.append(node_or_nodes) + else: + self.nodes_to_remove.extend(node_or_nodes) + + def defunctionalize(self, + graph: torch.fx.Graph, + node: torch.fx.Node, + mutated_args: Dict[int, Union[torch.fx.Node, str]], + args: Optional[Tuple[Union[torch.fx.Node, str], + ...]] = None): + """ + De-functionalize a node by replacing it with a call to the original. + It also replaces the getitem users with the mutated arguments. + See replace_users_with_mutated_args and insert_defunctionalized. + """ + self.replace_users_with_mutated_args(node, mutated_args) + self.insert_defunctionalized(graph, node, args=args) + self._remove(node) + + def replace_users_with_mutated_args(self, node: torch.fx.Node, + mutated_args: Dict[int, + Union[torch.fx.Node, + str]]): + """ + Replace all getitem users of the auto-functionalized node with the + mutated arguments. + :param node: The auto-functionalized node + :param mutated_args: The mutated arguments, indexed by getitem index. + If the value of an arg is a string, `node.kwargs[arg]` is used. + """ + for idx, user in self.getitem_users(node).items(): + arg = mutated_args[idx] + arg = node.kwargs[arg] if isinstance(arg, str) else arg + user.replace_all_uses_with(arg) + self._remove(user) + + def getitem_users(self, node: torch.fx.Node) -> Dict[int, torch.fx.Node]: + """ + Returns the operator.getitem users of the auto-functionalized node, + indexed by the index they are getting. + """ + users = {} + for user in node.users: + if is_func(user, operator.getitem): + idx = user.args[1] + users[idx] = user + return users + + def insert_defunctionalized(self, + graph: torch.fx.Graph, + node: torch.fx.Node, + args: Optional[Tuple[Union[torch.fx.Node, str], + ...]] = None): + """ + Insert a new defunctionalized node into the graph before node. + If one of the kwargs is 'out', provide args directly, + as node.kwargs cannot be used. 
+ See https://github.com/pytorch/pytorch/blob/a00faf440888ffb724bad413f329a49e2b6388e7/torch/_inductor/lowering.py#L351 + + :param graph: Graph to insert the defunctionalized node into + :param node: The auto-functionalized node to defunctionalize + :param args: If we cannot use kwargs, specify args directly. + If an arg is a string, `node.kwargs[arg]` is used. + """ # noqa: E501 + assert is_func(node, auto_functionalized), \ + f"node must be auto-functionalized, is {node} instead" + + # Create a new call to the original function + with graph.inserting_before(node): + function = node.args[0] + if args is None: + graph.call_function(function, kwargs=node.kwargs) + else: + # Args passed as strings refer to items in node.kwargs + args = tuple(node.kwargs[arg] if isinstance(arg, str) else arg + for arg in args) + graph.call_function(function, args=args) diff --git a/vllm/compilation/fusion.py b/vllm/compilation/fusion.py index e6a3afef85e1b..5efa410fab6a0 100644 --- a/vllm/compilation/fusion.py +++ b/vllm/compilation/fusion.py @@ -6,10 +6,11 @@ from torch._inductor.pattern_matcher import (Match, PatternMatcherPass, fwd_only, register_replacement) -from vllm.compilation.inductor_pass import InductorPass from vllm.config import CompilationConfig from vllm.logger import init_logger +from .vllm_inductor_pass import VllmInductorPass, is_func + logger = init_logger(__name__) @@ -90,8 +91,6 @@ def empty_fp32(*args, **kwargs): # Utilities for post-processing multi-output matches -def is_func(node: torch.fx.Node, target) -> bool: - return node.op == "call_function" and node.target == target # Returns the first auto_functionalized node with the given op (if it exists) @@ -127,7 +126,7 @@ def find_getitem(node: torch.fx.Node, idx: int) -> torch.fx.Node: return ret -class FusionPass(InductorPass): +class FusionPass(VllmInductorPass): """ This pass fuses a pre-defined set of custom ops into fused ops. It uses the torch pattern matcher to find the patterns and replace them. @@ -142,7 +141,7 @@ class FusionPass(InductorPass): _instance: 'Optional[FusionPass]' = None @classmethod - def instance(cls, config: CompilationConfig): + def instance(cls, config: CompilationConfig.PassConfig): """ Get the singleton instance of the FusionPass. 
If the instance exists, the config is updated but @@ -154,7 +153,7 @@ def instance(cls, config: CompilationConfig): cls._instance.config = config return cls._instance - def __init__(self, config: CompilationConfig): + def __init__(self, config: CompilationConfig.PassConfig): assert self.__class__._instance is None, \ "FusionPass singleton instance already exists" super().__init__(config) @@ -278,6 +277,7 @@ def process_matches(self, graph: torch.fx.Graph): for node in match.nodes) def __call__(self, graph: torch.fx.Graph): + self.begin() self.dump_graph(graph, "before_fusion") count = self.patterns.apply(graph) @@ -289,3 +289,4 @@ def __call__(self, graph: torch.fx.Graph): logger.debug("Post-processed %s matches", len(self.matches)) self.dump_graph(graph, "after_fusion") self.matches.clear() + self.end_and_log() diff --git a/vllm/compilation/inductor_pass.py b/vllm/compilation/inductor_pass.py index 8082a08b40019..f6846c08ac841 100644 --- a/vllm/compilation/inductor_pass.py +++ b/vllm/compilation/inductor_pass.py @@ -1,38 +1,84 @@ +import hashlib +import inspect +import types from abc import ABC, abstractmethod +from typing import Any, Callable, Optional, Union import torch - -from vllm.config import CompilationConfig -# yapf: disable -from vllm.distributed import get_tensor_model_parallel_rank as get_tp_rank -from vllm.distributed import ( - get_tensor_model_parallel_world_size as get_tp_world_size) -from vllm.distributed import model_parallel_is_initialized as p_is_init -# yapf: enable -from vllm.logger import init_logger - -logger = init_logger(__name__) +from torch import fx class InductorPass(ABC): + """ + General custom inductor pass interface. + TODO(torch==2.6) use torch._inductor.custom_graph_pass.CustomGraphPass + """ @abstractmethod def __call__(self, graph: torch.fx.Graph): + """ + Execute the pass on the given graph. + """ raise NotImplementedError - def __init__(self, config: CompilationConfig): - self.config = config - - def dump_graph(self, graph: torch.fx.Graph, stage: str): - if stage in self.config.dump_graph_stages: - # Make sure filename includes rank in the distributed setting - parallel = p_is_init() and get_tp_world_size() > 1 - rank = f"-{get_tp_rank()}" if parallel else "" - filepath = self.config.dump_graph_dir / f"{stage}{rank}.py" - - logger.info("Printing graph to %s", filepath) - with open(filepath, "w") as f: - src = graph.python_code(root_module="self", verbose=True).src - # Add imports so it's not full of errors - print("import torch; from torch import device", file=f) - print(src, file=f) + def uuid(self) -> Any: + """ + Provide a unique identifier for the pass, used in Inductor code cache. + This should depend on the pass implementation, so that changes to the + pass result in recompilation. + By default, the object source is hashed. + """ + return InductorPass.hash_source(self) + + @staticmethod + def hash_source(*srcs: Union[str, Any]): + """ + Utility method to hash the sources of functions or objects. + :param srcs: strings or objects to add to the hash. + Objects and functions have their source inspected. 
+ :return: + """ + hasher = hashlib.sha256() + for src in srcs: + if isinstance(src, str): + src_str = src + elif isinstance(src, types.FunctionType): + src_str = inspect.getsource(src) + else: + src_str = inspect.getsource(src.__class__) + hasher.update(src_str.encode("utf-8")) + return hasher.digest() + + +class CallableInductorPass(InductorPass): + """ + This class is a wrapper for a callable that automatically provides an + implementation of the UUID. + """ + + def __init__(self, + callable: Callable[[fx.Graph], None], + uuid: Optional[Any] = None): + self.callable = callable + if uuid is None: + uuid = InductorPass.hash_source(callable) + self._uuid = uuid + + def __call__(self, graph: torch.fx.Graph): + self.callable(graph) + + def uuid(self) -> Any: + return self._uuid + + def __getstate__(self): + """ + Pickling occurs in the Inductor code cache if a pass is not given to + the pass manager but is instead directly added to config as a pass. + See PostGradPassManager for more. + + TODO(torch==2.6), use the `uuid` method in CustomGraphPass instead. + """ + return self._uuid + + def __setstate__(self, state): + raise ValueError("Cannot unpickle CallableInductorPass") diff --git a/vllm/compilation/pass_manager.py b/vllm/compilation/pass_manager.py new file mode 100644 index 0000000000000..fb522ae053e97 --- /dev/null +++ b/vllm/compilation/pass_manager.py @@ -0,0 +1,77 @@ +from typing import List + +from torch import fx as fx + +from vllm.config import CompilationConfig +from vllm.logger import init_logger + +from .fix_functionalization import FixFunctionalizationPass +from .fusion import FusionPass +from .inductor_pass import InductorPass +from .reshapes import RedundantReshapesPass + +logger = init_logger(__name__) + + +class PostGradPassManager: + """ + The pass manager for post-grad passes. + It handles configuration, adding custom passes, and running passes. + It also supports pickling, which is used by the Inductor code cache. + TODO(torch==2.6), use CustomGraphPass + (torch._inductor.custom_graph_pass.CustomGraphPass) + + The order of the post-grad post-passes is: + 1. passes (constructor parameter) + 2. default passes (RedundantReshapesPass, FusionPass) + 3. config["post_grad_custom_post_pass"] (if it exists) + 4. fix_functionalization + This way, all passes operate on a functionalized graph. + """ + + def __init__(self): + self.passes: List[InductorPass] = [] + + def __call__(self, graph: fx.Graph): + for pass_ in self.passes: + pass_(graph) + + # always run fix_functionalization last + self.fix_functionalization(graph) + + def configure(self, pass_config: CompilationConfig.PassConfig): + self.pass_config = pass_config + if pass_config.enable_reshape: + self.passes += [RedundantReshapesPass(pass_config)] + + if pass_config.enable_fusion: + self.passes += [FusionPass.instance(pass_config)] + + self.fix_functionalization = FixFunctionalizationPass(pass_config) + + def add(self, pass_: InductorPass): + assert isinstance(pass_, InductorPass) + self.passes.append(pass_) + + def __getstate__(self): + """ + Custom pickling for the pass manager, as some passes cannot be pickled. + Pickling occurs because the pass manager is set as the value of + `config["post_grad_custom_post_pass"]` in the Inductor config. + The config is pickled to act as a key in the Inductor code cache. + Any other passes in the config are pickled as well. + + TODO(torch==2.6), use the `uuid` method in CustomGraphPass instead. 
+ """ + state = {"pass_config": self.pass_config.uuid(), "passes": []} + for pass_ in self.passes: + state["passes"].append(pass_.uuid()) + state["passes"].append(self.fix_functionalization.uuid()) + return state + + def __setstate__(self, state): + """ + Do not allow unpickling of the pass manager. + If this is needed in the future, it should properly pickle the passes. + """ + raise ValueError("Cannot unpickle PostGradPassManager") diff --git a/vllm/compilation/reshapes.py b/vllm/compilation/reshapes.py index 36597e119d2e1..63a369fe8d966 100644 --- a/vllm/compilation/reshapes.py +++ b/vllm/compilation/reshapes.py @@ -3,14 +3,14 @@ import torch.fx from torch import SymInt -from vllm.compilation.fusion import is_func -from vllm.compilation.inductor_pass import InductorPass from vllm.logger import init_logger +from .vllm_inductor_pass import VllmInductorPass, is_func + logger = init_logger(__name__) -class RedundantReshapesPass(InductorPass): +class RedundantReshapesPass(VllmInductorPass): """ This is an inductor pass that removes redundant reshape operations. It is required for RMSNorm-quant fusion to work properly. @@ -31,6 +31,7 @@ class RedundantReshapesPass(InductorPass): """ def __call__(self, graph: torch.fx.Graph): + self.begin() self.dump_graph(graph, "before_reshapes") count = 0 # Remove no-op reshapes/views: @@ -56,6 +57,7 @@ def __call__(self, graph: torch.fx.Graph): logger.debug("Removed %s no-op reshapes", count) self.dump_graph(graph, "after_reshapes") + self.end_and_log() def dims_equivalent(self, dim: Union[int, torch.fx.Node], i_dim: Union[int, SymInt]) -> bool: diff --git a/vllm/compilation/vllm_inductor_pass.py b/vllm/compilation/vllm_inductor_pass.py new file mode 100644 index 0000000000000..dbf6b8f7789e1 --- /dev/null +++ b/vllm/compilation/vllm_inductor_pass.py @@ -0,0 +1,53 @@ +import time + +import torch + +from vllm.config import CompilationConfig +# yapf: disable +from vllm.distributed import get_tensor_model_parallel_rank as get_tp_rank +from vllm.distributed import ( + get_tensor_model_parallel_world_size as get_tp_world_size) +from vllm.distributed import model_parallel_is_initialized as p_is_init +# yapf: enable +from vllm.logger import init_logger + +from .inductor_pass import InductorPass + +logger = init_logger(__name__) + + +def is_func(node: torch.fx.Node, target) -> bool: + return node.op == "call_function" and node.target == target + + +class VllmInductorPass(InductorPass): + """ + An inductor pass with access to vLLM PassConfig. + It provides timing, logging, and dumping utilities. 
+ """ + + def __init__(self, config: CompilationConfig.PassConfig): + self.config = config + self.pass_name = self.__class__.__name__ + + def dump_graph(self, graph: torch.fx.Graph, stage: str): + if stage in self.config.dump_graph_stages: + # Make sure filename includes rank in the distributed setting + parallel = p_is_init() and get_tp_world_size() > 1 + rank = f"-{get_tp_rank()}" if parallel else "" + filepath = self.config.dump_graph_dir / f"{stage}{rank}.py" + + logger.info("%s printing graph to %s", self.pass_name, filepath) + with open(filepath, "w") as f: + src = graph.python_code(root_module="self", verbose=True).src + # Add imports so it's not full of errors + print("import torch; from torch import device", file=f) + print(src, file=f) + + def begin(self): + self._start_time = time.perf_counter_ns() + + def end_and_log(self): + self._end_time = time.perf_counter_ns() + duration_ms = float(self._end_time - self._start_time) / 1.0e6 + logger.debug("%s completed in %.1f ms", self.pass_name, duration_ms) diff --git a/vllm/config.py b/vllm/config.py index 0ed92f370cf50..b2785e1ce2d5f 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1,5 +1,6 @@ import copy import enum +import hashlib import json import warnings from dataclasses import dataclass, field, replace @@ -13,6 +14,7 @@ from transformers import PretrainedConfig import vllm.envs as envs +from vllm.compilation.inductor_pass import CallableInductorPass, InductorPass from vllm.logger import init_logger from vllm.model_executor.layers.quantization import (QUANTIZATION_METHODS, get_quantization_config) @@ -2120,12 +2122,7 @@ class CompilationConfig(BaseModel): name because the config uses json format. If we pass the config from Python, functions can also be passed directly via Python object constructor, e.g. `CompilationConfig(inductor_passes={"a": func})` - - custom inductor passes: - - dump_graph_stages: list of stages for which we want to dump the graph. - Each pass defines its own stages (before, after, maybe in-between). - - dump_graph_dir: directory to dump the graph. Default is . - - enable_fusion: whether to enable the custom fusion pass. - TODO better pass enabling system. + - custom inductor passes: see PassConfig for more details Why we have different sizes for cudagraph and inductor: - cudagraph: a cudagraph captured for a specific size can only be used @@ -2157,9 +2154,43 @@ class CompilationConfig(BaseModel): cudagraph_capture_sizes: Optional[List[int]] = None cudagraph_copy_inputs: bool = False - dump_graph_stages: List[str] = Field(default_factory=list) - dump_graph_dir: Path = Field(default=Path(".")) - enable_fusion: bool = True + class PassConfig(BaseModel): + """ + Configuration for custom Inductor passes. + This is separate from general CompilationConfig so that inductor passes + don't all have access to full configuration - that would create a cycle + as the PassManager is set as a property of config. + - dump_graph_stages: list of stages for which we want to dump the graph. + Each pass defines its own stages (before, after, maybe in-between). + - dump_graph_dir: directory to dump the graphs. Default is . + - enable_fusion: whether to enable the custom fusion pass. + - enable_reshape: whether to enable the custom reshape elimination pass. + TODO better pass enabling system. 
+ """ + dump_graph_stages: List[str] = Field(default_factory=list) + dump_graph_dir: Path = Field(default=Path(".")) + enable_fusion: bool = True + enable_reshape: bool = True + + def uuid(self): + """ + Produces a hash unique to the pass configuration. + Any new fields that affect compilation should be added to the hash. + Do not include dump_graph_* in the hash - they don't affect + compilation. + """ + dict_ = self.model_dump( + include={"enable_fusion", "enable_reshape"}) + encoded = json.dumps(dict_, sort_keys=True).encode("utf-8") + return hashlib.sha256(encoded).digest() + + def model_post_init(self, __context: Any) -> None: + if not self.enable_reshape and self.enable_fusion: + print_warning_once( + "Fusion enabled but reshape elimination disabled." + "RMSNorm + quant (fp8) fusion might not work") + + pass_config: PassConfig = Field(default_factory=PassConfig) # not configurable, computed after init compile_sizes: List[int] = PrivateAttr @@ -2185,8 +2216,9 @@ def model_post_init(self, __context: Any) -> None: for k, v in self.inductor_passes.items(): if not isinstance(v, str): assert callable(v), ( - f"pass {k} should be a function or a qualified name") - self.inductor_compile_config[k] = v + f"pass {k} should be callable or a qualified name") + self.inductor_compile_config[k] = v if isinstance( + v, InductorPass) else CallableInductorPass(v) continue # resolve function from qualified name @@ -2194,7 +2226,8 @@ def model_post_init(self, __context: Any) -> None: module = ".".join(names[:-1]) func_name = names[-1] func = __import__(module).__dict__[func_name] - self.inductor_compile_config[k] = func + self.inductor_compile_config[k] = func if isinstance( + func, InductorPass) else CallableInductorPass(func) self.enabled_custom_ops = Counter() self.disabled_custom_ops = Counter() @@ -2344,7 +2377,8 @@ def __post_init__(self): self.compilation_config.custom_ops = ["none"] self.compilation_config.use_cudagraph = True self.compilation_config.use_inductor = True - self.compilation_config.enable_fusion = False + self.compilation_config.pass_config.enable_fusion = False + self.compilation_config.pass_config.enable_reshape = False current_platform.check_and_update_config(self) diff --git a/vllm/utils.py b/vllm/utils.py index 2bbdc8d1ebde8..cb2ad43a2ae8d 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1501,15 +1501,6 @@ def __len__(self): return len(self._factory) -def combine_fx_passes(passes: List[Callable]) -> Callable: - - def combined_fx(graph) -> None: - for fx in passes: - fx(graph) - - return combined_fx - - def weak_ref_tensor(tensor: torch.Tensor) -> torch.Tensor: """ Create a weak reference to a tensor. diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 1f9b544637bf7..5f66293cbe8e4 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -548,7 +548,7 @@ def capture_model(self) -> None: if not self.use_cuda_graph: logger.warning( "Skipping CUDA graph capture. 
Please add " - "-O 3 to use CUDA graphs.", CompilationLevel.PIECEWISE) + "-O %s to use CUDA graphs.", CompilationLevel.PIECEWISE) return start_time = time.perf_counter() From 3430857b641131ffabf215ab569c41696b57b953 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 21 Nov 2024 15:06:42 +0800 Subject: [PATCH 077/397] [Misc] Increase default video fetch timeout (#10495) Signed-off-by: DarkLight1337 --- vllm/envs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/envs.py b/vllm/envs.py index 853c49bc4dbc1..14c1617f1be19 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -49,7 +49,7 @@ VLLM_WORKER_MULTIPROC_METHOD: str = "fork" VLLM_ASSETS_CACHE: str = os.path.join(VLLM_CACHE_ROOT, "assets") VLLM_IMAGE_FETCH_TIMEOUT: int = 5 - VLLM_VIDEO_FETCH_TIMEOUT: int = 15 + VLLM_VIDEO_FETCH_TIMEOUT: int = 30 VLLM_AUDIO_FETCH_TIMEOUT: int = 10 VLLM_TARGET_DEVICE: str = "cuda" MAX_JOBS: Optional[str] = None From aaddce5d268d2c82d49b0240d6c112ba4941f69e Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 20 Nov 2024 23:07:56 -0800 Subject: [PATCH 078/397] [platforms] improve error message for unspecified platforms (#10520) Signed-off-by: youkaichao --- vllm/config.py | 3 ++- vllm/platforms/interface.py | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/vllm/config.py b/vllm/config.py index b2785e1ce2d5f..ed09f8ae31863 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1191,12 +1191,13 @@ def is_multi_step(self) -> bool: class DeviceConfig: device: Optional[torch.device] + device_type: str def __init__(self, device: str = "auto") -> None: if device == "auto": # Automated device type detection self.device_type = current_platform.device_type - if self.device_type is None: + if not self.device_type: raise RuntimeError("Failed to infer device type") else: # Device type is assigned explicitly diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 68abec28ad71e..07f23167d509a 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -170,3 +170,4 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: class UnspecifiedPlatform(Platform): _enum = PlatformEnum.UNSPECIFIED + device_type = "" From f0e02380169b99a20cc5a4cd1848bbe085b50d5c Mon Sep 17 00:00:00 2001 From: Zhong Qishuai Date: Thu, 21 Nov 2024 17:05:23 +0800 Subject: [PATCH 079/397] [Doc] fix a small typo in docstring of llama_tool_parser (#10513) --- vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py index a5f44d69e5fd2..1856308b88cfa 100644 --- a/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/llama_tool_parser.py @@ -29,7 +29,8 @@ class Llama3JsonToolParser(ToolParser): Tool call parser for Llama 3.1 models intended for use with the examples/tool_chat_template_llama.jinja template. 
- Used when --enable-auto-tool-choice --tool-call-parser mistral are all set + Used when --enable-auto-tool-choice --tool-call-parser llama3_json + are all set """ def __init__(self, tokenizer: PreTrainedTokenizerBase): From 1cfde82ffd6edfca6029a7e312c848386ea322c1 Mon Sep 17 00:00:00 2001 From: Alex Brooks Date: Thu, 21 Nov 2024 03:46:20 -0700 Subject: [PATCH 080/397] [Model] Add Support for Multimodal Granite Models (#10291) Signed-off-by: Alex-Brooks Co-authored-by: Cyrus Leung --- vllm/model_executor/models/clip.py | 47 ++++++++++++++++++------ vllm/model_executor/models/llava.py | 45 +++++++++++++++++++---- vllm/model_executor/models/llava_next.py | 20 +++++++++- vllm/model_executor/models/pixtral.py | 28 ++++++++++++-- vllm/model_executor/models/siglip.py | 42 ++++++++++++++++----- vllm/multimodal/utils.py | 44 ++++++++++++++++++++++ 6 files changed, 191 insertions(+), 35 deletions(-) diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index 7f638506f9fb2..cd89519e95986 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -21,7 +21,8 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges, - repeat_and_pad_placeholder_tokens) + repeat_and_pad_placeholder_tokens, + resolve_visual_encoder_outputs) from vllm.sequence import SequenceData from .utils import get_vit_attn_backend @@ -389,12 +390,20 @@ def __init__( for layer_idx in range(num_hidden_layers) ]) - def forward(self, inputs_embeds: torch.Tensor): - + def forward( + self, inputs_embeds: torch.Tensor, return_all_hidden_states: bool + ) -> Union[torch.Tensor, list[torch.Tensor]]: + hidden_states_pool = [] hidden_states = inputs_embeds + for encoder_layer in self.layers: hidden_states = encoder_layer(hidden_states) - + if return_all_hidden_states: + hidden_states_pool.append(hidden_states) + # If we have multiple feature sample layers, we return all hidden + # states in order and grab the ones we need by index. + if return_all_hidden_states: + return hidden_states_pool return hidden_states @@ -419,6 +428,7 @@ def __init__( # NOTE: This typo of "layrnorm" is not fixed on purpose to match # the original transformers code and name of the model weights. 
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) + self.encoder = CLIPEncoder( config=config, quant_config=quant_config, @@ -446,16 +456,26 @@ def __init__( def forward( self, pixel_values: torch.Tensor, + feature_sample_layers: Optional[list[int]] = None, ) -> torch.Tensor: hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) - hidden_states = self.encoder(inputs_embeds=hidden_states) - if self.post_layernorm is None: - return hidden_states + return_all_hidden_states = feature_sample_layers is not None + + # Produces either the last layer output or all of the hidden states, + # depending on if we have feature_sample_layers or not + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + return_all_hidden_states=return_all_hidden_states) + + # Handle post-norm (if applicable) and stacks feature layers if needed + encoder_outputs = resolve_visual_encoder_outputs( + encoder_outputs, feature_sample_layers, self.post_layernorm, + self.config.num_hidden_layers) - return self.post_layernorm(hidden_states) + return encoder_outputs class CLIPVisionModel(nn.Module): @@ -478,11 +498,14 @@ def __init__( quant_config=quant_config, num_hidden_layers_override=num_hidden_layers_override, require_post_norm=require_post_norm, - prefix=f"{prefix}.vision_model", - ) + prefix=f"{prefix}.vision_model") - def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: - return self.vision_model(pixel_values) + def forward( + self, + pixel_values: torch.Tensor, + feature_sample_layers: Optional[list[int]] = None, + ) -> torch.Tensor: + return self.vision_model(pixel_values, feature_sample_layers) @property def device(self): diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index e7d3161a7cb2d..05c6cc62efcd7 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -204,7 +204,41 @@ def input_processor_for_llava(ctx: InputContext, inputs: DecoderOnlyInputs): class LlavaLikeConfig(Protocol): vision_config: PretrainedConfig - vision_feature_layer: int + vision_feature_layer: Union[int, List[int]] + + +def _get_num_hidden_layers(hf_config: LlavaLikeConfig) -> int: + """Determine the number of hidden layers to initialize up to in the + visual encoder. + + Args: + hf_config: Model config with vision feature layer(s). + """ + feature_layers = hf_config.vision_feature_layer + num_hidden_layers = hf_config.vision_config.num_hidden_layers + # If we have one feature layer, initialize up to that layer + if isinstance(feature_layers, int): + return _get_layer_index(feature_layers, num_hidden_layers) + # If we have multiple feature layers, initialize up to the deepest one + elif isinstance(feature_layers, (list, tuple)): + return max( + _get_layer_index(idx, num_hidden_layers) for idx in feature_layers) + raise TypeError(f"vision_layer_feature type: {type(feature_layers)}" + " is not supported") + + +def _get_layer_index(feature_layer_index: int, num_hidden_layers: int) -> int: + """Given an signed vision feature layer, get the number of hidden layers + needed to leverage it. + + Args: + feature_layer_index: Index of a required layer in the visual encoder. + num_hidden_layers: The total number of hidden layers in the visual + encoder. 
+ """ + if feature_layer_index < 0: + return num_hidden_layers + feature_layer_index + 1 + return feature_layer_index + 1 def init_vision_tower_for_llava( @@ -216,13 +250,8 @@ def init_vision_tower_for_llava( ): vision_config = hf_config.vision_config - # Initialize the vision tower only up to the required feature layer - vision_feature_layer = hf_config.vision_feature_layer - if vision_feature_layer < 0: - num_hidden_layers = hf_config.vision_config.num_hidden_layers \ - + vision_feature_layer + 1 - else: - num_hidden_layers = vision_feature_layer + 1 + # Initialize the vision tower only up to the deepest required feature layer + num_hidden_layers = _get_num_hidden_layers(hf_config) if isinstance(vision_config, CLIPVisionConfig): return CLIPVisionModel( diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index 37e2227a52dcd..abeebb45fc4a7 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -288,6 +288,21 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: pooler_config = vllm_config.model_config.pooler_config multimodal_config = vllm_config.model_config.multimodal_config + vision_feature_layer = config.vision_feature_layer + # Determine the layer up to which we will initialize the vision tower + if isinstance(vision_feature_layer, int): + vision_hidden_size = config.vision_config.hidden_size + self.feature_sample_layers = None + # Used for multimodal granite models to control encoder outputs + elif isinstance(vision_feature_layer, (list, tuple)): + vision_hidden_size = config.vision_config.hidden_size * len( + vision_feature_layer) + self.feature_sample_layers = vision_feature_layer + else: + raise TypeError( + f"vision_layer_feature type: {type(vision_feature_layer)}" + " is not supported") + self.config = config self.multimodal_config = multimodal_config @@ -300,7 +315,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.image_newline = nn.Parameter( torch.empty(config.text_config.hidden_size)) self.multi_modal_projector = LlavaMultiModalProjector( - vision_hidden_size=config.vision_config.hidden_size, + vision_hidden_size=vision_hidden_size, text_hidden_size=config.text_config.hidden_size, projector_hidden_act=config.projector_hidden_act) @@ -419,7 +434,8 @@ def _image_pixels_to_features( # NOTE: we skip the step to select the vision feature layer since # this is already done inside the vision tower - image_features = vision_tower(pixel_values) + image_features = vision_tower( + pixel_values, feature_sample_layers=self.feature_sample_layers) return self._select_image_features( image_features, diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index d14b89d6b3f85..6711cbf5694b9 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -33,7 +33,8 @@ from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.inputs import PlaceholderRange from vllm.multimodal.utils import (cached_get_tokenizer, - consecutive_placeholder_ranges) + consecutive_placeholder_ranges, + resolve_visual_encoder_outputs) from vllm.sequence import IntermediateTensors, SequenceData from vllm.transformers_utils.processor import cached_get_processor from vllm.utils import is_list_of @@ -970,9 +971,18 @@ def forward( x: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor, + return_all_hidden_states: bool, ) -> torch.Tensor: + 
hidden_states_pool = [] + for layer in self.layers: x = layer(x, attention_mask, position_embeddings) + if return_all_hidden_states: + hidden_states_pool.append(x) + # If we have multiple feature sample layers, we return all hidden + # states in order and grab the ones we need by index. + if return_all_hidden_states: + return hidden_states_pool return x @@ -990,6 +1000,7 @@ def __init__( super().__init__() self.config = config + self.patch_conv = nn.Conv2d( in_channels=config.num_channels, out_channels=config.hidden_size, @@ -1024,6 +1035,7 @@ def __init__( def forward( self, pixel_values: List[torch.Tensor], + feature_sample_layers: Optional[list[int]] = None, ) -> torch.Tensor: """ Args: @@ -1031,6 +1043,9 @@ def forward( in pixel_values. This means it will be a list of tensors because multiple requests batched can have multiple images, each with their own shape potentially + feature_sample_layers: Layer indices whose features should be + concatenated and used as the visual encoder output. If none + are provided, the last layer is used. Returns: image_features: tensor of token features for @@ -1065,8 +1080,15 @@ def forward( [p.shape[-2] * p.shape[-1] for p in patch_embeds_list], patch_embeds) - out = self.transformer(patch_embeds, attention_mask, - position_embedding) + return_all_hidden_states = feature_sample_layers is not None + out = self.transformer( + patch_embeds, + attention_mask, + position_embedding, + return_all_hidden_states=return_all_hidden_states) + + out = resolve_visual_encoder_outputs(out, feature_sample_layers, None, + self.config.num_hidden_layers) return out diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py index c58ad99692900..deaed0ba7e4ce 100644 --- a/vllm/model_executor/models/siglip.py +++ b/vllm/model_executor/models/siglip.py @@ -25,7 +25,8 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges, - repeat_and_pad_placeholder_tokens) + repeat_and_pad_placeholder_tokens, + resolve_visual_encoder_outputs) from vllm.sequence import SequenceData from .utils import get_vit_attn_backend @@ -450,11 +451,19 @@ def __init__( def forward( self, inputs_embeds: torch.Tensor, - ) -> torch.Tensor: + return_all_hidden_states: bool, + ) -> Union[torch.Tensor, list[torch.Tensor]]: + hidden_states_pool = [] hidden_states = inputs_embeds + for encoder_layer in self.layers: hidden_states, _ = encoder_layer(hidden_states) - + if return_all_hidden_states: + hidden_states_pool.append(hidden_states) + # If we have multiple feature sample layers, we return all hidden + # states in order and grab the ones we need by index. 
+ if return_all_hidden_states: + return hidden_states_pool return hidden_states @@ -509,6 +518,7 @@ def __init__( embed_dim = config.hidden_size self.embeddings = SiglipVisionEmbeddings(config) + self.encoder = SiglipEncoder( config, quant_config=quant_config, @@ -546,23 +556,33 @@ def forward( self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = True, + feature_sample_layers: Optional[list[int]] = None, ) -> torch.Tensor: + hidden_states = self.embeddings( pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, ) - encoder_outputs = self.encoder(inputs_embeds=hidden_states) + return_all_hidden_states = feature_sample_layers is not None + + # Produces either the last layer output or all of the hidden states, + # depending on if we have feature_sample_layers or not + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + return_all_hidden_states=return_all_hidden_states, + ) - if self.post_layernorm is None: - return encoder_outputs + # Handle post-norm (if applicable) and stacks feature layers if needed + encoder_outputs = resolve_visual_encoder_outputs( + encoder_outputs, feature_sample_layers, self.post_layernorm, + self.config.num_hidden_layers) - last_hidden_state = self.post_layernorm(encoder_outputs) - # TODO: add this back when pooled_output is used in inference + # TODO: add this back when pooled_output is used in inference. # if self.use_head: - # pooled_output = self.head(last_hidden_state) + # pooled_output = self.head(encoder_outputs) - return last_hidden_state + return encoder_outputs class SiglipVisionModel(nn.Module): @@ -595,10 +615,12 @@ def forward( self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False, + feature_sample_layers: Optional[list[int]] = None, ) -> torch.Tensor: return self.vision_model( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, + feature_sample_layers=feature_sample_layers, ) def load_weights(self, weights: Iterable[Tuple[str, diff --git a/vllm/multimodal/utils.py b/vllm/multimodal/utils.py index 40194716bbf94..d4333b7519b47 100644 --- a/vllm/multimodal/utils.py +++ b/vllm/multimodal/utils.py @@ -6,6 +6,7 @@ import numpy as np import numpy.typing as npt +import torch from PIL import Image import vllm.envs as envs @@ -392,6 +393,49 @@ def encode_video_base64(frames: npt.NDArray): return ",".join(base64_frames) +def resolve_visual_encoder_outputs( + encoder_outputs: Union[torch.Tensor, list[torch.Tensor]], + feature_sample_layers: Optional[list[int]], + post_layer_norm: Optional[torch.nn.LayerNorm], + max_possible_layers: int, +) -> torch.Tensor: + """Given the outputs a visual encoder module that may correspond to the + output of the last layer, or a list of hidden states to be stacked, + handle post normalization and resolve it into a single output tensor. + + Args: + encoder_outputs: Output of encoder's last layer or all hidden states. + feature_sample_layers: Optional layer indices to grab from the encoder + outputs; if provided, encoder outputs must be a list. + post_layer_norm: Post norm to apply to the output of the encoder. + max_possible_layers: Total layers in the fully loaded visual encoder. + + """ + if feature_sample_layers is None: + if post_layer_norm is not None: + return post_layer_norm(encoder_outputs) + return encoder_outputs + + # Get the hidden states corresponding to the layer indices. + # Negative values are relative to the full visual encoder, + # so offset them depending on how many layers were loaded. 
+ # NOTE: this assumes that encoder_outputs contains a list + # of hidden states in the same order as the encoder layers + # that produced them. + offset = max_possible_layers - len(encoder_outputs) + hs_pool = [ + encoder_outputs[layer_idx] + if layer_idx >= 0 else encoder_outputs[layer_idx + offset] + for layer_idx in feature_sample_layers + ] + + # Apply post-norm on the final hidden state if we are using it + uses_last_layer = feature_sample_layers[-1] in (len(hs_pool) - 1, -1) + if post_layer_norm is not None and uses_last_layer: + hs_pool[-1] = post_layer_norm(encoder_outputs) + return torch.cat(hs_pool, dim=-1) + + # Utilities for input processors _T = TypeVar("_T", str, int) From 8a93a598d9ac265882e55432e7aef55c8bff23f4 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Thu, 21 Nov 2024 19:15:36 +0800 Subject: [PATCH 081/397] fix the issue that len(tokenizer(prompt)["input_ids"]) > prompt_len (#10524) Signed-off-by: Wang, Yi A --- benchmarks/backend_request_func.py | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py index 25c8b1bbf3e22..c3fed56e8a956 100644 --- a/benchmarks/backend_request_func.py +++ b/benchmarks/backend_request_func.py @@ -54,6 +54,7 @@ async def async_request_tgi( "do_sample": True, "temperature": 0.01, # TGI does not accept 0.0 temperature. "top_p": 0.99, # TGI does not accept 1.0 top_p. + "truncate": request_func_input.prompt_len, # TGI does not accept ignore_eos flag. } payload = { From d5ec121f95f51184acce4e2c27ad8fc01904d3d9 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Thu, 21 Nov 2024 22:20:08 +0800 Subject: [PATCH 082/397] [Model] Expose `dynamic_image_size` as mm_processor_kwargs for InternVL2 models (#10518) Signed-off-by: Isotr0py <2037008807@qq.com> --- .../mm_processor_kwargs/test_internvl.py | 206 ++++++++++++++++++ vllm/model_executor/models/internvl.py | 63 ++++-- 2 files changed, 255 insertions(+), 14 deletions(-) create mode 100644 tests/models/decoder_only/vision_language/mm_processor_kwargs/test_internvl.py diff --git a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_internvl.py b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_internvl.py new file mode 100644 index 0000000000000..af0c2aa211998 --- /dev/null +++ b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_internvl.py @@ -0,0 +1,206 @@ +"""Tests for InternVL's multimodal preprocessing kwargs.""" +from typing import Callable, Optional + +import pytest +from transformers import AutoTokenizer + +from vllm.inputs import InputContext, token_inputs +from vllm.multimodal import MultiModalRegistry + +from .....conftest import _ImageAssets +from ....utils import build_model_context + +models = ["OpenGVLab/InternVL2-2B"] + + +# Wrap lazy imports to avoid initializing CUDA during test collection +@pytest.fixture() +def input_processor_for_internvl(): + from vllm.model_executor.models.internvl import InternVLInputPipeline + + pipeline = InternVLInputPipeline('', '', '') + return pipeline.input_processor + + +@pytest.fixture() +def dummy_data_for_internvl(): + from vllm.model_executor.models.internvl import InternVLInputPipeline + + pipeline = InternVLInputPipeline('', '', '') + return pipeline.dummy_data + + +@pytest.fixture() +def get_max_internvl_image_tokens(): + from vllm.model_executor.models.internvl import ( + get_max_internvl_image_tokens) + return get_max_internvl_image_tokens + + +@pytest.mark.parametrize("model", models) 
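For reference, the two processor overrides exercised by the tests below are supplied when the model is constructed. A minimal, illustrative sketch (the model name, prompt format and image path are examples, not taken from this patch):

from PIL import Image

from vllm import LLM, SamplingParams

llm = LLM(
    model="OpenGVLab/InternVL2-2B",
    trust_remote_code=True,
    mm_processor_kwargs={"max_dynamic_patch": 4, "dynamic_image_size": True},
)
outputs = llm.generate(
    {
        "prompt": "<image>\nDescribe this image.",
        "multi_modal_data": {"image": Image.open("example.jpg")},
    },
    SamplingParams(max_tokens=64),
)
print(outputs[0].outputs[0].text)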
+@pytest.mark.parametrize("max_dynamic_patch", [1, 4]) +@pytest.mark.parametrize("dynamic_image_size", [True, False, None]) +def test_input_mapper_override( + model: str, + image_assets: _ImageAssets, + max_dynamic_patch: int, + dynamic_image_size: Optional[bool], +): + mm_processor_kwargs = { + "max_dynamic_patch": max_dynamic_patch, + } + if dynamic_image_size is not None: + mm_processor_kwargs["dynamic_image_size"] = dynamic_image_size + + expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1 + if dynamic_image_size is False: + expected_num_patches = 1 + + ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=mm_processor_kwargs, + ) + + mm_registry = MultiModalRegistry() + mm_registry.init_mm_limits_per_prompt(ctx.model_config) + + image = image_assets[0].pil_image.resize((448 * 2, 448 * 2)) + vllm_result = mm_registry.map_input( + ctx.model_config, + {"image": image}, + ) + assert vllm_result["pixel_values"].size(1) == expected_num_patches + + +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("max_dynamic_patch", [1, 4, None]) +@pytest.mark.parametrize("dynamic_image_size", [True, False, None]) +def test_max_tokens_override( + get_max_internvl_image_tokens: Callable, + model: str, + max_dynamic_patch: Optional[int], + dynamic_image_size: Optional[bool], +): + """Ensure get_max_internvl_image_tokens handles mm_processor_kwargs.""" + ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=None, + ) + + if max_dynamic_patch is None: + max_dynamic_patch = ctx.get_hf_config().max_dynamic_patch + expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1 + if dynamic_image_size is False: + expected_num_patches = 1 + expected_max_tokens = 256 * expected_num_patches + + actual_max_tokens = get_max_internvl_image_tokens( + ctx=InputContext(ctx.model_config), + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) + assert expected_max_tokens == actual_max_tokens + + +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("num_imgs", [1, 2]) +@pytest.mark.parametrize("max_dynamic_patch", [1, 4, None]) +@pytest.mark.parametrize("dynamic_image_size", [True, False, None]) +def test_dummy_data_override( + dummy_data_for_internvl: Callable, + model: str, + num_imgs: int, + max_dynamic_patch: Optional[int], + dynamic_image_size: Optional[bool], +): + """Ensure dummy_data_for_internvl handles kwargs properly.""" + # Same as the previous test - don't initialize mm_processor_kwargs + # in this test and assume that the kwargs will be correctly expanded by + # the partial when calling the dummy data func. 
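# (For reference: with 256 tokens per tile, max_dynamic_patch = 4 and a
# thumbnail tile, expected_max_tokens works out to (4 + 1) * 256 = 1280;
# with dynamic_image_size=False it collapses to 1 * 256 = 256.)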
+ ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=None, + ) + + if max_dynamic_patch is None: + max_dynamic_patch = ctx.get_hf_config().max_dynamic_patch + expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1 + if dynamic_image_size is False: + expected_num_patches = 1 + expected_max_tokens = 256 * expected_num_patches + + dummy_data = dummy_data_for_internvl( + ctx=ctx, + seq_len=8192, # Should be bigger than num_imgs * toks_per_img + mm_counts={"image": num_imgs}, + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) + sequence_data = dummy_data.seq_data + + tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True) + image_token_id = tokenizer.encode('', + add_special_tokens=False)[0] + + # Ensure we have the right number of placeholders per size + img_tok_count = sequence_data.get_token_ids().count(image_token_id) + assert img_tok_count == expected_max_tokens * num_imgs + + +@pytest.mark.parametrize("model", models) +@pytest.mark.parametrize("max_dynamic_patch", [1, 4]) +@pytest.mark.parametrize("dynamic_image_size", [True, False, None]) +@pytest.mark.parametrize("num_imgs", [1, 2]) +def test_input_processor_override( + input_processor_for_internvl: Callable, + image_assets: _ImageAssets, + model: str, + num_imgs: int, + max_dynamic_patch: int, + dynamic_image_size: Optional[bool], +): + """Ensure input_processor_for_internvl handles kwargs properly.""" + # Same as the previous test - don't initialize mm_processor_kwargs + # in this test and assume that the kwargs will be correctly expanded by + # the partial when calling the custom input processor. + expected_num_patches = max_dynamic_patch + 1 if max_dynamic_patch > 1 else 1 + if dynamic_image_size is False: + expected_num_patches = 1 + + ctx = build_model_context( + model_name=model, + tokenizer_name=model, + trust_remote_code=True, + mm_processor_kwargs=None, + ) + expected_toks_per_img = 256 * expected_num_patches + + # Build the image str / prompt based on the number of images we pass + tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True) + placeholders = "" if num_imgs == 1 else "\n".join( + f"Image-{i}: \n" for i in range(1, num_imgs + 1)) + prompt = placeholders + images = [image_assets[0].pil_image.resize((448 * 2, 448 * 2))] * num_imgs + + inputs = token_inputs(prompt_token_ids=tokenizer.encode(prompt), + prompt=prompt, + multi_modal_data={"image": images}) + + processed_inputs = input_processor_for_internvl( + ctx, + inputs, + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) + + # Ensure we have the right number of placeholders per num_crops size + image_token_id = tokenizer.encode('', + add_special_tokens=False)[0] + img_tok_count = processed_inputs["prompt_token_ids"].count(image_token_id) + assert img_tok_count == expected_toks_per_img * num_imgs diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 5d38b4b1ef14b..47ac00b6afe9b 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -123,8 +123,15 @@ def calculate_num_blocks(orig_width: int, orig_height: int, min_num: int, return blocks, target_width, target_height -def calculate_num_blocks_wrapper(hf_config: PretrainedConfig, - max_dynamic_patch: Optional[int] = None): +def calculate_num_blocks_wrapper( + hf_config: PretrainedConfig, + max_dynamic_patch: Optional[int] = None, + 
dynamic_image_size: Optional[bool] = None, +): + if dynamic_image_size is None: + dynamic_image_size = hf_config.dynamic_image_size + + max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1 if max_dynamic_patch is None: max_dynamic_patch = hf_config.max_dynamic_patch min_num = hf_config.min_dynamic_patch @@ -183,10 +190,17 @@ def image_to_pixel_values(image: Image.Image, input_size: int, min_num: int, return pixel_values -def image_to_pixel_values_wrapper(hf_config: PretrainedConfig, - max_dynamic_patch: Optional[int] = None): +def image_to_pixel_values_wrapper( + hf_config: PretrainedConfig, + max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, +): image_size = hf_config.vision_config.image_size min_num = hf_config.min_dynamic_patch + if dynamic_image_size is None: + dynamic_image_size = hf_config.dynamic_image_size + + max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1 if max_dynamic_patch is None: max_dynamic_patch = hf_config.max_dynamic_patch use_thumbnail = hf_config.use_thumbnail @@ -207,11 +221,17 @@ def get_internvl_num_patches(hf_config: PretrainedConfig): (downsample_ratio**2)) -def get_max_internvl_image_tokens(ctx: InputContext, - *, - max_dynamic_patch: Optional[int] = None): +def get_max_internvl_image_tokens( + ctx: InputContext, + *, + max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, +): hf_config = ctx.get_hf_config() + if dynamic_image_size is None: + dynamic_image_size = hf_config.dynamic_image_size + max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1 if max_dynamic_patch is None: max_dynamic_patch = hf_config.max_dynamic_patch use_thumbnail = hf_config.use_thumbnail @@ -222,12 +242,18 @@ def get_max_internvl_image_tokens(ctx: InputContext, return num_patches * max_dynamic_patch -def get_max_internvl_image_size(ctx: InputContext, - *, - max_dynamic_patch: Optional[int] = None): +def get_max_internvl_image_size( + ctx: InputContext, + *, + max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, +): hf_config = ctx.get_hf_config() image_size = hf_config.vision_config.image_size + if dynamic_image_size is None: + dynamic_image_size = hf_config.dynamic_image_size + max_dynamic_patch = max_dynamic_patch if dynamic_image_size else 1 if max_dynamic_patch is None: max_dynamic_patch = hf_config.max_dynamic_patch use_thumbnail = hf_config.use_thumbnail @@ -281,6 +307,7 @@ def input_processor( inputs: DecoderOnlyInputs, *, max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, ) -> DecoderOnlyInputs: multi_modal_data = inputs.get("multi_modal_data") if multi_modal_data is None or "image" not in multi_modal_data: @@ -292,7 +319,7 @@ def input_processor( image_data = multi_modal_data["image"] num_patches = get_internvl_num_patches(hf_config) num_blocks_calculator = calculate_num_blocks_wrapper( - hf_config, max_dynamic_patch) + hf_config, max_dynamic_patch, dynamic_image_size) if isinstance(image_data, Image.Image): width, height = image_data.size num_blocks, _, _ = num_blocks_calculator(width, height) @@ -332,11 +359,12 @@ def input_mapper( data: object, *, max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, ): hf_config = ctx.get_hf_config() image_pixel_values_mapper = image_to_pixel_values_wrapper( - hf_config, max_dynamic_patch) + hf_config, max_dynamic_patch, dynamic_image_size) if isinstance(data, Image.Image): data = image_pixel_values_mapper(data) # Add an N dimension 
for number of images per prompt (currently 1). @@ -366,13 +394,17 @@ def dummy_data( mm_counts: Mapping[str, int], *, max_dynamic_patch: Optional[int] = None, + dynamic_image_size: Optional[bool] = None, ): num_images = mm_counts["image"] hf_config = ctx.get_hf_config() image_feature_size = get_max_internvl_image_tokens( - ctx, max_dynamic_patch=max_dynamic_patch) + ctx, + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) model_config = ctx.model_config tokenizer = cached_get_tokenizer( model_config.tokenizer, @@ -388,7 +420,10 @@ def dummy_data( ) max_image_width, max_image_height = get_max_internvl_image_size( - ctx, max_dynamic_patch=max_dynamic_patch) + ctx, + max_dynamic_patch=max_dynamic_patch, + dynamic_image_size=dynamic_image_size, + ) mm_data = dummy_image_for_clip( hf_config.vision_config, From 4d676f085295d92a9248c4944433b4ade52a8ff3 Mon Sep 17 00:00:00 2001 From: Xiaoyu Zhang <35585791+BBuf@users.noreply.github.com> Date: Thu, 21 Nov 2024 22:40:02 +0800 Subject: [PATCH 083/397] [Bugfix] Embedding model pooling_type equals ALL and multi input's bug (#10494) --- vllm/model_executor/layers/pooler.py | 29 ++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index bfe2d7d0f382e..df1978241340b 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -94,14 +94,10 @@ def forward( pooled_data = hidden_states[last_token_flat_indices] elif self.pooling_type == PoolingType.ALL: offset = 0 - pooled_data_lst = [] + pooled_data = [] for prompt_len in prompt_lens: - pooled_data_i = hidden_states[offset:offset + prompt_len] - - pooled_data_lst.append(pooled_data_i) + pooled_data.append(hidden_states[offset:offset + prompt_len]) offset += prompt_len - - pooled_data = torch.stack(pooled_data_lst) elif self.pooling_type == PoolingType.MEAN: # Calculate mean pooling cumsum = torch.cumsum(hidden_states, dim=0) @@ -121,7 +117,7 @@ def forward( step_tag_id = self.step_tag_id offset = 0 - pooled_data_lst = [] + pooled_data = [] for prompt_len, seq_data_i in zip( prompt_lens, pooling_metadata.seq_data.values()): pooled_data_i = hidden_states[offset:offset + prompt_len] @@ -130,17 +126,26 @@ def forward( pooled_data_i = pooled_data_i[token_ids == step_tag_id] offset += prompt_len - pooled_data_lst.append(pooled_data_i) - - pooled_data = torch.stack(pooled_data_lst) + pooled_data.append(pooled_data_i) else: raise ValueError(f"Invalid pooling type: {self.pooling_type}") if self.normalize: - pooled_data = nn.functional.normalize(pooled_data, p=2, dim=1) + if isinstance(pooled_data, list): + pooled_data = [ + nn.functional.normalize(data, p=2, dim=1) + for data in pooled_data + ] + else: + pooled_data = nn.functional.normalize(pooled_data, p=2, dim=1) if self.softmax: - pooled_data = nn.functional.softmax(pooled_data, dim=-1) + if isinstance(pooled_data, list): + pooled_data = [ + nn.functional.softmax(data, dim=-1) for data in pooled_data + ] + else: + pooled_data = nn.functional.softmax(pooled_data, dim=-1) pooled_outputs = [ EmbeddingSequenceGroupOutput(data.tolist()) for data in pooled_data From da7e702c6fae521bf8633affb8fe7b834f5cb94b Mon Sep 17 00:00:00 2001 From: Chauncey Date: Fri, 22 Nov 2024 00:24:32 +0800 Subject: [PATCH 084/397] [Bug]: When apply continue_final_message for OpenAI server, the "echo":false is ignored (#10180) Signed-off-by: chaunceyjiang --- tests/entrypoints/openai/test_chat_echo.py | 79 
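For context, the behaviour covered by this fix can be exercised against a running server roughly as follows; the base URL, API key and model name are placeholders:

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
completion = client.chat.completions.create(
    model="Qwen/Qwen2-1.5B-Instruct",
    messages=[
        {"role": "user", "content": "tell me a common saying"},
        {"role": "assistant", "content": "An apple a day, keeps"},
    ],
    extra_body={
        "echo": False,
        "continue_final_message": True,
        "add_generation_prompt": False,
    },
)
# With echo disabled, the partial assistant message should not be repeated
# in completion.choices[0].message.content.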
++++++++++++++++++++++ vllm/entrypoints/openai/serving_chat.py | 4 +- 2 files changed, 81 insertions(+), 2 deletions(-) create mode 100644 tests/entrypoints/openai/test_chat_echo.py diff --git a/tests/entrypoints/openai/test_chat_echo.py b/tests/entrypoints/openai/test_chat_echo.py new file mode 100644 index 0000000000000..223ac5b41aa83 --- /dev/null +++ b/tests/entrypoints/openai/test_chat_echo.py @@ -0,0 +1,79 @@ +from typing import NamedTuple + +import openai # use the official client for correctness check +import pytest +import pytest_asyncio + +from ...utils import RemoteOpenAIServer + +# # any model with a chat template should work here +MODEL_NAME = "Qwen/Qwen2-1.5B-Instruct" +DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}""" # noqa: E501 + + +@pytest.fixture(scope="module") +def server(): + args = [ + # use half precision for speed and memory savings in CI environment + "--dtype", + "float16", + "--enforce-eager", + "--max-model-len", + "4080", + "--chat-template", + DUMMY_CHAT_TEMPLATE, + ] + + with RemoteOpenAIServer(MODEL_NAME, args) as remote_server: + yield remote_server + + +@pytest_asyncio.fixture +async def client(server): + async with server.get_async_client() as async_client: + yield async_client + + +class TestCase(NamedTuple): + model_name: str + echo: bool + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "test_case", + [ + TestCase(model_name=MODEL_NAME, echo=True), + TestCase(model_name=MODEL_NAME, echo=False) + ], +) +async def test_chat_session_with_echo_and_continue_final_message( + client: openai.AsyncOpenAI, test_case: TestCase): + saying: str = "Here is a common saying about apple. An apple a day, keeps" + # test echo with continue_final_message parameter + chat_completion = await client.chat.completions.create( + model=test_case.model_name, + messages=[{ + "role": "user", + "content": "tell me a common saying" + }, { + "role": "assistant", + "content": saying + }], + extra_body={ + "echo": test_case.echo, + "continue_final_message": True, + "add_generation_prompt": False + }) + assert chat_completion.id is not None + assert len(chat_completion.choices) == 1 + + choice = chat_completion.choices[0] + assert choice.finish_reason == "stop" + + message = choice.message + if test_case.echo: + assert message.content is not None and saying in message.content + else: + assert message.content is not None and saying not in message.content + assert message.role == "assistant" diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 2eef909eb9319..54ca0463bcab1 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -361,7 +361,7 @@ async def chat_completion_stream_generator( # Send response to echo the input portion of the # last message - if request.echo or request.continue_final_message: + if request.echo: last_msg_content: Union[str, List[Dict[str, str]]] = "" if conversation and "content" in conversation[ -1] and conversation[-1].get("role") == role: @@ -706,7 +706,7 @@ async def chat_completion_full_generator( stop_reason=output.stop_reason) choices.append(choice_data) - if request.echo or request.continue_final_message: + if request.echo: last_msg_content: Union[str, List[Dict[str, str]]] = "" if conversation and "content" in conversation[-1] and conversation[ -1].get("role") == role: From 2385b60d8300ce730ae67d9ea945f06de9ec4e21 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 22 Nov 2024 
01:18:11 +0800 Subject: [PATCH 085/397] [Kernel] Register punica ops directly (#10522) Signed-off-by: Jee Jee Li --- tests/lora/test_punica_variation.py | 23 ++++++++++++++++------ vllm/lora/ops/bgmv_expand.py | 23 +++++++++++++++++++--- vllm/lora/ops/bgmv_expand_slice.py | 25 +++++++++++++++++++++--- vllm/lora/ops/bgmv_shrink.py | 23 +++++++++++++++++++--- vllm/lora/ops/sgmv_expand.py | 29 +++++++++++++++++++++++++--- vllm/lora/ops/sgmv_expand_slice.py | 30 ++++++++++++++++++++++++++--- vllm/lora/ops/sgmv_shrink.py | 28 ++++++++++++++++++++++++--- 7 files changed, 157 insertions(+), 24 deletions(-) diff --git a/tests/lora/test_punica_variation.py b/tests/lora/test_punica_variation.py index 52b82f25d23e1..3b20033271d26 100644 --- a/tests/lora/test_punica_variation.py +++ b/tests/lora/test_punica_variation.py @@ -6,12 +6,13 @@ import pytest import torch -from vllm.lora.ops.bgmv_expand import bgmv_expand -from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice -from vllm.lora.ops.bgmv_shrink import bgmv_shrink -from vllm.lora.ops.sgmv_expand import sgmv_expand -from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice -from vllm.lora.ops.sgmv_shrink import sgmv_shrink +# Enable custom op register +import vllm.lora.ops.bgmv_expand +import vllm.lora.ops.bgmv_expand_slice +import vllm.lora.ops.bgmv_shrink +import vllm.lora.ops.sgmv_expand +import vllm.lora.ops.sgmv_expand_slice +import vllm.lora.ops.sgmv_shrink # noqa: F401 from vllm.platforms import current_platform from .utils import (generate_data, generate_data_for_expand_nslices, @@ -37,6 +38,16 @@ def assert_close(a, b): torch.testing.assert_close(a, b, rtol=rtol, atol=atol) +# Unlike test_punica_sizes.py, we directly utilize custom op for +# testing, which verifies the correct registration of these ops. 
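As a reference for the registration pattern exercised here, a toy op can be registered through the same helper roughly like this (the op name and kernel are illustrative, not part of vLLM):

import torch

from vllm.utils import direct_register_custom_op


def _scale_inplace(output_tensor: torch.Tensor, scaling: float) -> None:
    # Toy "kernel": scale the output buffer in place.
    output_tensor.mul_(scaling)


def _scale_inplace_fake(output_tensor: torch.Tensor, scaling: float) -> None:
    # Fake (meta) implementation used during tracing; does no real work.
    return


direct_register_custom_op(
    op_name="scale_inplace",
    op_func=_scale_inplace,
    mutates_args=["output_tensor"],
    fake_impl=_scale_inplace_fake,
)

x = torch.ones(4)
torch.ops.vllm.scale_inplace(x, 2.0)  # x is now all twos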
+bgmv_expand = torch.ops.vllm.bgmv_expand +bgmv_expand_slice = torch.ops.vllm.bgmv_expand_slice +bgmv_shrink = torch.ops.vllm.bgmv_shrink +sgmv_expand = torch.ops.vllm.sgmv_expand +sgmv_expand_slice = torch.ops.vllm.sgmv_expand_slice +sgmv_shrink = torch.ops.vllm.sgmv_shrink + + @pytest.mark.parametrize("batches", BATCHES) @pytest.mark.parametrize("num_loras", NUM_LORA) @pytest.mark.parametrize("rank", MAX_RANKS) diff --git a/vllm/lora/ops/bgmv_expand.py b/vllm/lora/ops/bgmv_expand.py index f176259fddc78..42adb191b8ead 100644 --- a/vllm/lora/ops/bgmv_expand.py +++ b/vllm/lora/ops/bgmv_expand.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + from .utils import get_lora_op_configs @@ -162,9 +164,24 @@ def _bgmv_expand( return +def bgmv_expand_fake( + inputs: torch.Tensor, + lora_b_weights: torch.Tensor, + output_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + add_inputs: bool = True, +) -> None: + return + + try: - bgmv_expand = torch.library.custom_op("lora::bgmv_expand", - _bgmv_expand, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="bgmv_expand", + op_func=_bgmv_expand, + mutates_args=["output_tensor"], + fake_impl=bgmv_expand_fake, + ) + bgmv_expand = torch.ops.vllm.bgmv_expand + except AttributeError: bgmv_expand = _bgmv_expand diff --git a/vllm/lora/ops/bgmv_expand_slice.py b/vllm/lora/ops/bgmv_expand_slice.py index 2c6ed96c253f0..f397d752a3ea9 100644 --- a/vllm/lora/ops/bgmv_expand_slice.py +++ b/vllm/lora/ops/bgmv_expand_slice.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + from .utils import get_lora_op_configs @@ -179,9 +181,26 @@ def _bgmv_expand_slice( return +def bgmv_expand_slice_fake( + inputs: torch.Tensor, + lora_b_weights: torch.Tensor, + output_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + slice_offset: int, + slice_size: int, + add_inputs: bool = True, +) -> None: + return + + try: - bgmv_expand_slice = torch.library.custom_op("lora::bgmv_expand_slice", - _bgmv_expand_slice, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="bgmv_expand_slice", + op_func=_bgmv_expand_slice, + mutates_args=["output_tensor"], + fake_impl=bgmv_expand_slice_fake, + ) + bgmv_expand_slice = torch.ops.vllm.bgmv_expand_slice + except AttributeError: bgmv_expand_slice = _bgmv_expand_slice diff --git a/vllm/lora/ops/bgmv_shrink.py b/vllm/lora/ops/bgmv_shrink.py index 0846ff36b1692..f3ef01d39e776 100644 --- a/vllm/lora/ops/bgmv_shrink.py +++ b/vllm/lora/ops/bgmv_shrink.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + from .utils import get_lora_op_configs @@ -142,9 +144,24 @@ def _bgmv_shrink( return +def bgmv_shrink_fake( + inputs: torch.Tensor, + lora_a_weights: torch.Tensor, + output_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + scaling: float = 1.0, +) -> None: + return + + try: - bgmv_shrink = torch.library.custom_op("lora::bgmv_shrink", - _bgmv_shrink, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="bgmv_shrink", + op_func=_bgmv_shrink, + mutates_args=["output_tensor"], + fake_impl=bgmv_shrink_fake, + ) + bgmv_shrink = torch.ops.vllm.bgmv_shrink + except AttributeError: bgmv_shrink = _bgmv_shrink diff --git a/vllm/lora/ops/sgmv_expand.py b/vllm/lora/ops/sgmv_expand.py index ee2cd2e05e2ee..77c5178493c44 100644 --- a/vllm/lora/ops/sgmv_expand.py +++ b/vllm/lora/ops/sgmv_expand.py @@ -9,6 
+9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + @triton.jit def _sgmv_expand_kernel( @@ -196,9 +198,30 @@ def _sgmv_expand( return +def sgmv_expand_fake( + inputs: torch.Tensor, + lora_b_weights: torch.Tensor, + output_tensor: torch.Tensor, + b_seq_start_loc: torch.Tensor, + seq_len_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + batches: int, + max_seq_length: int, + token_nums: int, + add_inputs: bool = False, +) -> None: + return + + try: - sgmv_expand = torch.library.custom_op("lora::sgmv_expand", - _sgmv_expand, - mutates_args=["output_tensor"]) + + direct_register_custom_op( + op_name="sgmv_expand", + op_func=_sgmv_expand, + mutates_args=["output_tensor"], + fake_impl=sgmv_expand_fake, + ) + sgmv_expand = torch.ops.vllm.sgmv_expand + except AttributeError: sgmv_expand = _sgmv_expand diff --git a/vllm/lora/ops/sgmv_expand_slice.py b/vllm/lora/ops/sgmv_expand_slice.py index 5244fa14913a4..55c4fb68ed128 100644 --- a/vllm/lora/ops/sgmv_expand_slice.py +++ b/vllm/lora/ops/sgmv_expand_slice.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + @triton.jit def _sgmv_expand_slice_kernel( @@ -209,9 +211,31 @@ def _sgmv_expand_slice( return +def sgmv_expand_slice_fake( + inputs: torch.Tensor, + lora_b_weights: torch.Tensor, + output_tensor: torch.Tensor, + b_seq_start_loc: torch.Tensor, + seq_len_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + batches: int, + max_seq_length: int, + token_nums: int, + slice_offset: int, + slice_size: int, + add_inputs: bool = False, +) -> None: + return + + try: - sgmv_expand_slice = torch.library.custom_op("lora::sgmv_expand_slice", - _sgmv_expand_slice, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="sgmv_expand_slice", + op_func=_sgmv_expand_slice, + mutates_args=["output_tensor"], + fake_impl=sgmv_expand_slice_fake, + ) + sgmv_expand_slice = torch.ops.vllm.sgmv_expand_slice + except AttributeError: sgmv_expand_slice = _sgmv_expand_slice diff --git a/vllm/lora/ops/sgmv_shrink.py b/vllm/lora/ops/sgmv_shrink.py index b4d893047b06b..37d1dc84eebca 100644 --- a/vllm/lora/ops/sgmv_shrink.py +++ b/vllm/lora/ops/sgmv_shrink.py @@ -9,6 +9,8 @@ import triton import triton.language as tl +from vllm.utils import direct_register_custom_op + @triton.jit def _sgmv_shrink_kernel( @@ -190,9 +192,29 @@ def _sgmv_shrink( return +def sgmv_shrink_fake( + inputs: torch.Tensor, + lora_a_weights: torch.Tensor, + output_tensor: torch.Tensor, + b_seq_start_loc: torch.Tensor, + seq_len_tensor: torch.Tensor, + lora_indices_tensor: torch.Tensor, + batches: int, + max_seq_length: int, + token_nums: int, + scaling: float, +) -> None: + return + + try: - sgmv_shrink = torch.library.custom_op("lora::sgmv_shrink", - _sgmv_shrink, - mutates_args=["output_tensor"]) + direct_register_custom_op( + op_name="sgmv_shrink", + op_func=_sgmv_shrink, + mutates_args=["output_tensor"], + fake_impl=sgmv_shrink_fake, + ) + sgmv_shrink = torch.ops.vllm.sgmv_shrink + except AttributeError: sgmv_shrink = _sgmv_shrink From c51e397fe8db2ef0664814ef3f80e1237c7283da Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Thu, 21 Nov 2024 09:21:31 -0800 Subject: [PATCH 086/397] [Misc] Suppress duplicated logging regarding multimodal input pipeline (#10530) Signed-off-by: Roger Wang --- vllm/inputs/preprocess.py | 4 ++-- vllm/utils.py | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git 
a/vllm/inputs/preprocess.py b/vllm/inputs/preprocess.py index aacff87df6d79..853257c5ad71f 100644 --- a/vllm/inputs/preprocess.py +++ b/vllm/inputs/preprocess.py @@ -10,7 +10,7 @@ from vllm.multimodal.processing import MultiModalDataDict, MultiModalInputsV2 from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.transformers_utils.tokenizer_group import BaseTokenizerGroup -from vllm.utils import print_warning_once +from vllm.utils import print_info_once, print_warning_once from .data import (DecoderOnlyInputs, EncoderDecoderInputs, ProcessorInputs, PromptType, SingletonInputs, SingletonPrompt, token_inputs) @@ -212,7 +212,7 @@ def _can_process_multimodal(self) -> bool: # updated to use the new multi-modal processor can_process_multimodal = self.mm_registry.has_processor(model_config) if not can_process_multimodal: - logger.info( + print_info_once( "Your model uses the legacy input pipeline instead of the new " "multi-modal processor. Please note that the legacy pipeline " "will be removed in a future release. For more details, see: " diff --git a/vllm/utils.py b/vllm/utils.py index cb2ad43a2ae8d..424e7d0947790 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -705,6 +705,12 @@ def create_kv_caches_with_random( return key_caches, value_caches +@lru_cache +def print_info_once(msg: str) -> None: + # Set the stacklevel to 2 to print the caller's line info + logger.info(msg, stacklevel=2) + + @lru_cache def print_warning_once(msg: str) -> None: # Set the stacklevel to 2 to print the caller's line info From e7a8341c7c7481a0c797d50ead7a698255ac8a9f Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 22 Nov 2024 02:09:43 +0800 Subject: [PATCH 087/397] [Bugfix] Allow token ID-only inputs in Qwen2-Audio (#10536) Signed-off-by: DarkLight1337 --- vllm/model_executor/models/qwen2_audio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index a4965f34b1ca8..0c2374c3c3fc9 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -212,7 +212,7 @@ def input_processor_for_qwen2_audio( return token_inputs( prompt_token_ids=new_input_ids, - prompt=inputs['prompt'], + prompt=inputs.get("prompt"), multi_modal_data=multi_modal_data, ) From 7560ae5cafbae3af9967ac7dc979cb31a40fc572 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 21 Nov 2024 12:30:42 -0800 Subject: [PATCH 088/397] [8/N] enable cli flag without a space (#10529) Signed-off-by: youkaichao --- tests/compile/test_basic_correctness.py | 4 ++-- tests/engine/test_arg_utils.py | 28 +++++++++++++++++++++++++ tests/tpu/test_custom_dispatcher.py | 9 ++++---- vllm/engine/arg_utils.py | 5 ++++- vllm/utils.py | 4 ++++ 5 files changed, 43 insertions(+), 7 deletions(-) diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index c0db2e78824be..b7170886d2556 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -103,7 +103,7 @@ def test_compile_correctness(test_setting: TestSetting): CompilationLevel.NO_COMPILATION, CompilationLevel.PIECEWISE, ]: - all_args.append(final_args + ["-O", str(level)]) + all_args.append(final_args + [f"-O{level}"]) all_envs.append({}) # inductor will change the output, so we only compare if the output @@ -121,7 +121,7 @@ def test_compile_correctness(test_setting: TestSetting): CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE, ]: - all_args.append(final_args + ["-O", 
str(level)]) + all_args.append(final_args + [f"-O{level}"]) all_envs.append({}) if level != CompilationLevel.DYNAMO_ONCE and not fullgraph: # "DYNAMO_ONCE" will always use fullgraph diff --git a/tests/engine/test_arg_utils.py b/tests/engine/test_arg_utils.py index 7b1be5a9802fd..5b0e76fe53685 100644 --- a/tests/engine/test_arg_utils.py +++ b/tests/engine/test_arg_utils.py @@ -31,6 +31,34 @@ def test_limit_mm_per_prompt_parser(arg, expected): assert args.limit_mm_per_prompt == expected +def test_compilation_config(): + parser = EngineArgs.add_cli_args(FlexibleArgumentParser()) + + # default value + args = parser.parse_args([]) + assert args.compilation_config is None + + # set to O3 + args = parser.parse_args(["-O3"]) + assert args.compilation_config.level == 3 + + # set to O 3 (space) + args = parser.parse_args(["-O", "3"]) + assert args.compilation_config.level == 3 + + # set to O 3 (equals) + args = parser.parse_args(["-O=3"]) + assert args.compilation_config.level == 3 + + # set to json + args = parser.parse_args(["--compilation-config", '{"level": 3}']) + assert args.compilation_config.level == 3 + + # set to json + args = parser.parse_args(['--compilation-config={"level": 3}']) + assert args.compilation_config.level == 3 + + def test_valid_pooling_config(): parser = EngineArgs.add_cli_args(FlexibleArgumentParser()) args = parser.parse_args([ diff --git a/tests/tpu/test_custom_dispatcher.py b/tests/tpu/test_custom_dispatcher.py index df348258efcba..bb1379deba3fc 100644 --- a/tests/tpu/test_custom_dispatcher.py +++ b/tests/tpu/test_custom_dispatcher.py @@ -13,9 +13,10 @@ def test_custom_dispatcher(): compare_two_settings( "google/gemma-2b", - arg1=["--enforce-eager", "-O", - str(CompilationLevel.DYNAMO_ONCE)], - arg2=["--enforce-eager", "-O", - str(CompilationLevel.DYNAMO_AS_IS)], + arg1=[ + "--enforce-eager", + f"-O{CompilationLevel.DYNAMO_ONCE}", + ], + arg2=["--enforce-eager", f"-O{CompilationLevel.DYNAMO_AS_IS}"], env1={}, env2={}) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 9288cd22c0036..88862a185ac75 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -882,7 +882,10 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'testing only. level 3 is the recommended level ' 'for production.\n' 'To specify the full compilation config, ' - 'use a JSON string.') + 'use a JSON string.\n' + 'Following the convention of traditional ' + 'compilers, using -O without space is also ' + 'supported. -O3 is equivalent to -O 3.') return parser diff --git a/vllm/utils.py b/vllm/utils.py index 424e7d0947790..67b2629ecc933 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1192,6 +1192,10 @@ def parse_args(self, args=None, namespace=None): else: processed_args.append('--' + arg[len('--'):].replace('_', '-')) + elif arg.startswith('-O') and arg != '-O' and len(arg) == 2: + # allow -O flag to be used without space, e.g. 
-O3 + processed_args.append('-O') + processed_args.append(arg[2:]) else: processed_args.append(arg) From f9310cbd0c1109c4f22cf9f1dc615b2d08f06408 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 21 Nov 2024 12:53:39 -0800 Subject: [PATCH 089/397] [V1] Fix Compilation config & Enable CUDA graph by default (#10528) Signed-off-by: Woosuk Kwon --- vllm/config.py | 3 +- vllm/v1/worker/gpu_model_runner.py | 62 ++++++++++++++++-------------- vllm/v1/worker/gpu_worker.py | 39 ++++++++++++------- 3 files changed, 62 insertions(+), 42 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index ed09f8ae31863..d1c6a850cb78c 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2370,7 +2370,7 @@ def __post_init__(self): if self.compilation_config is None: self.compilation_config = CompilationConfig() - if envs.VLLM_USE_V1: + if envs.VLLM_USE_V1 and not self.model_config.enforce_eager: # NOTE(woosuk): Currently, we use inductor because the piecewise # CUDA graphs do not work properly with the custom CUDA kernels. # FIXME(woosuk): Disable inductor to reduce the compilation time @@ -2380,6 +2380,7 @@ def __post_init__(self): self.compilation_config.use_inductor = True self.compilation_config.pass_config.enable_fusion = False self.compilation_config.pass_config.enable_reshape = False + self.compilation_config.level = CompilationLevel.PIECEWISE current_platform.check_and_update_config(self) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 5f66293cbe8e4..2cf55cd497659 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1,3 +1,4 @@ +import gc import time from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple @@ -515,7 +516,25 @@ def load_model(self) -> None: logger.info("Loading model weights took %.4f GB", self.model_memory_usage / float(2**30)) - def _dummy_run(self, model: nn.Module, num_tokens: int) -> None: + @torch.inference_mode() + def _dummy_run( + self, + model: nn.Module, + num_tokens: int, + kv_caches: List[torch.Tensor], + ) -> torch.Tensor: + with set_forward_context(None): + hidden_states = model( + input_ids=None, + positions=self.positions[:num_tokens], + kv_caches=kv_caches, + attn_metadata=None, + inputs_embeds=self.inputs_embeds[:num_tokens]) + return hidden_states + + def profile_run(self) -> None: + # TODO(woosuk): Profile the max memory usage of the encoder and + # the encoder cache. # use an empty tensor instead of `None`` to force Dynamo to pass # it by reference, rather by specializing on the value `None`. # the `dtype` argument does not matter, and we use `float32` as @@ -527,23 +546,17 @@ def _dummy_run(self, model: nn.Module, num_tokens: int) -> None: torch.tensor([], dtype=torch.float32, device=self.device) for _ in range(self.num_attn_layers) ] - with set_forward_context(None): # noqa: SIM117 - with set_compile_context(self.cudagraph_batch_sizes): - # Trigger compilation for general shape. - model(input_ids=None, - positions=self.positions, - kv_caches=dummy_kv_caches, - attn_metadata=None, - inputs_embeds=self.inputs_embeds) - - @torch.inference_mode() - def profile_run(self) -> None: - # TODO(woosuk): Profile the max memory usage of the encoder and - # the encoder cache. - self._dummy_run(self.model, self.max_num_tokens) + with set_compile_context(self.cudagraph_batch_sizes): + # Trigger compilation for general shape. 
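Together with the parser change above, the optimization level can now be set from the command line without a space, e.g. `vllm serve facebook/opt-125m -O3` (model name is an example). A small sketch of the equivalent programmatic check, mirroring the new unit test:

from vllm.engine.arg_utils import EngineArgs
from vllm.utils import FlexibleArgumentParser

parser = EngineArgs.add_cli_args(FlexibleArgumentParser())
# "-O3", "-O 3", "-O=3" and '--compilation-config={"level": 3}' all parse
# to the same setting.
args = parser.parse_args(["-O3"])
assert args.compilation_config.level == 3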
+ hidden_states = self._dummy_run(self.model, self.max_num_tokens, + dummy_kv_caches) + logits = self.model.compute_logits(hidden_states, None) + logits = logits[:self.max_num_tokens] + # TODO(woosuk): Consider the memory usage of the sampler. torch.cuda.synchronize() + del hidden_states, logits + gc.collect() - @torch.inference_mode() def capture_model(self) -> None: if not self.use_cuda_graph: logger.warning( @@ -554,18 +567,11 @@ def capture_model(self) -> None: start_time = time.perf_counter() start_free_gpu_memory = torch.cuda.mem_get_info()[0] - with set_forward_context(None): - # Trigger CUDA graph capture for specific shapes. - # Capture the large shapes first so that the smaller shapes - # can reuse the memory pool allocated for the large shapes. - for num_tokens in reversed(self.cudagraph_batch_sizes): - self.model( - input_ids=None, - positions=self.positions[:num_tokens], - kv_caches=self.kv_caches, - attn_metadata=None, - inputs_embeds=self.inputs_embeds[:num_tokens], - ) + # Trigger CUDA graph capture for specific shapes. + # Capture the large shapes first so that the smaller shapes + # can reuse the memory pool allocated for the large shapes. + for num_tokens in reversed(self.cudagraph_batch_sizes): + self._dummy_run(self.model, num_tokens, self.kv_caches) end_time = time.perf_counter() end_free_gpu_memory = torch.cuda.mem_get_info()[0] diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index c8192b7f86eb0..7973349f14a5d 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -105,35 +105,48 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: # Profile the memory usage of the model and get the maximum number of # cache blocks that can be allocated with the remaining free memory. torch.cuda.empty_cache() + torch.cuda.reset_peak_memory_stats() + _, total_gpu_memory = torch.cuda.mem_get_info() # Execute a forward pass with dummy inputs to profile the memory usage # of the model. self.model_runner.profile_run() - - # Calculate the number of blocks that can be allocated with the - # profiled peak memory. torch.cuda.synchronize() - free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info() + + free_gpu_memory, _ = torch.cuda.mem_get_info() # NOTE(woosuk): Here we assume that the other processes using the same # GPU did not change their memory usage during the profiling. - peak_memory = self.init_gpu_memory - free_gpu_memory - assert peak_memory > 0, ( + assert self.init_gpu_memory > free_gpu_memory, ( "Error in memory profiling. " f"Initial free memory {self.init_gpu_memory}, current free memory" f" {free_gpu_memory}. This happens when the GPU memory was " "not properly cleaned up before initializing the vLLM instance.") + # Get the peak memory allocation recorded by torch + peak_memory = torch.cuda.memory_stats()["allocated_bytes.all.peak"] + + # Check for any memory left around that may have been allocated on the + # gpu outside of `torch`. 
NCCL operations, for example, can use a few + # GB during a forward pass + torch.cuda.empty_cache() + torch_allocated_bytes = torch.cuda.memory_stats( + )["allocated_bytes.all.current"] + total_allocated_bytes = torch.cuda.mem_get_info( + )[1] - torch.cuda.mem_get_info()[0] + non_torch_allocations = total_allocated_bytes - torch_allocated_bytes + if non_torch_allocations > 0: + peak_memory += non_torch_allocations + available_kv_cache_memory = ( + total_gpu_memory * self.cache_config.gpu_memory_utilization - + peak_memory) + + # Calculate the number of blocks that can be allocated with the + # profiled peak memory. cache_block_size = _get_cache_block_size(self.cache_config, self.model_config, self.parallel_config) - num_gpu_blocks = int( - (total_gpu_memory * self.cache_config.gpu_memory_utilization - - peak_memory) // cache_block_size) + num_gpu_blocks = int(available_kv_cache_memory // cache_block_size) num_gpu_blocks = max(num_gpu_blocks, 0) - # if self.model_runner.lora_manager: - # self.model_runner.remove_all_loras() - gc.collect() - torch.cuda.empty_cache() return num_gpu_blocks, 0 def initialize_cache(self, num_gpu_blocks: int) -> None: From edec3385b641afb22739a6ec0fd0145f8f1141c5 Mon Sep 17 00:00:00 2001 From: Yunmeng Date: Fri, 22 Nov 2024 05:03:58 +0800 Subject: [PATCH 090/397] [CI][Installation] Avoid uploading CUDA 11.8 wheel (#10535) Signed-off-by: simon-mo Co-authored-by: simon-mo --- .buildkite/upload-wheels.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.buildkite/upload-wheels.sh b/.buildkite/upload-wheels.sh index 541b395eddbe7..7345dd4e66b29 100644 --- a/.buildkite/upload-wheels.sh +++ b/.buildkite/upload-wheels.sh @@ -25,7 +25,12 @@ echo "Version: $version" # If the version contains "dev", rename it to v1.0.0.dev for consistency if [[ $version == *dev* ]]; then - new_version="1.0.0.dev" + suffix="${version##*.}" + if [[ $suffix == cu* ]]; then + new_version="1.0.0.dev+${suffix}" + else + new_version="1.0.0.dev" + fi new_wheel="${wheel/$version/$new_version}" mv -- "$wheel" "$new_wheel" wheel="$new_wheel" From cf656f5a022c1ef6f0513c53c5106c8eeff7fdaa Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 21 Nov 2024 13:13:17 -0800 Subject: [PATCH 091/397] [misc] improve error message (#10553) Signed-off-by: youkaichao --- vllm/platforms/cuda.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 07562a8c3d71e..b38dd7c936896 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -99,8 +99,14 @@ def device_id_to_physical_device_id(device_id: int) -> int: if "CUDA_VISIBLE_DEVICES" in os.environ: device_ids = os.environ["CUDA_VISIBLE_DEVICES"].split(",") if device_ids == [""]: - raise RuntimeError("CUDA_VISIBLE_DEVICES is set to empty string," - " which means GPU support is disabled.") + msg = ( + "CUDA_VISIBLE_DEVICES is set to empty string, which means" + " GPU support is disabled. If you are using ray, please unset" + " the environment variable `CUDA_VISIBLE_DEVICES` inside the" + " worker/actor. 
" + "Check https://github.com/vllm-project/vllm/issues/8402 for" + " more information.") + raise RuntimeError(msg) physical_device_id = device_ids[device_id] return int(physical_device_id) else: From 46fe9b46d83e733130ce952eb3967a9c96713583 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 21 Nov 2024 13:28:16 -0800 Subject: [PATCH 092/397] [Minor] Revert change in offline inference example (#10545) Signed-off-by: Woosuk Kwon --- examples/offline_inference.py | 98 +++++++------------------------ examples/offline_inference_cli.py | 80 +++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 78 deletions(-) create mode 100644 examples/offline_inference_cli.py diff --git a/examples/offline_inference.py b/examples/offline_inference.py index 391ac6b9b6b03..9b758fa2479f6 100644 --- a/examples/offline_inference.py +++ b/examples/offline_inference.py @@ -1,80 +1,22 @@ -from dataclasses import asdict - from vllm import LLM, SamplingParams -from vllm.engine.arg_utils import EngineArgs -from vllm.utils import FlexibleArgumentParser - - -def get_prompts(num_prompts: int): - # The default sample prompts. - prompts = [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is", - ] - - if num_prompts != len(prompts): - prompts = (prompts * ((num_prompts // len(prompts)) + 1))[:num_prompts] - - return prompts - - -def main(args): - # Create prompts - prompts = get_prompts(args.num_prompts) - - # Create a sampling params object. - sampling_params = SamplingParams(n=args.n, - temperature=args.temperature, - top_p=args.top_p, - top_k=args.top_k, - max_tokens=args.max_tokens) - - # Create an LLM. - # The default model is 'facebook/opt-125m' - engine_args = EngineArgs.from_cli_args(args) - llm = LLM(**asdict(engine_args)) - - # Generate texts from the prompts. - # The output is a list of RequestOutput objects - # that contain the prompt, generated text, and other information. - outputs = llm.generate(prompts, sampling_params) - # Print the outputs. - for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") - - -if __name__ == '__main__': - parser = FlexibleArgumentParser() - parser = EngineArgs.add_cli_args(parser) - group = parser.add_argument_group("SamplingParams options") - group.add_argument("--num-prompts", - type=int, - default=4, - help="Number of prompts used for inference") - group.add_argument("--max-tokens", - type=int, - default=16, - help="Generated output length for sampling") - group.add_argument('--n', - type=int, - default=1, - help='Number of generated sequences per prompt') - group.add_argument('--temperature', - type=float, - default=0.8, - help='Temperature for text generation') - group.add_argument('--top-p', - type=float, - default=0.95, - help='top_p for text generation') - group.add_argument('--top-k', - type=int, - default=-1, - help='top_k for text generation') - args = parser.parse_args() - main(args) +# Sample prompts. +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] +# Create a sampling params object. +sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + +# Create an LLM. +llm = LLM(model="facebook/opt-125m") +# Generate texts from the prompts. The output is a list of RequestOutput objects +# that contain the prompt, generated text, and other information. 
+outputs = llm.generate(prompts, sampling_params) +# Print the outputs. +for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") diff --git a/examples/offline_inference_cli.py b/examples/offline_inference_cli.py new file mode 100644 index 0000000000000..391ac6b9b6b03 --- /dev/null +++ b/examples/offline_inference_cli.py @@ -0,0 +1,80 @@ +from dataclasses import asdict + +from vllm import LLM, SamplingParams +from vllm.engine.arg_utils import EngineArgs +from vllm.utils import FlexibleArgumentParser + + +def get_prompts(num_prompts: int): + # The default sample prompts. + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + + if num_prompts != len(prompts): + prompts = (prompts * ((num_prompts // len(prompts)) + 1))[:num_prompts] + + return prompts + + +def main(args): + # Create prompts + prompts = get_prompts(args.num_prompts) + + # Create a sampling params object. + sampling_params = SamplingParams(n=args.n, + temperature=args.temperature, + top_p=args.top_p, + top_k=args.top_k, + max_tokens=args.max_tokens) + + # Create an LLM. + # The default model is 'facebook/opt-125m' + engine_args = EngineArgs.from_cli_args(args) + llm = LLM(**asdict(engine_args)) + + # Generate texts from the prompts. + # The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + + +if __name__ == '__main__': + parser = FlexibleArgumentParser() + parser = EngineArgs.add_cli_args(parser) + group = parser.add_argument_group("SamplingParams options") + group.add_argument("--num-prompts", + type=int, + default=4, + help="Number of prompts used for inference") + group.add_argument("--max-tokens", + type=int, + default=16, + help="Generated output length for sampling") + group.add_argument('--n', + type=int, + default=1, + help='Number of generated sequences per prompt') + group.add_argument('--temperature', + type=float, + default=0.8, + help='Temperature for text generation') + group.add_argument('--top-p', + type=float, + default=0.95, + help='top_p for text generation') + group.add_argument('--top-k', + type=int, + default=-1, + help='top_k for text generation') + + args = parser.parse_args() + main(args) From 9afa01455237892c878bb2810912c487d66149a9 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Thu, 21 Nov 2024 18:43:43 -0500 Subject: [PATCH 093/397] Add small example to metrics.rst (#10550) --- docs/source/serving/metrics.rst | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/docs/source/serving/metrics.rst b/docs/source/serving/metrics.rst index 15e57bd3fec65..231111cd7b738 100644 --- a/docs/source/serving/metrics.rst +++ b/docs/source/serving/metrics.rst @@ -2,9 +2,34 @@ Production Metrics ================== vLLM exposes a number of metrics that can be used to monitor the health of the -system. These metrics are exposed via the `/metrics` endpoint on the vLLM +system. These metrics are exposed via the ``/metrics`` endpoint on the vLLM OpenAI compatible API server. +You can start the server using Python, or using [Docker](deploying_with_docker.rst): + +.. 
code-block:: console + + $ vllm serve unsloth/Llama-3.2-1B-Instruct + +Then query the endpoint to get the latest metrics from the server: + +.. code-block:: console + + $ curl http://0.0.0.0:8000/metrics + + # HELP vllm:iteration_tokens_total Histogram of number of tokens per engine_step. + # TYPE vllm:iteration_tokens_total histogram + vllm:iteration_tokens_total_sum{model_name="unsloth/Llama-3.2-1B-Instruct"} 0.0 + vllm:iteration_tokens_total_bucket{le="1.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="8.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="16.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="32.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="64.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="128.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="256.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="512.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + ... + The following metrics are exposed: .. literalinclude:: ../../../vllm/engine/metrics.py From aed074860a46536faf77bacd76d02efccbaf4a5d Mon Sep 17 00:00:00 2001 From: Simon Mo Date: Thu, 21 Nov 2024 18:27:20 -0800 Subject: [PATCH 094/397] [Benchmark] Add new H100 machine (#10547) --- .../benchmark-pipeline.yaml | 39 ++++++++++--------- .../convert-results-json-to-markdown.py | 13 +++++-- 2 files changed, 31 insertions(+), 21 deletions(-) diff --git a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml index 5c069b38b2d7d..3db77d5f16022 100644 --- a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml +++ b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml @@ -13,6 +13,7 @@ steps: - wait - label: "A100" + # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing" agents: queue: A100 plugins: @@ -45,6 +46,7 @@ steps: medium: Memory - label: "H200" + # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing" agents: queue: H200 plugins: @@ -63,21 +65,22 @@ steps: - VLLM_USAGE_SOURCE - HF_TOKEN - - # - label: "H100" - # agents: - # queue: H100 - # plugins: - # - docker#v5.11.0: - # image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT - # command: - # - bash - # - .buildkite/nightly-benchmarks/run-benchmarks-suite.sh - # mount-buildkite-agent: true - # propagate-environment: true - # ipc: host - # gpus: all - # environment: - # - VLLM_USAGE_SOURCE - # - HF_TOKEN - + - label: "H100" + # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing" + agents: + queue: H100 + plugins: + - docker#v5.12.0: + image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT + command: + - bash + - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh + mount-buildkite-agent: true + propagate-environment: true + ipc: host + gpus: all # see CUDA_VISIBLE_DEVICES for actual GPUs used + volumes: + - /data/benchmark-hf-cache:/root/.cache/huggingface + environment: + - VLLM_USAGE_SOURCE + - HF_TOKEN diff --git a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py index d640563252a0c..9d3646e2f6a15 100644 --- 
a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py +++ b/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py @@ -157,10 +157,17 @@ def results_to_json(latency, throughput, serving): throughput_results, serving_results) - # Sort all dataframes by their respective "Test name" columns for df in [latency_results, serving_results, throughput_results]: - if not df.empty: - df.sort_values(by="Test name", inplace=True) + if df.empty: + continue + + # Sort all dataframes by their respective "Test name" columns + df.sort_values(by="Test name", inplace=True) + + # The GPUs sometimes come in format of "GPUTYPE\nGPUTYPE\n...", + # we want to turn it into "8xGPUTYPE" + df["GPU"] = df["GPU"].apply( + lambda x: f"{len(x.split('\n'))}x{x.split('\n')[0]}") # get markdown tables latency_md_table = tabulate(latency_results, From 33e0a2540a6bff23cbc6a4b8f7a6784a2bc87d47 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 21 Nov 2024 19:13:31 -0800 Subject: [PATCH 095/397] [9/N] torch.compile LLM usage (#10552) Signed-off-by: youkaichao --- tests/tpu/test_compilation.py | 5 ++--- vllm/entrypoints/llm.py | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/tests/tpu/test_compilation.py b/tests/tpu/test_compilation.py index 65bee85e7a1ea..b7124ebc1b0f3 100644 --- a/tests/tpu/test_compilation.py +++ b/tests/tpu/test_compilation.py @@ -4,7 +4,7 @@ import depyf -from vllm.config import CompilationConfig, CompilationLevel +from vllm.config import CompilationLevel temp_dir = tempfile.mkdtemp() with depyf.prepare_debug(temp_dir): @@ -34,8 +34,7 @@ # all the control llm = LLM(model="google/gemma-2b", enforce_eager=True, - compilation_config=CompilationConfig( - level=CompilationLevel.DYNAMO_AS_IS)) + compilation_config={"level": CompilationLevel.DYNAMO_AS_IS}) outputs = llm.generate(prompts, sampling_params) for output, answer in zip(outputs, answers): prompt = output.prompt diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 86b0b6893f1d9..2446a64a02eb2 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1,4 +1,5 @@ import itertools +import json import warnings from contextlib import contextmanager from typing import (Any, ClassVar, Dict, List, Optional, Sequence, Tuple, Type, @@ -9,6 +10,7 @@ from vllm import envs from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput, BeamSearchSequence, get_beam_search_score) +from vllm.config import CompilationConfig from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig, TaskOption) from vllm.engine.llm_engine import LLMEngine @@ -107,13 +109,16 @@ class LLM: hf_overrides: If a dictionary, contains arguments to be forwarded to the HuggingFace config. If a callable, it is called to update the HuggingFace config. + compilation_config: Either an integer or a dictionary. If it is an integer, + it is used as the level of compilation optimization. If it is a dictionary, + it can specify the full compilation configuration. **kwargs: Arguments for :class:`~vllm.EngineArgs`. (See :ref:`engine_args`) Note: This class is intended to be used for offline inference. For online serving, use the :class:`~vllm.AsyncLLMEngine` class instead. 
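A short sketch of how the new constructor argument can be used (the model name is only an example):

from vllm import LLM

# Pass a full compilation configuration as a dictionary...
llm = LLM(model="facebook/opt-125m", compilation_config={"level": 3})

# ...or, per the docstring above, just the optimization level as an integer.
llm = LLM(model="facebook/opt-125m", compilation_config=3)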
- """ + """ # noqa DEPRECATE_LEGACY: ClassVar[bool] = False """A flag to toggle whether to deprecate the legacy generate/encode API.""" @@ -166,6 +171,7 @@ def __init__( # After positional args are removed, move this right below `model` task: TaskOption = "auto", override_pooler_config: Optional[PoolerConfig] = None, + compilation_config: Optional[Union[int, Dict[str, Any]]] = None, **kwargs, ) -> None: ''' @@ -178,6 +184,12 @@ def __init__( if "disable_log_stats" not in kwargs: kwargs["disable_log_stats"] = True + if compilation_config is not None: + compilation_config_instance = CompilationConfig.from_cli( + json.dumps(compilation_config)) + else: + compilation_config_instance = None + engine_args = EngineArgs( model=model, task=task, @@ -202,6 +214,7 @@ def __init__( hf_overrides=hf_overrides, mm_processor_kwargs=mm_processor_kwargs, override_pooler_config=override_pooler_config, + compilation_config=compilation_config_instance, **kwargs, ) # Logic to switch between engines is done at runtime instead of import From 446c7806b21d810b90604097487cc87393542aad Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 21 Nov 2024 19:40:40 -0800 Subject: [PATCH 096/397] [Minor] Fix line-too-long (#10563) Signed-off-by: Woosuk Kwon --- vllm/entrypoints/llm.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 2446a64a02eb2..c211ec5aee080 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -109,16 +109,16 @@ class LLM: hf_overrides: If a dictionary, contains arguments to be forwarded to the HuggingFace config. If a callable, it is called to update the HuggingFace config. - compilation_config: Either an integer or a dictionary. If it is an integer, - it is used as the level of compilation optimization. If it is a dictionary, - it can specify the full compilation configuration. + compilation_config: Either an integer or a dictionary. If it is an + integer, it is used as the level of compilation optimization. If it + is a dictionary, it can specify the full compilation configuration. **kwargs: Arguments for :class:`~vllm.EngineArgs`. (See :ref:`engine_args`) Note: This class is intended to be used for offline inference. For online serving, use the :class:`~vllm.AsyncLLMEngine` class instead. 
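# Usage sketch for the `compilation_config` argument documented above. It is
# illustrative only: the model name is a placeholder, and the two forms shown
# follow the docstring and the constructor change in this patch (an integer is
# treated as the optimization level; a dictionary is forwarded to
# CompilationConfig.from_cli via json.dumps).
from vllm import LLM

# Integer form: just the optimization level.
llm_int = LLM(model="facebook/opt-125m", compilation_config=3)

# Dictionary form: a fuller compilation configuration, mirroring the TPU test
# above that passes {"level": CompilationLevel.DYNAMO_AS_IS}.
llm_dict = LLM(model="facebook/opt-125m", compilation_config={"level": 3})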
- """ # noqa + """ DEPRECATE_LEGACY: ClassVar[bool] = False """A flag to toggle whether to deprecate the legacy generate/encode API.""" From a111d0151ffed94582bec65635979e04e5b63676 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 21 Nov 2024 21:00:32 -0800 Subject: [PATCH 097/397] [platforms] absorb worker cls difference into platforms folder (#10555) Signed-off-by: youkaichao Co-authored-by: Nick Hill --- vllm/config.py | 238 ++++++++++++------------ vllm/engine/arg_utils.py | 11 +- vllm/executor/cpu_executor.py | 7 +- vllm/executor/gpu_executor.py | 49 +---- vllm/executor/hpu_executor.py | 5 +- vllm/executor/multiproc_gpu_executor.py | 2 +- vllm/executor/neuron_executor.py | 5 +- vllm/executor/openvino_executor.py | 8 +- vllm/executor/ray_gpu_executor.py | 16 +- vllm/executor/ray_hpu_executor.py | 36 +--- vllm/executor/ray_tpu_executor.py | 19 +- vllm/executor/xpu_executor.py | 14 +- vllm/platforms/cpu.py | 2 + vllm/platforms/cuda.py | 21 ++- vllm/platforms/hpu.py | 23 +++ vllm/platforms/neuron.py | 14 ++ vllm/platforms/openvino.py | 18 ++ vllm/platforms/rocm.py | 20 ++ vllm/platforms/tpu.py | 12 ++ vllm/platforms/xpu.py | 6 + vllm/worker/worker_base.py | 30 +-- 21 files changed, 273 insertions(+), 283 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index d1c6a850cb78c..b5f2116e3557b 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -926,56 +926,56 @@ def _verify_load_format(self) -> None: f"{rocm_supported_load_format}") +@dataclass class ParallelConfig: - """Configuration for the distributed execution. + """Configuration for the distributed execution.""" - Args: - pipeline_parallel_size: Number of pipeline parallel groups. - tensor_parallel_size: Number of tensor parallel groups. - worker_use_ray: Deprecated, use distributed_executor_backend instead. - max_parallel_loading_workers: Maximum number of multiple batches - when load model sequentially. To avoid RAM OOM when using tensor - parallel and large models. - disable_custom_all_reduce: Disable the custom all-reduce kernel and - fall back to NCCL. - tokenizer_pool_config: Config for the tokenizer pool. - If None, will use synchronous tokenization. - ray_workers_use_nsight: Whether to profile Ray workers with nsight, see - https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler. - placement_group: ray distributed model workers placement group. - distributed_executor_backend: Backend to use for distributed model - workers, either "ray" or "mp" (multiprocessing). If the product - of pipeline_parallel_size and tensor_parallel_size is less than - or equal to the number of GPUs available, "mp" will be used to - keep processing on a single host. Otherwise, this will default - to "ray" if Ray is installed and fail otherwise. Note that tpu - and hpu only support Ray for distributed inference. - """ + pipeline_parallel_size: int = 1 # Number of pipeline parallel groups. + tensor_parallel_size: int = 1 # Number of tensor parallel groups. 
- def __init__( - self, - pipeline_parallel_size: int, - tensor_parallel_size: int, - worker_use_ray: Optional[bool] = None, - max_parallel_loading_workers: Optional[int] = None, - disable_custom_all_reduce: bool = False, - tokenizer_pool_config: Optional[TokenizerPoolConfig] = None, - ray_workers_use_nsight: bool = False, - placement_group: Optional["PlacementGroup"] = None, - distributed_executor_backend: Optional[Union[ - str, Type["ExecutorBase"]]] = None, - ) -> None: - self.pipeline_parallel_size = pipeline_parallel_size - self.tensor_parallel_size = tensor_parallel_size - self.distributed_executor_backend = distributed_executor_backend - self.max_parallel_loading_workers = max_parallel_loading_workers - self.disable_custom_all_reduce = disable_custom_all_reduce - self.tokenizer_pool_config = tokenizer_pool_config - self.ray_workers_use_nsight = ray_workers_use_nsight - self.placement_group = placement_group - self.world_size = pipeline_parallel_size * self.tensor_parallel_size - - if worker_use_ray: + # Deprecated, use distributed_executor_backend instead. + worker_use_ray: Optional[bool] = None + + # Maximum number of multiple batches + # when load model sequentially. To avoid RAM OOM when using tensor + # parallel and large models. + max_parallel_loading_workers: Optional[int] = None + + # Disable the custom all-reduce kernel and fall back to NCCL. + disable_custom_all_reduce: bool = False + + # Config for the tokenizer pool. If None, will use synchronous tokenization. + tokenizer_pool_config: Optional[TokenizerPoolConfig] = None + + # Whether to profile Ray workers with nsight, see https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler. + ray_workers_use_nsight: bool = False + + # ray distributed model workers placement group. + placement_group: Optional["PlacementGroup"] = None + + # Backend to use for distributed model + # workers, either "ray" or "mp" (multiprocessing). If the product + # of pipeline_parallel_size and tensor_parallel_size is less than + # or equal to the number of GPUs available, "mp" will be used to + # keep processing on a single host. Otherwise, this will default + # to "ray" if Ray is installed and fail otherwise. Note that tpu + # and hpu only support Ray for distributed inference. + distributed_executor_backend: Optional[Union[str, + Type["ExecutorBase"]]] = None + + # the full name of the worker class to use. If "auto", the worker class + # will be determined based on the platform. + worker_cls: str = "auto" + + world_size: int = field(init=False) + + rank: int = 0 + + def __post_init__(self) -> None: + self.world_size = self.pipeline_parallel_size * \ + self.tensor_parallel_size + + if self.worker_use_ray: if self.distributed_executor_backend is None: self.distributed_executor_backend = "ray" elif not self.use_ray: @@ -1026,7 +1026,6 @@ def __init__( backend) self._verify_args() - self.rank: int = 0 @property def use_ray(self) -> bool: @@ -1059,100 +1058,97 @@ def _verify_args(self) -> None: "run with Ray.") +@dataclass class SchedulerConfig: - """Scheduler configuration. + """Scheduler configuration.""" - Args: - task: The task to use the model for. - max_num_batched_tokens: Maximum number of tokens to be processed in - a single iteration. - max_num_seqs: Maximum number of sequences to be processed in a single - iteration. - max_model_len: Maximum length of a sequence (including prompt - and generated text). 
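# Toy illustration (stand-in names, not vLLM code) of the dataclass pattern
# these config rewrites use for both ParallelConfig above and SchedulerConfig
# below: derived values are declared with field(init=False) and filled in
# __post_init__, so they never appear as constructor arguments.
from dataclasses import dataclass, field


@dataclass
class ToyParallelConfig:
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    world_size: int = field(init=False)

    def __post_init__(self) -> None:
        self.world_size = (self.pipeline_parallel_size *
                           self.tensor_parallel_size)


assert ToyParallelConfig(tensor_parallel_size=4).world_size == 4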
- num_lookahead_slots: The number of slots to allocate per sequence per - step, beyond the known token ids. This is used in speculative - decoding to store KV activations of tokens which may or may not be - accepted. - delay_factor: Apply a delay (of delay factor multiplied by previous - prompt latency) before scheduling next prompt. - enable_chunked_prefill: If True, prefill requests can be chunked based - on the remaining max_num_batched_tokens. - preemption_mode: Whether to perform preemption by swapping or - recomputation. If not specified, we determine the mode as follows: - We use recomputation by default since it incurs lower overhead than - swapping. However, when the sequence group has multiple sequences - (e.g., beam search), recomputation is not currently supported. In - such a case, we use swapping instead. - send_delta_data: Private API. If used, scheduler sends delta data to - workers instead of an entire data. It should be enabled only - when SPMD worker architecture is enabled. I.e., - VLLM_USE_RAY_SPMD_WORKER=1 - policy: The scheduling policy to use. "fcfs" (default) or "priority". - """ + task: str = "generate" # The task to use the model for. + + # Maximum number of tokens to be processed in a single iteration. + max_num_batched_tokens: int = field(default=None) # type: ignore + + # Maximum number of sequences to be processed in a single iteration. + max_num_seqs: int = 128 + + # Maximum length of a sequence (including prompt and generated text). + max_model_len: int = 8192 + + # The number of slots to allocate per sequence per + # step, beyond the known token ids. This is used in speculative + # decoding to store KV activations of tokens which may or may not be + # accepted. + num_lookahead_slots: int = 0 + + # Apply a delay (of delay factor multiplied by previous + # prompt latency) before scheduling next prompt. + delay_factor: float = 0.0 + + # If True, prefill requests can be chunked based + # on the remaining max_num_batched_tokens. + enable_chunked_prefill: bool = False + + is_multimodal_model: bool = False - def __init__(self, - task: _Task, - max_num_batched_tokens: Optional[int], - max_num_seqs: int, - max_model_len: int, - num_lookahead_slots: int = 0, - delay_factor: float = 0.0, - enable_chunked_prefill: bool = False, - is_multimodal_model: bool = False, - preemption_mode: Optional[str] = None, - num_scheduler_steps: int = 1, - multi_step_stream_outputs: bool = False, - send_delta_data: bool = False, - policy: str = "fcfs") -> None: - if max_num_batched_tokens is None: - if enable_chunked_prefill: - if num_scheduler_steps > 1: + # Whether to perform preemption by swapping or + # recomputation. If not specified, we determine the mode as follows: + # We use recomputation by default since it incurs lower overhead than + # swapping. However, when the sequence group has multiple sequences + # (e.g., beam search), recomputation is not currently supported. In + # such a case, we use swapping instead. + preemption_mode: Optional[str] = None + + num_scheduler_steps: int = 1 + + multi_step_stream_outputs: bool = False + + # Private API. If used, scheduler sends delta data to + # workers instead of an entire data. It should be enabled only + # when SPMD worker architecture is enabled. I.e., + # VLLM_USE_RAY_SPMD_WORKER=1 + send_delta_data: bool = False + + # The scheduling policy to use. "fcfs" (default) or "priority". 
+ policy: str = "fcfs" + + chunked_prefill_enabled: bool = field(init=False) + + def __post_init__(self) -> None: + if self.max_num_batched_tokens is None: + if self.enable_chunked_prefill: + if self.num_scheduler_steps > 1: # Multi-step Chunked-Prefill doesn't allow prompt-chunking # for now. Have max_num_batched_tokens set to max_model_len # so we don't reject sequences on account of a short # max_num_batched_tokens. - max_num_batched_tokens = max(max_model_len, 2048) + self.max_num_batched_tokens = max(self.max_model_len, 2048) else: # It is the values that have the best balance between ITL # and TTFT on A100. Note it is not optimized for throughput. - max_num_batched_tokens = 512 + self.max_num_batched_tokens = 512 else: # If max_model_len is too short, use 2048 as the default value # for higher throughput. - max_num_batched_tokens = max(max_model_len, 2048) + self.max_num_batched_tokens = max(self.max_model_len, 2048) - if task == "embedding": + if self.task == "embedding": # For embedding, choose specific value for higher throughput - max_num_batched_tokens = max( - max_num_batched_tokens, + self.max_num_batched_tokens = max( + self.max_num_batched_tokens, _EMBEDDING_MODEL_MAX_NUM_BATCHED_TOKENS, ) - if is_multimodal_model: + if self.is_multimodal_model: # The value needs to be at least the number of multimodal tokens - max_num_batched_tokens = max( - max_num_batched_tokens, + self.max_num_batched_tokens = max( + self.max_num_batched_tokens, _MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS, ) - self.max_num_batched_tokens = max_num_batched_tokens - - if enable_chunked_prefill: + if self.enable_chunked_prefill: logger.info( "Chunked prefill is enabled with max_num_batched_tokens=%d.", self.max_num_batched_tokens) - self.task: Final = task - self.max_num_seqs = max_num_seqs - self.max_model_len = max_model_len - self.num_lookahead_slots = num_lookahead_slots - self.delay_factor = delay_factor - self.chunked_prefill_enabled = enable_chunked_prefill - self.preemption_mode = preemption_mode - self.num_scheduler_steps = num_scheduler_steps - self.multi_step_stream_outputs = multi_step_stream_outputs - self.send_delta_data = send_delta_data - self.policy = policy + self.chunked_prefill_enabled = self.enable_chunked_prefill self._verify_args() def _verify_args(self) -> None: @@ -2293,10 +2289,10 @@ class VllmConfig: model_config: ModelConfig = field(default=None, init=True) # type: ignore cache_config: CacheConfig = field(default=None, init=True) # type: ignore - parallel_config: ParallelConfig = field(default=None, - init=True) # type: ignore - scheduler_config: SchedulerConfig = field(default=None, - init=True) # type: ignore + parallel_config: ParallelConfig = field(default_factory=ParallelConfig, + init=True) + scheduler_config: SchedulerConfig = field(default_factory=SchedulerConfig, + init=True) device_config: DeviceConfig = field(default=None, init=True) # type: ignore load_config: LoadConfig = field(default=None, init=True) # type: ignore diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 88862a185ac75..82f1ef51255e9 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -191,6 +191,7 @@ class EngineArgs: override_neuron_config: Optional[Dict[str, Any]] = None override_pooler_config: Optional[PoolerConfig] = None compilation_config: Optional[CompilationConfig] = None + worker_cls: str = "auto" def __post_init__(self): if not self.tokenizer: @@ -887,6 +888,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'compilers, using 
-O without space is also ' 'supported. -O3 is equivalent to -O 3.') + parser.add_argument( + '--worker-cls', + type=str, + default="auto", + help='The worker class to use for distributed execution.') + return parser @classmethod @@ -999,7 +1006,9 @@ def create_engine_config(self) -> VllmConfig: self.tokenizer_pool_extra_config, ), ray_workers_use_nsight=self.ray_workers_use_nsight, - distributed_executor_backend=self.distributed_executor_backend) + distributed_executor_backend=self.distributed_executor_backend, + worker_cls=self.worker_cls, + ) max_model_len = model_config.max_model_len use_long_context = max_model_len > 32768 diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 1542a2ae367eb..336f9bc8efb20 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -115,13 +115,8 @@ def _create_worker( local_rank: int = 0, rank: int = 0, ): - worker_module_name = "vllm.worker.cpu_worker" - worker_class_name = "CPUWorker" - wrapper = WorkerWrapperBase( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - ) + wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) assert self.distributed_init_method is not None diff --git a/vllm/executor/gpu_executor.py b/vllm/executor/gpu_executor.py index c65d0836e5ff7..7fa34456028dd 100644 --- a/vllm/executor/gpu_executor.py +++ b/vllm/executor/gpu_executor.py @@ -1,4 +1,4 @@ -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Union from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase from vllm.logger import init_logger @@ -8,19 +8,14 @@ from vllm.sequence import ExecuteModelRequest, PoolerOutput from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, make_async) -from vllm.worker.worker_base import WorkerBase, WorkerWrapperBase +from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) -def create_worker(worker_module_name: str, worker_class_name: str, - worker_class_fn: Optional[Callable[[], Type[WorkerBase]]], - **kwargs): - wrapper = WorkerWrapperBase( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - worker_class_fn=worker_class_fn, - ) +def create_worker(**kwargs): + vllm_config = kwargs.get("vllm_config") + wrapper = WorkerWrapperBase(vllm_config=vllm_config) wrapper.init_worker(**kwargs) return wrapper.worker @@ -57,43 +52,11 @@ def _get_worker_kwargs( or (rank % self.parallel_config.tensor_parallel_size == 0), ) - def _get_worker_module_and_class( - self) -> Tuple[str, str, Optional[Callable[[], Type[WorkerBase]]]]: - worker_class_fn = None - if self.scheduler_config.is_multi_step: - worker_module_name = "vllm.worker.multi_step_worker" - worker_class_name = "MultiStepWorker" - elif self.speculative_config: - worker_module_name = "vllm.spec_decode.spec_decode_worker" - worker_class_name = "create_spec_worker" - else: - worker_module_name = "vllm.worker.worker" - worker_class_name = "Worker" - return (worker_module_name, worker_class_name, worker_class_fn) - - def _get_create_worker_kwargs( - self, - local_rank: int = 0, - rank: int = 0, - distributed_init_method: Optional[str] = None) -> Dict: - worker_kwargs = self._get_worker_kwargs(local_rank, rank, - distributed_init_method) - - (worker_module_name, worker_class_name, - worker_class_fn) = self._get_worker_module_and_class() - worker_kwargs.update( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - 
worker_class_fn=worker_class_fn, - ) - - return worker_kwargs - def _create_worker(self, local_rank: int = 0, rank: int = 0, distributed_init_method: Optional[str] = None): - return create_worker(**self._get_create_worker_kwargs( + return create_worker(**self._get_worker_kwargs( local_rank=local_rank, rank=rank, distributed_init_method=distributed_init_method)) diff --git a/vllm/executor/hpu_executor.py b/vllm/executor/hpu_executor.py index 220e9eee87bb3..c9b7bfa71edfa 100644 --- a/vllm/executor/hpu_executor.py +++ b/vllm/executor/hpu_executor.py @@ -48,10 +48,7 @@ def _create_worker(self, local_rank: int = 0, rank: int = 0, distributed_init_method: Optional[str] = None): - wrapper = WorkerWrapperBase( - worker_module_name="vllm.worker.hpu_worker", - worker_class_name="HPUWorker", - ) + wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) wrapper.init_worker(**self._get_worker_kwargs(local_rank, rank, distributed_init_method)) return wrapper.worker diff --git a/vllm/executor/multiproc_gpu_executor.py b/vllm/executor/multiproc_gpu_executor.py index 3eb14fb931925..a6c05a71d2b6f 100644 --- a/vllm/executor/multiproc_gpu_executor.py +++ b/vllm/executor/multiproc_gpu_executor.py @@ -90,7 +90,7 @@ def _init_executor(self) -> None: result_handler, partial( create_worker, - **self._get_create_worker_kwargs( + **self._get_worker_kwargs( rank=rank, local_rank=rank, distributed_init_method=distributed_init_method, diff --git a/vllm/executor/neuron_executor.py b/vllm/executor/neuron_executor.py index 02d37cd7fbf23..31e6fdc3ab1bb 100644 --- a/vllm/executor/neuron_executor.py +++ b/vllm/executor/neuron_executor.py @@ -7,6 +7,7 @@ from vllm.sequence import ExecuteModelRequest from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, make_async) +from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -25,10 +26,10 @@ def _init_executor(self) -> None: self._init_worker() def _init_worker(self): - from vllm.worker.neuron_worker import NeuronWorker + wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) - self.driver_worker = NeuronWorker( + self.driver_worker = wrapper.init_worker( vllm_config=self.vllm_config, local_rank=0, rank=0, diff --git a/vllm/executor/openvino_executor.py b/vllm/executor/openvino_executor.py index d06b0ccb7906e..dcd4b7621381d 100644 --- a/vllm/executor/openvino_executor.py +++ b/vllm/executor/openvino_executor.py @@ -14,6 +14,7 @@ from vllm.sequence import ExecuteModelRequest from vllm.utils import (GiB_bytes, get_distributed_init_method, get_ip, get_open_port, make_async) +from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -38,15 +39,12 @@ def _init_executor(self) -> None: self._init_worker() def _init_worker(self): - from vllm.worker.openvino_worker import OpenVINOWorker - assert ( - self.parallel_config.world_size == 1 - ), "OpenVINOExecutor only supports single CPU socket currently." 
+ wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) - self.driver_worker = OpenVINOWorker( + self.driver_worker = wrapper.init_worker( ov_core=self.ov_core, vllm_config=self.vllm_config, local_rank=0, diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 66bab2c686c67..810b0f06ff7b2 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -91,17 +91,6 @@ def _configure_ray_workers_use_nsight(self, return ray_remote_kwargs - def _get_worker_wrapper_args(self) -> Dict[str, Any]: - (worker_module_name, worker_class_name, - worker_class_fn) = self._get_worker_module_and_class() - - return dict( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - worker_class_fn=worker_class_fn, - trust_remote_code=self.model_config.trust_remote_code, - ) - # child class could overwrite this to return actual env vars. def _get_env_vars_to_be_updated(self): return self._env_vars_for_all_workers @@ -135,7 +124,6 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # Create the workers. driver_ip = get_ip() - worker_wrapper_kwargs = self._get_worker_wrapper_args() for bundle_id, bundle in enumerate(placement_group.bundle_specs): if not bundle.get("GPU", 0): continue @@ -150,7 +138,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", num_gpus=num_gpus, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs, - )(RayWorkerWrapper).remote(**worker_wrapper_kwargs) + )(RayWorkerWrapper).remote(vllm_config=self.vllm_config) if self.use_ray_spmd_worker: self.workers.append(worker) @@ -161,7 +149,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # as the resource holder for the driver process. self.driver_dummy_worker = worker self.driver_worker = RayWorkerWrapper( - **worker_wrapper_kwargs) + vllm_config=self.vllm_config) else: # Else, added to the list of workers. 
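# Standalone sketch of the idea behind passing only vllm_config to the worker
# wrappers above: the concrete worker is kept as a dotted string (e.g.
# parallel_config.worker_cls == "vllm.worker.worker.Worker") and imported only
# when the worker is constructed. The helper and the example qualname below
# are illustrative, not the vLLM implementation.
import importlib


def resolve_by_qualname(qualname: str):
    """Import 'pkg.module.Name' and return the named attribute."""
    module_name, _, attr = qualname.rpartition(".")
    return getattr(importlib.import_module(module_name), attr)


# Stand-in qualname; a real run would use the platform-selected worker class.
worker_cls = resolve_by_qualname("collections.OrderedDict")
worker = worker_cls()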
self.workers.append(worker) diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py index a24bab6df370e..6fe8c6c403358 100644 --- a/vllm/executor/ray_hpu_executor.py +++ b/vllm/executor/ray_hpu_executor.py @@ -2,8 +2,7 @@ import os from collections import defaultdict from itertools import islice, repeat -from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, - Type) +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import msgspec @@ -18,7 +17,6 @@ from vllm.utils import (_run_task_with_lock, get_distributed_init_method, get_ip, get_open_port, get_vllm_instance_id, make_async) -from vllm.worker.worker_base import WorkerBase if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -81,33 +79,6 @@ def shutdown(self) -> None: def finish_measurements(self): self._run_workers("finish_measurements") - def _get_worker_module_and_class( - self - ) -> Tuple[str, str, Optional[Callable[[], - Type[WorkerBase]]]]: # noqa: F821 - worker_class_fn = None - if self.scheduler_config.is_multi_step: - raise NotImplementedError( - "Multi-step execution is not implemented for HPU") - elif self.speculative_config: - raise NotImplementedError( - "Speculative decoding is not implemented for HPU") - else: - worker_module_name = "vllm.worker.hpu_worker" - worker_class_name = "HPUWorker" - return (worker_module_name, worker_class_name, worker_class_fn) - - def _get_worker_wrapper_args(self) -> Dict[str, Any]: - (worker_module_name, worker_class_name, - worker_class_fn) = self._get_worker_module_and_class() - - return dict( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - worker_class_fn=worker_class_fn, - trust_remote_code=self.model_config.trust_remote_code, - ) - def _init_workers_ray(self, placement_group: "PlacementGroup", **ray_remote_kwargs): # Otherwise, the ray workers are allocated with a full GPU. @@ -128,7 +99,6 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # Create the workers. driver_ip = get_ip() - worker_wrapper_kwargs = self._get_worker_wrapper_args() for bundle_id, bundle in enumerate(placement_group.bundle_specs): if not bundle.get("HPU", 0): continue @@ -144,7 +114,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", resources={'HPU': num_gpus}, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs, - )(RayWorkerWrapper).remote(**worker_wrapper_kwargs) + )(RayWorkerWrapper).remote(vllm_config=self.vllm_config) if self.use_ray_spmd_worker: self.workers.append(worker) @@ -155,7 +125,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # as the resource holder for the driver process. self.driver_dummy_worker = worker self.driver_worker = RayWorkerWrapper( - **worker_wrapper_kwargs) + vllm_config=self.vllm_config) else: # Else, added to the list of workers. 
self.workers.append(worker) diff --git a/vllm/executor/ray_tpu_executor.py b/vllm/executor/ray_tpu_executor.py index d02fecb46f007..c227b5e283c68 100644 --- a/vllm/executor/ray_tpu_executor.py +++ b/vllm/executor/ray_tpu_executor.py @@ -69,14 +69,6 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", placement_group_bundle_index=bundle_id, ) - assert self.speculative_config is None - if self.scheduler_config.is_multi_step: - worker_module_name = "vllm.worker.multi_step_tpu_worker" - worker_class_name = "MultiStepTPUWorker" - else: - worker_module_name = "vllm.worker.tpu_worker" - worker_class_name = "TPUWorker" - # GKE does not fetch environment information from metadata server # and instead sets these from within the Ray process. Therefore we # need to override the Ray environment variables manually. @@ -95,11 +87,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", resources={"TPU": 1}, scheduling_strategy=scheduling_strategy, **ray_remote_kwargs, - )(RayWorkerWrapper).remote( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - trust_remote_code=self.model_config.trust_remote_code, - ) + )(RayWorkerWrapper).remote(vllm_config=self.vllm_config) if override_env: worker.override_env_vars.remote(override_env) @@ -109,10 +97,7 @@ def _init_workers_ray(self, placement_group: "PlacementGroup", # as the resource holder for the driver process. self.driver_dummy_worker = worker self.driver_worker = RayWorkerWrapper( - worker_module_name=worker_module_name, - worker_class_name=worker_class_name, - trust_remote_code=self.model_config.trust_remote_code, - ) + vllm_config=self.vllm_config) else: # Else, added to the list of workers. self.workers.append(worker) diff --git a/vllm/executor/xpu_executor.py b/vllm/executor/xpu_executor.py index ba6177e51a453..722b86a95ff8a 100644 --- a/vllm/executor/xpu_executor.py +++ b/vllm/executor/xpu_executor.py @@ -1,4 +1,4 @@ -from typing import Callable, List, Optional, Tuple, Type, Union +from typing import List, Optional, Union from vllm.executor.executor_base import ExecutorAsyncBase from vllm.executor.gpu_executor import GPUExecutor @@ -6,7 +6,6 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest, PoolerOutput from vllm.utils import make_async -from vllm.worker.worker_base import WorkerBase logger = init_logger(__name__) @@ -22,17 +21,6 @@ def _init_executor(self) -> None: GPUExecutor._init_executor(self) - def _get_worker_module_and_class( - self) -> Tuple[str, str, Optional[Callable[[], Type[WorkerBase]]]]: - worker_class_fn = None - if self.speculative_config is not None: - raise NotImplementedError( - "XPU does not support speculative decoding") - else: - worker_module_name = "vllm.worker.xpu_worker" - worker_class_name = "XPUWorker" - return (worker_module_name, worker_class_name, worker_class_fn) - def execute_model( self, execute_model_req: ExecuteModelRequest ) -> Optional[List[Union[SamplerOutput, PoolerOutput]]]: diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 0c4c916406223..9be9031dc3baf 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -84,3 +84,5 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: "distributed executor backend."), parallel_config.distributed_executor_backend) parallel_config.distributed_executor_backend = "mp" + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = "vllm.worker.cpu_worker.CPUWorker" diff --git a/vllm/platforms/cuda.py 
b/vllm/platforms/cuda.py index b38dd7c936896..cf0d41081a5aa 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -4,7 +4,7 @@ import os from functools import lru_cache, wraps -from typing import Callable, List, Tuple, TypeVar +from typing import TYPE_CHECKING, Callable, List, Tuple, TypeVar import pynvml import torch @@ -16,6 +16,11 @@ from .interface import DeviceCapability, Platform, PlatformEnum +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + logger = init_logger(__name__) _P = ParamSpec("_P") @@ -157,3 +162,17 @@ def is_full_nvlink(cls, physical_device_ids: List[int]) -> bool: " machine has no NVLink equipped.") return False return True + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": + if scheduler_config.is_multi_step: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" + elif vllm_config.speculative_config: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + else: + parallel_config.worker_cls = "vllm.worker.worker.Worker" diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 36d944b3f24b8..a8f568d31d5a7 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -1,7 +1,14 @@ +from typing import TYPE_CHECKING + import torch from .interface import Platform, PlatformEnum, _Backend +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + class HpuPlatform(Platform): _enum = PlatformEnum.HPU @@ -14,3 +21,19 @@ def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: @staticmethod def inference_mode(): return torch.no_grad() + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + + scheduler_config = vllm_config.scheduler_config + if scheduler_config.is_multi_step: + raise NotImplementedError( + "Multi-step execution is not implemented for HPU") + + if vllm_config.speculative_config is not None: + raise NotImplementedError( + "Speculative decoding is not implemented for HPU") + + parallel_config = vllm_config.parallel_config + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = "vllm.worker.hpu_worker.HPUWorker" diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 57e3c0dfae84c..4c4d778ed3dd4 100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -1,5 +1,12 @@ +from typing import TYPE_CHECKING + from .interface import Platform, PlatformEnum +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + class NeuronPlatform(Platform): _enum = PlatformEnum.NEURON @@ -8,3 +15,10 @@ class NeuronPlatform(Platform): @classmethod def get_device_name(cls, device_id: int = 0) -> str: return "neuron" + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = \ + "vllm.worker.neuron_worker.NeuronWorker" diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 130b8eec1b386..33a41933e9fff 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,3 +1,5 @@ +from typing import TYPE_CHECKING + import torch import vllm.envs as envs @@ -5,6 +7,11 @@ from .interface import Platform, PlatformEnum, _Backend +if TYPE_CHECKING: + from vllm.config import 
VllmConfig +else: + VllmConfig = None + logger = init_logger(__name__) @@ -38,3 +45,14 @@ def is_openvino_gpu(self) -> bool: def is_pin_memory_available(self) -> bool: logger.warning("Pin memory is not supported on OpenViNO.") return False + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + assert ( + parallel_config.world_size == 1 + ), "OpenVINOExecutor only supports single CPU socket currently." + + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = \ + "vllm.worker.openvino_worker.OpenVINOWorker" diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index c62241d8bb47b..3fe8c01c15787 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -1,5 +1,6 @@ import os from functools import lru_cache +from typing import TYPE_CHECKING import torch @@ -7,6 +8,11 @@ from .interface import DeviceCapability, Platform, PlatformEnum, _Backend +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None + logger = init_logger(__name__) try: @@ -58,3 +64,17 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.cuda.get_device_properties(device_id) return device_props.total_memory + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": + if scheduler_config.is_multi_step: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" + elif vllm_config.speculative_config: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + else: + parallel_config.worker_cls = "vllm.worker.worker.Worker" diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 863875ef5c2d6..513cfa54687dc 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -48,3 +48,15 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: if compilation_config.backend == "": compilation_config.backend = "openxla" + + assert vllm_config.speculative_config is None, \ + "TPU does not support speculative decoding" + + parallel_config = vllm_config.parallel_config + scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": + if scheduler_config.is_multi_step: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_tpu_worker.MultiStepTPUWorker" + else: + parallel_config.worker_cls = "vllm.worker.tpu_worker.TPUWorker" diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 536e17a5f93e8..b2ee0ef2f71cd 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -57,6 +57,10 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: "mode.") model_config.enforce_eager = True + if vllm_config.speculative_config is not None: + raise NotImplementedError( + "XPU does not support speculative decoding") + # check and update parallel config parallel_config = vllm_config.parallel_config if (parallel_config.distributed_executor_backend is not None @@ -66,3 +70,5 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: " executor backend.", parallel_config.distributed_executor_backend) parallel_config.distributed_executor_backend = "ray" + if parallel_config.worker_cls == "auto": + parallel_config.worker_cls = "vllm.worker.xpu_worker.XPUWorker" diff --git a/vllm/worker/worker_base.py 
b/vllm/worker/worker_base.py index cf8a4946a71c4..e7fec6d17eecd 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -1,9 +1,8 @@ import dataclasses -import importlib import os import time from abc import ABC, abstractmethod -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union import torch @@ -15,7 +14,7 @@ from vllm.platforms import current_platform from vllm.sequence import ExecuteModelRequest, IntermediateTensors from vllm.utils import (enable_trace_function_call_for_thread, - update_environment_variables) + resolve_obj_by_qualname, update_environment_variables) from vllm.worker.model_runner_base import (BroadcastableModelInput, ModelRunnerBase, ModelRunnerInputBase) @@ -411,23 +410,14 @@ class WorkerWrapperBase: We first instantiate the WorkerWrapper, which remembers the worker module and class name. Then, when we call `update_environment_variables`, and the real initialization happens in `init_worker`. - - If worker_class_fn is specified, it will be executed to get the worker - class. - Otherwise, the worker class will be obtained by dynamically importing it - using worker_module_name and worker_class_name. """ def __init__( self, - worker_module_name: str, - worker_class_name: str, - trust_remote_code: bool = False, - worker_class_fn: Optional[Callable[[], - Type[WorkerBase]]] = None) -> None: - self.worker_module_name = worker_module_name - self.worker_class_name = worker_class_name - self.worker_class_fn = worker_class_fn + vllm_config: VllmConfig, + ) -> None: + self.vllm_config = vllm_config + trust_remote_code = vllm_config.model_config.trust_remote_code self.worker: Optional[WorkerBase] = None if trust_remote_code: # note: lazy import to avoid importing torch before initializing @@ -456,12 +446,8 @@ def init_worker(self, *args, **kwargs): from vllm.plugins import load_general_plugins load_general_plugins() - if self.worker_class_fn: - worker_class = self.worker_class_fn() - else: - mod = importlib.import_module(self.worker_module_name) - worker_class = getattr(mod, self.worker_class_name) - + worker_class = resolve_obj_by_qualname( + self.vllm_config.parallel_config.worker_cls) self.worker = worker_class(*args, **kwargs) assert self.worker is not None From b6374e09b0af4f8fa4c0b911b3cd1bd45342ead6 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Fri, 22 Nov 2024 15:01:56 +0800 Subject: [PATCH 098/397] [Bugfix] Fix Phi-3 BNB quantization with tensor parallel (#9948) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/layers/linear.py | 19 +++++++--- vllm/model_executor/model_loader/loader.py | 43 +++++++++++++++++++++- 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index 2471c160d66b7..46ef11e7d02c6 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -1,3 +1,4 @@ +import itertools from abc import abstractmethod from typing import Dict, List, Optional, Tuple @@ -41,12 +42,12 @@ def adjust_marlin_shard(param, shard_size, shard_offset): def adjust_bitsandbytes_4bit_shard(param: Parameter, - qkv_offsets: Dict[str, Tuple[int, int]], + shard_offsets: Dict[str, Tuple[int, int]], loaded_shard_id: str) -> Tuple[int, int]: """Adjust the quantization offsets and sizes for BitsAndBytes sharding.""" - total, _ = qkv_offsets["total"] - orig_offset, orig_size = qkv_offsets[loaded_shard_id] + total, _ = shard_offsets["total"] + 
orig_offset, orig_size = shard_offsets[loaded_shard_id] quantized_total = param.data.shape[0] quantized_offset = orig_offset * quantized_total // total @@ -499,9 +500,17 @@ def weight_loader(self, # Special case for Marlin. shard_size, shard_offset = adjust_marlin_shard( param, shard_size, shard_offset) + if use_bitsandbytes_4bit: - shard_size = loaded_weight.shape[output_dim] // 2 - shard_offset = shard_size * shard_id + index = list(itertools.accumulate([0] + self.output_sizes)) + orig_offsets = { + str(i): (index[i], size) + for i, size in enumerate(self.output_sizes) + } + orig_offsets["total"] = (self.output_size, 0) + shard_size, shard_offset = adjust_bitsandbytes_4bit_shard( + param, orig_offsets, str(shard_id)) + loaded_weight_shard = loaded_weight.narrow( output_dim, shard_offset, shard_size) self.weight_loader(param, loaded_weight_shard, shard_id) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 936c2fe415375..34e0860162260 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -5,6 +5,7 @@ import fnmatch import glob import inspect +import itertools import json import math import os @@ -27,7 +28,9 @@ get_tensor_model_parallel_world_size) from vllm.envs import VLLM_USE_MODELSCOPE from vllm.logger import init_logger -from vllm.model_executor.layers.linear import (ReplicatedLinear, +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + ReplicatedLinear, RowParallelLinear) from vllm.model_executor.layers.quantization.base_config import ( QuantizeMethodBase) @@ -936,6 +939,34 @@ def _unquantized_generator(self, hf_weights_files, use_safetensors, end_index = total_size // tp_size * (tp_rank + 1) weight_sub_tensor = weight_tensor[..., start_index:end_index] + # Weights have fused on disk. In this case, we assume that the + # weight and module use same name. + elif any( + weight_name.startswith(module) + for module in self.maybe_fused_weights_modules): + # special case for fused weights + # get the size of each shard weight tensor + total_shard_sizes = next( + (sizes for module, sizes in + self.maybe_fused_weights_modules.items() + if weight_name.startswith(module))) + total_size = weight_tensor.size(0) + assert total_size == sum(total_shard_sizes) + # get the start/end index of each shard weight tensor + total_start_index = list( + itertools.accumulate([0] + total_shard_sizes))[:-1] + shard_weights_index = [ + (idx + size // tp_size * tp_rank, + idx + size // tp_size * (tp_rank + 1)) + for idx, size in zip(total_start_index, + total_shard_sizes) + ] + # slice and reorder the weight tensor + weight_tensor = [ + weight_tensor[start_index:end_index, ...] + for start_index, end_index in shard_weights_index + ] + weight_sub_tensor = torch.cat(weight_tensor, dim=0) # Shard by row else: total_size = weight_tensor.size(0) @@ -985,12 +1016,22 @@ def _load_weights(self, model_config: ModelConfig, else: self.target_modules = self.default_target_modules + # Modules whose weights might have fused on disk + # we need their output_sizes to make shard in flight correctly with TP + self.maybe_fused_weights_modules: Dict[str, List[int]] = {} + for name, module in model.named_modules(): # Some modules like `ReplicatedLinear` should not have their weights # sharded. The reason for implementing it this way is to avoid new # static variable in the model implementation. 
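# Worked example (invented sizes, plain Python) of the offset bookkeeping used
# by the weight_loader and loader changes above: itertools.accumulate turns
# per-shard output sizes into start offsets, and the bitsandbytes-4bit path
# rescales an (offset, size) pair by quantized_total / total.
import itertools

output_sizes = [1024, 1024, 512]
index = list(itertools.accumulate([0] + output_sizes))   # [0, 1024, 2048, 2560]
offsets = {str(i): (index[i], size) for i, size in enumerate(output_sizes)}
offsets["total"] = (sum(output_sizes), 0)

total, _ = offsets["total"]      # 2560 unquantized rows
quantized_total = total // 2     # assumption: 4-bit packing halves the rows
orig_offset, orig_size = offsets["1"]
quantized_offset = orig_offset * quantized_total // total   # 512
quantized_size = orig_size * quantized_total // total       # 512
assert (quantized_offset, quantized_size) == (512, 512)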
if isinstance(module, (ReplicatedLinear, )): self.unsharded_weights_modules.append(name) + # `QKVParallelLinear` and `MergedColumnParallelLinear` might have + # fused weights on disk. We need to use the output sizes of these + # modules to shard the weights correctly. + elif isinstance(module, + (QKVParallelLinear, MergedColumnParallelLinear)): + self.maybe_fused_weights_modules[name] = module.output_sizes # In TP, these weights are partitioned along the column # dimension (dim=-1) elif isinstance(module, (RowParallelLinear, )): From 11fcf0e0661365f24bfff9591434a0cec640df6c Mon Sep 17 00:00:00 2001 From: Noam Gat Date: Fri, 22 Nov 2024 09:59:47 +0200 Subject: [PATCH 099/397] Remove token-adding chat embedding params (#10551) Signed-off-by: Noam Gat --- vllm/entrypoints/openai/protocol.py | 16 ---------------- vllm/entrypoints/openai/serving_embedding.py | 6 ++++-- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index a82212677f63a..9db5951e5fe5b 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -760,22 +760,6 @@ class EmbeddingChatRequest(OpenAIBaseModel): # doc: end-chat-embedding-pooling-params # doc: begin-chat-embedding-extra-params - add_generation_prompt: bool = Field( - default=True, - description= - ("If true, the generation prompt will be added to the chat template. " - "This is a parameter used by chat template in tokenizer config of the " - "model."), - ) - continue_final_message: bool = Field( - default=False, - description= - ("If this is set, the chat will be formatted so that the final " - "message in the chat is open-ended, without any EOS tokens. The " - "model will continue this message rather than starting a new one. " - "This allows you to \"prefill\" part of the model's response for it. " - "Cannot be used at the same time as `add_generation_prompt`."), - ) add_special_tokens: bool = Field( default=False, description=( diff --git a/vllm/entrypoints/openai/serving_embedding.py b/vllm/entrypoints/openai/serving_embedding.py index 74ad7389784fc..c84a7d2d8e13e 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -148,8 +148,10 @@ async def create_embedding( chat_template=request.chat_template or self.chat_template, chat_template_content_format=self. 
chat_template_content_format, - add_generation_prompt=request.add_generation_prompt, - continue_final_message=request.continue_final_message, + # In embedding requests, we are not generating tokens, + # so there is no need to append extra tokens to the input + add_generation_prompt=False, + continue_final_message=False, truncate_prompt_tokens=truncate_prompt_tokens, add_special_tokens=request.add_special_tokens, ) From db100c5cdebc7140b57cbb40b20b5a28d7bff386 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 22 Nov 2024 10:02:14 -0800 Subject: [PATCH 100/397] [bugfix] fix full graph tests (#10581) Signed-off-by: youkaichao --- tests/compile/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/compile/utils.py b/tests/compile/utils.py index 078c6bf9ea1df..7c92d165d05f7 100644 --- a/tests/compile/utils.py +++ b/tests/compile/utils.py @@ -4,7 +4,7 @@ from tests.quantization.utils import is_quant_method_supported from vllm import LLM, SamplingParams -from vllm.config import CompilationConfig, CompilationLevel +from vllm.config import CompilationLevel from vllm.platforms import current_platform TEST_MODELS = [ @@ -85,7 +85,7 @@ def check_full_graph_support(model, enforce_eager=True, tensor_parallel_size=tp_size, disable_custom_all_reduce=True, - compilation_config=CompilationConfig(level=optimization_level), + compilation_config=optimization_level, **model_kwargs) outputs = llm.generate(prompts, sampling_params) From eebad39f265606cfe35af4d1e0bea678516648a3 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 22 Nov 2024 14:04:42 -0800 Subject: [PATCH 101/397] [torch.compile] support all attention backends (#10558) Signed-off-by: youkaichao --- tests/kernels/test_encoder_decoder_attn.py | 37 +- vllm/attention/backends/abstract.py | 23 +- vllm/attention/backends/blocksparse_attn.py | 2 +- vllm/attention/backends/flash_attn.py | 412 ++++++++---------- vllm/attention/backends/flashinfer.py | 280 +++++------- vllm/attention/backends/hpu_attn.py | 2 +- vllm/attention/backends/ipex_attn.py | 2 +- vllm/attention/backends/pallas.py | 2 +- vllm/attention/backends/rocm_flash_attn.py | 2 +- vllm/attention/backends/torch_sdpa.py | 12 +- vllm/attention/backends/utils.py | 4 +- vllm/attention/backends/xformers.py | 8 +- vllm/attention/layer.py | 81 +++- vllm/config.py | 9 +- vllm/forward_context.py | 27 +- vllm/model_executor/models/arctic.py | 15 +- vllm/model_executor/models/baichuan.py | 18 +- vllm/model_executor/models/bart.py | 48 +- vllm/model_executor/models/bloom.py | 14 +- vllm/model_executor/models/chameleon.py | 11 +- vllm/model_executor/models/chatglm.py | 25 +- vllm/model_executor/models/commandr.py | 14 +- vllm/model_executor/models/dbrx.py | 21 +- vllm/model_executor/models/deepseek.py | 9 +- vllm/model_executor/models/deepseek_v2.py | 3 +- vllm/model_executor/models/exaone.py | 3 +- vllm/model_executor/models/falcon.py | 22 +- vllm/model_executor/models/florence2.py | 10 +- vllm/model_executor/models/gemma.py | 3 +- vllm/model_executor/models/gemma2.py | 15 +- .../models/glm4_vision_encoder.py | 17 +- vllm/model_executor/models/gpt2.py | 3 +- vllm/model_executor/models/gpt_bigcode.py | 13 +- vllm/model_executor/models/gpt_j.py | 13 +- vllm/model_executor/models/gpt_neox.py | 13 +- vllm/model_executor/models/granite.py | 3 +- vllm/model_executor/models/granitemoe.py | 3 +- vllm/model_executor/models/internlm2.py | 21 +- vllm/model_executor/models/internlm2_ve.py | 23 +- vllm/model_executor/models/jais.py | 13 +- vllm/model_executor/models/jamba.py | 8 +- 
vllm/model_executor/models/llama.py | 1 + vllm/model_executor/models/minicpm.py | 11 +- vllm/model_executor/models/minicpm3.py | 9 +- vllm/model_executor/models/mixtral.py | 3 +- vllm/model_executor/models/mixtral_quant.py | 12 +- vllm/model_executor/models/molmo.py | 13 +- vllm/model_executor/models/mpt.py | 13 +- vllm/model_executor/models/nemotron.py | 3 +- vllm/model_executor/models/olmo.py | 16 +- vllm/model_executor/models/olmoe.py | 13 +- vllm/model_executor/models/orion.py | 11 +- vllm/model_executor/models/persimmon.py | 16 +- vllm/model_executor/models/phi.py | 17 +- vllm/model_executor/models/phi3_small.py | 26 +- vllm/model_executor/models/phimoe.py | 8 +- vllm/model_executor/models/qwen.py | 11 +- vllm/model_executor/models/qwen2_moe.py | 9 +- vllm/model_executor/models/solar.py | 1 + vllm/model_executor/models/stablelm.py | 16 +- vllm/model_executor/models/starcoder2.py | 15 +- vllm/model_executor/models/xverse.py | 10 +- vllm/platforms/cpu.py | 1 + vllm/platforms/cuda.py | 1 + vllm/platforms/hpu.py | 1 + vllm/platforms/interface.py | 4 + vllm/platforms/openvino.py | 1 + vllm/platforms/rocm.py | 1 + vllm/platforms/tpu.py | 1 + vllm/platforms/xpu.py | 1 + vllm/spec_decode/draft_model_runner.py | 3 +- vllm/utils.py | 3 +- vllm/v1/attention/backends/flash_attn.py | 3 +- vllm/v1/worker/gpu_model_runner.py | 4 +- vllm/worker/embedding_model_runner.py | 2 +- vllm/worker/enc_dec_model_runner.py | 2 +- vllm/worker/model_runner.py | 4 +- 77 files changed, 879 insertions(+), 651 deletions(-) diff --git a/tests/kernels/test_encoder_decoder_attn.py b/tests/kernels/test_encoder_decoder_attn.py index 3d3724c50421d..c4b72ba6bf4ee 100644 --- a/tests/kernels/test_encoder_decoder_attn.py +++ b/tests/kernels/test_encoder_decoder_attn.py @@ -18,8 +18,10 @@ from vllm.attention.backends.utils import STR_NOT_IMPL_ENC_DEC_ROCM_HIP from vllm.attention.selector import (_Backend, _cached_get_attn_backend, global_force_attn_backend_context_manager) +from vllm.config import VllmConfig from vllm.forward_context import set_forward_context from vllm.platforms import current_platform +from vllm.plugins import set_current_vllm_config # List of support backends for encoder/decoder models LIST_ENC_DEC_SUPPORTED_BACKENDS = [_Backend.XFORMERS, _Backend.FLASH_ATTN] @@ -594,6 +596,7 @@ def _run_encoder_attention_test( encoder_test_params: PhaseTestParameters, attn_metadata: AttentionMetadata, test_pt: TestPoint, + vllm_config: VllmConfig, ) -> torch.Tensor: ''' Run encoder attention. @@ -623,7 +626,7 @@ def _run_encoder_attention_test( attn_type = AttentionType.ENCODER packed_qkv = encoder_test_params.packed_qkvo.packed_qkv assert packed_qkv is not None - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, vllm_config): # In the test setup the shape of the query is # [batch_size, seq_len, num_heads, head_size]. However # the attention backend expect the shape to be @@ -648,6 +651,7 @@ def _run_decoder_self_attention_test( decoder_test_params: PhaseTestParameters, attn_metadata: AttentionMetadata, test_pt: TestPoint, + vllm_config: VllmConfig, ) -> torch.Tensor: ''' Run decoder self-attention test. @@ -677,7 +681,7 @@ def _run_decoder_self_attention_test( kv_cache = test_rsrcs.kv_cache packed_qkv = decoder_test_params.packed_qkvo.packed_qkv assert packed_qkv is not None - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, vllm_config): # In the test setup the shape of the query is # [batch_size, seq_len, num_heads, head_size]. 
However # the attention backend expect the shape to be @@ -701,6 +705,7 @@ def _run_encoder_decoder_cross_attention_test( cross_test_params: Optional[PhaseTestParameters], attn_metadata: AttentionMetadata, test_pt: TestPoint, + vllm_config: VllmConfig, ) -> torch.Tensor: ''' Run encoder/decoder cross-attention test. @@ -748,7 +753,7 @@ def _run_encoder_decoder_cross_attention_test( cross_pckd_qkv = cross_test_params.packed_qkvo.packed_qkv key = (None if cross_pckd_qkv is None else cross_pckd_qkv.key) value = (None if cross_pckd_qkv is None else cross_pckd_qkv.value) - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, vllm_config): # In the test setup the shape of the query is # [batch_size, seq_len, num_heads, head_size]. However # the attention backend expect the shape to be @@ -839,7 +844,9 @@ def test_encoder_only( # Attention scale factor, attention backend instance, attention wrapper # instance, KV cache init - test_rsrcs = _make_test_resources(test_pt) + vllm_config = VllmConfig() + with set_current_vllm_config(vllm_config): + test_rsrcs = _make_test_resources(test_pt) # Construct encoder attention test params (only used # during prefill) @@ -863,7 +870,8 @@ def test_encoder_only( test_rsrcs.attn, enc_test_params, prephase_attn_metadata, - test_pt=test_pt)) + test_pt=test_pt, + vllm_config=vllm_config)) # - Is encoder attention result correct? assert_actual_matches_ideal(enc_test_params, enc_pckd_act_out, @@ -960,7 +968,9 @@ def test_e2e_enc_dec_attn( # Attention scale factor, attention backend instance, attention wrapper # instance, KV cache init - test_rsrcs = _make_test_resources(test_pt) + vllm_config = VllmConfig() + with set_current_vllm_config(vllm_config): + test_rsrcs = _make_test_resources(test_pt) # Construct encoder attention test params (only used # during prefill) @@ -1011,7 +1021,8 @@ def test_e2e_enc_dec_attn( enc_pckd_act_out = _run_encoder_attention_test(test_rsrcs.attn, enc_test_params, prephase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is encoder attention result correct? assert_actual_matches_ideal(enc_test_params, enc_pckd_act_out, @@ -1023,7 +1034,8 @@ def test_e2e_enc_dec_attn( test_rsrcs, prephase_dec_test_params, prephase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is prefill decoder self-attention correct? assert_actual_matches_ideal(prephase_dec_test_params, @@ -1037,7 +1049,8 @@ def test_e2e_enc_dec_attn( prephase_dec_test_params, prephase_cross_test_params, prephase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is prefill encoder/decoder cross-attention correct? assert_actual_matches_ideal(prephase_cross_test_params, @@ -1061,7 +1074,8 @@ def test_e2e_enc_dec_attn( test_rsrcs, decphase_dec_test_params, decphase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is decode-phase decoder self-attention correct? assert_actual_matches_ideal(decphase_dec_test_params, @@ -1075,7 +1089,8 @@ def test_e2e_enc_dec_attn( decphase_dec_test_params, None, decphase_attn_metadata, - test_pt=test_pt) + test_pt=test_pt, + vllm_config=vllm_config) # - Is decode-phase encoder/decoder cross-attention correct? 
assert_actual_matches_ideal(decphase_cross_test_params, diff --git a/vllm/attention/backends/abstract.py b/vllm/attention/backends/abstract.py index a504cb1f7e318..5be2d83346d00 100644 --- a/vllm/attention/backends/abstract.py +++ b/vllm/attention/backends/abstract.py @@ -1,7 +1,6 @@ from abc import ABC, abstractmethod from contextlib import contextmanager from dataclasses import dataclass, fields -from enum import Enum, auto from typing import (TYPE_CHECKING, Any, Dict, Generic, List, Optional, Set, Tuple, Type, TypeVar) @@ -15,13 +14,19 @@ ModelRunnerInputBuilderBase) -class AttentionType(Enum): - DECODER = auto() # Decoder attention between previous layer Q/K/V - ENCODER = auto( - ) # Encoder attention between previous layer Q/K/V for encoder-decoder - ENCODER_ONLY = auto() # Encoder attention between previous layer Q/K/V - ENCODER_DECODER = auto( - ) # Attention between dec. Q and enc. K/V for encoder-decoder +class AttentionType: + """ + Attention type. + Use string to be compatible with `torch.compile`. + """ + # Decoder attention between previous layer Q/K/V + DECODER = "decoder" + # Encoder attention between previous layer Q/K/V for encoder-decoder + ENCODER = "encoder" + # Encoder attention between previous layer Q/K/V + ENCODER_ONLY = "encoder_only" + # Attention between dec. Q and enc. K/V for encoder-decoder + ENCODER_DECODER = "encoder_decoder" class AttentionBackend(ABC): @@ -241,6 +246,6 @@ def forward( attn_metadata: T, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: raise NotImplementedError diff --git a/vllm/attention/backends/blocksparse_attn.py b/vllm/attention/backends/blocksparse_attn.py index 409a42187f46c..94002e36db2bb 100644 --- a/vllm/attention/backends/blocksparse_attn.py +++ b/vllm/attention/backends/blocksparse_attn.py @@ -354,7 +354,7 @@ def forward( attn_metadata: BlocksparseFlashAttentionMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with FlashAttention and PagedAttention. diff --git a/vllm/attention/backends/flash_attn.py b/vllm/attention/backends/flash_attn.py index 314822b695722..32738d1043b1d 100644 --- a/vllm/attention/backends/flash_attn.py +++ b/vllm/attention/backends/flash_attn.py @@ -16,10 +16,8 @@ compute_slot_mapping_start_idx, get_num_prefill_decode_query_kv_tokens, get_seq_len_block_table_args, is_all_cross_attn_metadata_set, is_all_encoder_attn_metadata_set, is_block_tables_empty) -from vllm.forward_context import get_forward_context from vllm.multimodal import MultiModalPlaceholderMap -from vllm.utils import (async_tensor_h2d, direct_register_custom_op, - make_tensor_with_pad) +from vllm.utils import async_tensor_h2d, make_tensor_with_pad if TYPE_CHECKING: from vllm.worker.model_runner import (ModelInputForGPUBuilder, @@ -639,7 +637,7 @@ def forward( attn_metadata: FlashAttentionMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with FlashAttention. 
@@ -668,23 +666,174 @@ def forward( "requires setting cross-attention " "metadata attributes.") - output = torch.ops.vllm.unified_flash_attention( - query, - key, - value, - self.num_heads, - self.head_size, - self.num_kv_heads, - kv_cache, - self.kv_cache_dtype, - k_scale, - v_scale, - self.scale, - attn_type.value, - self.sliding_window, - self.alibi_slopes, - self.logits_soft_cap, - ) + num_heads: int = self.num_heads + head_size: int = self.head_size + num_kv_heads: int = self.num_kv_heads + kv_cache_dtype: str = self.kv_cache_dtype + softmax_scale: float = self.scale + window_size = self.sliding_window + alibi_slopes: Optional[torch.Tensor] = self.alibi_slopes + logits_soft_cap: Optional[float] = self.logits_soft_cap + + num_tokens, hidden_size = query.shape + + # Reshape the query, key, and value tensors. + query = query.view(-1, num_heads, head_size) + if (key is not None) and (value is not None): + key = key.view(-1, num_kv_heads, head_size) + value = value.view(-1, num_kv_heads, head_size) + + if kv_cache.numel() > 0: + key_cache = kv_cache[0] + value_cache = kv_cache[1] + # We skip updating the KV cache under two conditions: + # a. When the Attention Type is ENCODER. In this phase, we compute + # only the encoder attention without updating the cache. + # b. When both Key and Value are None. This occurs during + # cross-attention computation in the decoding phase, where the + # KV cache is already populated with the cross-attention + # tensor. Thus, we skip cache updates during this time. + if (attn_type != AttentionType.ENCODER) and (key is not None) and ( + value is not None): + if attn_type == AttentionType.ENCODER_DECODER: + # Update cross-attention KV cache (prefill-only) + updated_slot_mapping = attn_metadata.cross_slot_mapping + else: + # Update self-attention KV cache (prefill/decode) + updated_slot_mapping = attn_metadata.slot_mapping + + # Reshape the input keys and values and store them in the cache. + # If kv_cache is not provided, the new key and value tensors are + # not cached. This happens during the initial memory + # profiling run. + torch.ops._C_cache_ops.reshape_and_cache_flash( + key, + value, + kv_cache[0], + kv_cache[1], + updated_slot_mapping.flatten(), # type: ignore[union-attr] + kv_cache_dtype, + k_scale, + v_scale, + ) + + (num_prefill_query_tokens, num_prefill_kv_tokens, + num_decode_query_tokens) = \ + get_num_prefill_decode_query_kv_tokens(attn_metadata, attn_type) + decode_query = query[num_prefill_query_tokens:] + # QKV for prefill. + query = query[:num_prefill_query_tokens] + assert query.shape[0] == num_prefill_query_tokens + assert decode_query.shape[0] == num_decode_query_tokens + + prefill_output: Optional[torch.Tensor] = None + decode_output: Optional[torch.Tensor] = None + if prefill_meta := attn_metadata.prefill_metadata: + # Prompt run. + if (kv_cache.numel() == 0 or prefill_meta.block_tables is None + or prefill_meta.block_tables.numel() == 0): + # normal attention + # When block_tables are not filled, it means q and k are the + # prompt, and they have the same length. 
+ q_seq_start_loc, q_seq_len, k_seq_start_loc, k_seq_len = \ + _get_query_key_seq_metadata(prefill_meta, True, attn_type) + + key = key[:num_prefill_kv_tokens] + value = value[:num_prefill_kv_tokens] + + prefill_output = flash_attn_varlen_func( + q=query, + k=key, + v=value, + cu_seqlens_q=q_seq_start_loc, + cu_seqlens_k=k_seq_start_loc, + max_seqlen_q=q_seq_len, + max_seqlen_k=k_seq_len, + softmax_scale=softmax_scale, + causal=_get_causal_option(attn_type), + window_size=window_size, + alibi_slopes=alibi_slopes, + softcap=logits_soft_cap, + ) + else: + # prefix-enabled attention + assert attn_type == AttentionType.DECODER, ( + "Only decoder-only models support prefix caching") + assert prefill_meta.seq_lens is not None + max_seq_len = max(prefill_meta.seq_lens) + prefill_output = flash_attn_varlen_func( # noqa + q=query, + k=key_cache, + v=value_cache, + cu_seqlens_q=prefill_meta.query_start_loc, + max_seqlen_q=prefill_meta.max_query_len, + cu_seqlens_k=prefill_meta.seq_start_loc, + max_seqlen_k=max_seq_len, + softmax_scale=softmax_scale, + causal=True, + window_size=window_size, + alibi_slopes=alibi_slopes, + block_table=prefill_meta.block_tables, + softcap=logits_soft_cap, + ) + + if decode_meta := attn_metadata.decode_metadata: + # Decoding run. + # Use flash_attn_varlen_func kernel for speculative decoding + # because different queries might have different lengths. + + assert decode_meta.max_decode_query_len is not None + # use only for actual varlen decoding + if decode_meta.max_decode_query_len > 1: + assert attn_type == AttentionType.DECODER, ( + "Only decoder-only models support max_decode_query_len > 1" + ) + decode_output = flash_attn_varlen_func( + q=decode_query, + k=key_cache, + v=value_cache, + cu_seqlens_q=decode_meta.query_start_loc, + max_seqlen_q=decode_meta.max_decode_query_len, + cu_seqlens_k=decode_meta.seq_start_loc, + max_seqlen_k=decode_meta.max_decode_seq_len, + softmax_scale=softmax_scale, + causal=True, + window_size=window_size, + alibi_slopes=alibi_slopes, + softcap=logits_soft_cap, + block_table=decode_meta.block_tables, + ) + else: + # Use flash_attn_with_kvcache for normal decoding. 
+ ( + seq_lens_arg, + _, + block_tables_arg, + ) = get_seq_len_block_table_args(decode_meta, False, attn_type) + decode_output = flash_attn_with_kvcache( + q=decode_query.unsqueeze(1), + k_cache=key_cache, + v_cache=value_cache, + block_table=block_tables_arg, + cache_seqlens=seq_lens_arg, + softmax_scale=softmax_scale, + causal=True, + window_size=window_size, + alibi_slopes=alibi_slopes, + softcap=logits_soft_cap, + ).squeeze(1) + + if prefill_output is None: + assert decode_output is not None + return decode_output.view(num_decode_query_tokens, hidden_size) + if decode_output is None: + assert prefill_output is not None + return prefill_output.view(num_prefill_query_tokens, hidden_size) + + assert decode_meta is not None + decode_output = decode_output.squeeze(1) + output = torch.cat([prefill_output, decode_output], dim=0) + return output.view(num_tokens, hidden_size) return output @@ -692,7 +841,7 @@ def forward( def _get_query_key_seq_metadata( attn_metadata, is_prompt: bool, - attn_type: AttentionType, + attn_type: str, ) -> tuple: """ Returns sequence metadata for key and query based on the specified @@ -754,7 +903,7 @@ def _get_query_key_seq_metadata( raise AttributeError(f"Invalid attention type {str(attn_type)}") -def _get_causal_option(attn_type: AttentionType) -> bool: +def _get_causal_option(attn_type: str) -> bool: """ Determine whether the given attention type is suitable for causal attention mechanisms. @@ -770,220 +919,3 @@ def _get_causal_option(attn_type: AttentionType) -> bool: return not (attn_type == AttentionType.ENCODER or attn_type == AttentionType.ENCODER_ONLY or attn_type == AttentionType.ENCODER_DECODER) - - -def unified_flash_attention( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - attn_type_int_val: int, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - - # Convert integer attn_type to enum - try: - attn_type = AttentionType(attn_type_int_val) - except ValueError as err: - raise AttributeError( - f"Invalid attention type {str(attn_type_int_val)}") from err - - current_metadata = get_forward_context() - assert current_metadata is not None - assert isinstance(current_metadata, FlashAttentionMetadata) - attn_metadata: FlashAttentionMetadata = current_metadata - - num_tokens, hidden_size = query.shape - - # Reshape the query, key, and value tensors. - query = query.view(-1, num_heads, head_size) - if (key is not None) and (value is not None): - key = key.view(-1, num_kv_heads, head_size) - value = value.view(-1, num_kv_heads, head_size) - - if kv_cache.numel() > 0: - key_cache = kv_cache[0] - value_cache = kv_cache[1] - # We skip updating the KV cache under two conditions: - # a. When the Attention Type is ENCODER. In this phase, we compute - # only the encoder attention without updating the cache. - # b. When both Key and Value are None. This occurs during - # cross-attention computation in the decoding phase, where the KV - # cache is already populated with the cross-attention tensor. - # Thus, we skip cache updates during this time. 
- if (attn_type != AttentionType.ENCODER) and (key is not None) and ( - value is not None): - if attn_type == AttentionType.ENCODER_DECODER: - # Update cross-attention KV cache (prefill-only) - updated_slot_mapping = attn_metadata.cross_slot_mapping - else: - # Update self-attention KV cache (prefill/decode) - updated_slot_mapping = attn_metadata.slot_mapping - - # Reshape the input keys and values and store them in the cache. - # If kv_cache is not provided, the new key and value tensors are - # not cached. This happens during the initial memory profiling run. - torch.ops._C_cache_ops.reshape_and_cache_flash( - key, - value, - kv_cache[0], - kv_cache[1], - updated_slot_mapping.flatten(), # type: ignore[union-attr] - kv_cache_dtype, - k_scale, - v_scale, - ) - - (num_prefill_query_tokens, num_prefill_kv_tokens, - num_decode_query_tokens) = \ - get_num_prefill_decode_query_kv_tokens(attn_metadata, attn_type) - decode_query = query[num_prefill_query_tokens:] - # QKV for prefill. - query = query[:num_prefill_query_tokens] - assert query.shape[0] == num_prefill_query_tokens - assert decode_query.shape[0] == num_decode_query_tokens - - prefill_output: Optional[torch.Tensor] = None - decode_output: Optional[torch.Tensor] = None - if prefill_meta := attn_metadata.prefill_metadata: - # Prompt run. - if (kv_cache.numel() == 0 or prefill_meta.block_tables is None - or prefill_meta.block_tables.numel() == 0): - # normal attention - # When block_tables are not filled, it means q and k are the - # prompt, and they have the same length. - q_seq_start_loc, q_seq_len, k_seq_start_loc, k_seq_len = \ - _get_query_key_seq_metadata(prefill_meta, True, attn_type) - - key = key[:num_prefill_kv_tokens] - value = value[:num_prefill_kv_tokens] - - prefill_output = flash_attn_varlen_func( - q=query, - k=key, - v=value, - cu_seqlens_q=q_seq_start_loc, - cu_seqlens_k=k_seq_start_loc, - max_seqlen_q=q_seq_len, - max_seqlen_k=k_seq_len, - softmax_scale=softmax_scale, - causal=_get_causal_option(attn_type), - window_size=window_size, - alibi_slopes=alibi_slopes, - softcap=logits_soft_cap, - ) - else: - # prefix-enabled attention - assert attn_type == AttentionType.DECODER, ( - "Only decoder-only models support prefix caching") - assert prefill_meta.seq_lens is not None - max_seq_len = max(prefill_meta.seq_lens) - prefill_output = flash_attn_varlen_func( # noqa - q=query, - k=key_cache, - v=value_cache, - cu_seqlens_q=prefill_meta.query_start_loc, - max_seqlen_q=prefill_meta.max_query_len, - cu_seqlens_k=prefill_meta.seq_start_loc, - max_seqlen_k=max_seq_len, - softmax_scale=softmax_scale, - causal=True, - window_size=window_size, - alibi_slopes=alibi_slopes, - block_table=prefill_meta.block_tables, - softcap=logits_soft_cap, - ) - - if decode_meta := attn_metadata.decode_metadata: - # Decoding run. - # Use flash_attn_varlen_func kernel for speculative decoding - # because different queries might have different lengths. 
- - assert decode_meta.max_decode_query_len is not None - # use only for actual varlen decoding - if decode_meta.max_decode_query_len > 1: - assert attn_type == AttentionType.DECODER, ( - "Only decoder-only models support max_decode_query_len > 1") - decode_output = flash_attn_varlen_func( - q=decode_query, - k=key_cache, - v=value_cache, - cu_seqlens_q=decode_meta.query_start_loc, - max_seqlen_q=decode_meta.max_decode_query_len, - cu_seqlens_k=decode_meta.seq_start_loc, - max_seqlen_k=decode_meta.max_decode_seq_len, - softmax_scale=softmax_scale, - causal=True, - window_size=window_size, - alibi_slopes=alibi_slopes, - softcap=logits_soft_cap, - block_table=decode_meta.block_tables, - ) - else: - # Use flash_attn_with_kvcache for normal decoding. - ( - seq_lens_arg, - _, - block_tables_arg, - ) = get_seq_len_block_table_args(decode_meta, False, attn_type) - decode_output = flash_attn_with_kvcache( - q=decode_query.unsqueeze(1), - k_cache=key_cache, - v_cache=value_cache, - block_table=block_tables_arg, - cache_seqlens=seq_lens_arg, - softmax_scale=softmax_scale, - causal=True, - window_size=window_size, - alibi_slopes=alibi_slopes, - softcap=logits_soft_cap, - ).squeeze(1) - - if prefill_output is None: - assert decode_output is not None - return decode_output.view(num_decode_query_tokens, hidden_size) - if decode_output is None: - assert prefill_output is not None - return prefill_output.view(num_prefill_query_tokens, hidden_size) - - assert decode_meta is not None - decode_output = decode_output.squeeze(1) - output = torch.cat([prefill_output, decode_output], dim=0) - return output.view(num_tokens, hidden_size) - - -def unified_flash_attention_fake( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - attn_type_int_val: int, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - return torch.empty_like(query) - - -direct_register_custom_op( - op_name="unified_flash_attention", - op_func=unified_flash_attention, - mutates_args=["kv_cache"], - fake_impl=unified_flash_attention_fake, -) diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py index b61c660e3e280..1a2024705eb04 100644 --- a/vllm/attention/backends/flashinfer.py +++ b/vllm/attention/backends/flashinfer.py @@ -30,9 +30,8 @@ compute_slot_mapping_start_idx, is_block_tables_empty) from vllm.attention.ops.paged_attn import PagedAttention -from vllm.forward_context import get_forward_context -from vllm.utils import (async_tensor_h2d, direct_register_custom_op, - get_kv_cache_torch_dtype, make_tensor_with_pad) +from vllm.utils import (async_tensor_h2d, get_kv_cache_torch_dtype, + make_tensor_with_pad) if TYPE_CHECKING: from vllm.worker.model_runner import (ModelInputForGPUBuilder, @@ -774,7 +773,7 @@ def forward( attn_metadata: FlashInferMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: if attn_type != AttentionType.DECODER: raise NotImplementedError("Encoder self-attention and " @@ -782,174 +781,117 @@ def forward( "are not implemented for " "FlashInferImpl") - return torch.ops.vllm.unified_flash_infer( - query, - key, - value, - self.num_heads, - self.head_size, - self.num_kv_heads, - kv_cache, - 
self.kv_cache_dtype, - k_scale, - v_scale, - self.scale, - self.sliding_window, - self.alibi_slopes, - self.logits_soft_cap, - ) - - -def unified_flash_infer( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - - current_metadata = get_forward_context() - assert current_metadata is not None - assert isinstance(current_metadata, FlashInferMetadata) - attn_metadata: FlashInferMetadata = current_metadata - - num_tokens, hidden_size = query.shape - query = query.view(-1, num_heads, head_size) - key = key.view(-1, num_kv_heads, head_size) - value = value.view(-1, num_kv_heads, head_size) - - if kv_cache.numel() > 0: - # Use the same reshape and cache kernel as flash attention. - ops.reshape_and_cache_flash( - key, - value, - kv_cache[:, 0], - kv_cache[:, 1], - attn_metadata.slot_mapping.flatten(), - kv_cache_dtype, - k_scale, - v_scale, - ) - # The FlashInfer api requires data to be in fp8_e4m3 or fp8_e5m2 - # to process the cache when the kv_cache_dtype is fp8 - if kv_cache_dtype.startswith("fp8"): - torch_dtype = FlashInferBackend.get_fp8_dtype_for_flashinfer( - kv_cache_dtype) - kv_cache = kv_cache.view(torch_dtype) - - num_prefill_tokens = attn_metadata.num_prefill_tokens - num_decode_tokens = attn_metadata.num_decode_tokens - assert key.shape[0] == num_prefill_tokens + num_decode_tokens, \ - f"key : {key.shape} : #prefill tokens {num_prefill_tokens} : #decode tokens {num_decode_tokens}" # noqa - assert value.shape[0] == num_prefill_tokens + num_decode_tokens, \ - f"value : {value.shape} : #prefill toks {num_prefill_tokens} : #decode toks {num_decode_tokens}" # noqa - query = query.contiguous() # Flashinfer requires query to be contiguous - # Query for decode. KV is not needed because it is already cached. - # QKV for prefill. - decode_query = query[num_prefill_tokens:] - query = query[:num_prefill_tokens] - - key = key[:num_prefill_tokens] - value = value[:num_prefill_tokens] - - assert query.shape[0] == num_prefill_tokens - assert decode_query.shape[0] == num_decode_tokens - - window_left = window_size[0] if window_size is not None else -1 - - prefill_output: Optional[torch.Tensor] = None - decode_output: Optional[torch.Tensor] = None - if prefill_meta := attn_metadata.prefill_metadata: - # We will use flash attention for prefill - # when kv_cache is not provided. - # This happens when vllm runs the profiling to - # determine the number of blocks. 
- if kv_cache.numel() == 0: - prefill_output = flash_attn_varlen_func( - q=query, - k=key, - v=value, - cu_seqlens_q=prefill_meta.seq_start_loc, - cu_seqlens_k=prefill_meta.seq_start_loc, - max_seqlen_q=prefill_meta.max_prefill_seq_len, - max_seqlen_k=prefill_meta.max_prefill_seq_len, - softmax_scale=softmax_scale, - causal=True, - window_size=window_size, - alibi_slopes=alibi_slopes, + num_heads: int = self.num_heads + head_size: int = self.head_size + num_kv_heads: int = self.num_kv_heads + kv_cache_dtype: str = self.kv_cache_dtype + softmax_scale: float = self.scale + window_size = self.sliding_window + alibi_slopes = self.alibi_slopes + logits_soft_cap = self.logits_soft_cap + + num_tokens, hidden_size = query.shape + query = query.view(-1, num_heads, head_size) + key = key.view(-1, num_kv_heads, head_size) + value = value.view(-1, num_kv_heads, head_size) + + if kv_cache.numel() > 0: + # Use the same reshape and cache kernel as flash attention. + ops.reshape_and_cache_flash( + key, + value, + kv_cache[:, 0], + kv_cache[:, 1], + attn_metadata.slot_mapping.flatten(), + kv_cache_dtype, + k_scale, + v_scale, ) - else: - assert prefill_meta is not None - assert prefill_meta.prefill_wrapper is not None - prefill_output = prefill_meta.prefill_wrapper.forward( - query, + # The FlashInfer api requires data to be in fp8_e4m3 or fp8_e5m2 + # to process the cache when the kv_cache_dtype is fp8 + if kv_cache_dtype.startswith("fp8"): + torch_dtype = FlashInferBackend.get_fp8_dtype_for_flashinfer( + kv_cache_dtype) + kv_cache = kv_cache.view(torch_dtype) + + num_prefill_tokens = attn_metadata.num_prefill_tokens + num_decode_tokens = attn_metadata.num_decode_tokens + assert key.shape[0] == num_prefill_tokens + num_decode_tokens, \ + f"key : {key.shape} : #prefill tokens {num_prefill_tokens} : #decode tokens {num_decode_tokens}" # noqa + assert value.shape[0] == num_prefill_tokens + num_decode_tokens, \ + f"value : {value.shape} : #prefill toks {num_prefill_tokens} : #decode toks {num_decode_tokens}" # noqa + query = query.contiguous( + ) # Flashinfer requires query to be contiguous + # Query for decode. KV is not needed because it is already cached. + # QKV for prefill. + decode_query = query[num_prefill_tokens:] + query = query[:num_prefill_tokens] + + key = key[:num_prefill_tokens] + value = value[:num_prefill_tokens] + + assert query.shape[0] == num_prefill_tokens + assert decode_query.shape[0] == num_decode_tokens + + window_left = window_size[0] if window_size is not None else -1 + + prefill_output: Optional[torch.Tensor] = None + decode_output: Optional[torch.Tensor] = None + if prefill_meta := attn_metadata.prefill_metadata: + # We will use flash attention for prefill + # when kv_cache is not provided. + # This happens when vllm runs the profiling to + # determine the number of blocks. 
+ if kv_cache.numel() == 0: + prefill_output = flash_attn_varlen_func( + q=query, + k=key, + v=value, + cu_seqlens_q=prefill_meta.seq_start_loc, + cu_seqlens_k=prefill_meta.seq_start_loc, + max_seqlen_q=prefill_meta.max_prefill_seq_len, + max_seqlen_k=prefill_meta.max_prefill_seq_len, + softmax_scale=softmax_scale, + causal=True, + window_size=window_size, + alibi_slopes=alibi_slopes, + ) + else: + assert prefill_meta is not None + assert prefill_meta.prefill_wrapper is not None + prefill_output = prefill_meta.prefill_wrapper.forward( + query, + kv_cache, + logits_soft_cap=logits_soft_cap, + causal=True, + k_scale=k_scale, + v_scale=v_scale, + window_left=window_left) + if decode_meta := attn_metadata.decode_metadata: + assert decode_meta is not None + assert decode_meta.decode_wrapper is not None + decode_output = decode_meta.decode_wrapper.forward( + decode_query, kv_cache, + sm_scale=softmax_scale, logits_soft_cap=logits_soft_cap, - causal=True, k_scale=k_scale, v_scale=v_scale, window_left=window_left) - if decode_meta := attn_metadata.decode_metadata: - assert attn_metadata.decode_metadata is not None - assert attn_metadata.decode_metadata.decode_wrapper is not None - decode_output = attn_metadata.decode_metadata.decode_wrapper.forward( - decode_query, - kv_cache, - sm_scale=softmax_scale, - logits_soft_cap=logits_soft_cap, - k_scale=k_scale, - v_scale=v_scale, - window_left=window_left) - - if prefill_output is None and decode_output is not None: - # Decode only batch. - output, num_tokens = decode_output, num_decode_tokens - elif decode_output is None and prefill_output is not None: - # Prefill only batch. - output, num_tokens = prefill_output, num_prefill_tokens - else: - # Chunked prefill batch does not work with speculative decoding in - # FlashInfer backend, so the query length for decode should be 1. - assert prefill_output is not None - assert decode_output is not None - assert decode_meta is not None - assert decode_meta.decode_query_len == 1 - decode_output = decode_output.squeeze(1) - output = torch.cat([prefill_output, decode_output], dim=0) - return output.view(num_tokens, hidden_size) - - -def unified_flash_infer_fake( - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> torch.Tensor: - return torch.empty_like(query).contiguous() - - -direct_register_custom_op( - op_name="unified_flash_infer", - op_func=unified_flash_infer, - mutates_args=["kv_cache"], - fake_impl=unified_flash_infer_fake, -) + + if prefill_output is None and decode_output is not None: + # Decode only batch. + output, num_tokens = decode_output, num_decode_tokens + elif decode_output is None and prefill_output is not None: + # Prefill only batch. + output, num_tokens = prefill_output, num_prefill_tokens + else: + # Chunked prefill batch does not work with speculative decoding in + # FlashInfer backend, so the query length for decode should be 1. 
+ assert prefill_output is not None + assert decode_output is not None + assert decode_meta is not None + assert decode_meta.decode_query_len == 1 + decode_output = decode_output.squeeze(1) + output = torch.cat([prefill_output, decode_output], dim=0) + return output.view(num_tokens, hidden_size) diff --git a/vllm/attention/backends/hpu_attn.py b/vllm/attention/backends/hpu_attn.py index a8f4b09b67274..4a3ddd5db94e5 100644 --- a/vllm/attention/backends/hpu_attn.py +++ b/vllm/attention/backends/hpu_attn.py @@ -140,7 +140,7 @@ def forward( attn_metadata: HPUAttentionMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with xFormers and PagedAttention. diff --git a/vllm/attention/backends/ipex_attn.py b/vllm/attention/backends/ipex_attn.py index 87bdb1e0e6565..3b0d51ea4a3d8 100644 --- a/vllm/attention/backends/ipex_attn.py +++ b/vllm/attention/backends/ipex_attn.py @@ -172,7 +172,7 @@ def forward( attn_metadata: IpexAttnMetadata, # type: ignore k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with IPEX varlen_attention and PagedAttention. diff --git a/vllm/attention/backends/pallas.py b/vllm/attention/backends/pallas.py index eeab8731a2c39..5988be0e6b687 100644 --- a/vllm/attention/backends/pallas.py +++ b/vllm/attention/backends/pallas.py @@ -150,7 +150,7 @@ def forward( attn_metadata: PallasMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with Pallas attention. diff --git a/vllm/attention/backends/rocm_flash_attn.py b/vllm/attention/backends/rocm_flash_attn.py index 2bae370eaa90f..6a494f4e73cb4 100644 --- a/vllm/attention/backends/rocm_flash_attn.py +++ b/vllm/attention/backends/rocm_flash_attn.py @@ -414,7 +414,7 @@ def forward( attn_metadata: ROCmFlashAttentionMetadata, k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with FlashAttention and PagedAttention. 
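The hunks above and below replace the per-backend `unified_flash_attention` / `unified_flash_infer` custom ops with a single `unified_attention` op: `AttentionType` becomes plain string constants (friendlier to `torch.compile` than an Enum), each `Attention` layer registers itself under its `prefix` in the compilation config's `static_forward_context`, and `set_forward_context` now carries both that static layer map and the per-step attention metadata. The standalone sketch below only illustrates that dispatch pattern; `DemoAttention`, `DemoForwardContext`, `set_demo_forward_context`, and `demo_unified_attention` are hypothetical stand-ins rather than vLLM APIs, and the real op is additionally registered through `direct_register_custom_op` as shown in the `vllm/attention/layer.py` hunk.

# Minimal sketch (assumed names, not vLLM APIs) of the "string attention type
# + per-layer forward context" dispatch pattern introduced by this patch.
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import Any, Dict

import torch


class AttentionType:
    # Plain strings instead of an Enum, mirroring the abstract.py hunk.
    DECODER = "decoder"
    ENCODER = "encoder"


@dataclass
class DemoForwardContext:
    # layer name -> layer object (static), plus per-step metadata (dynamic).
    static: Dict[str, Any] = field(default_factory=dict)
    dynamic: Any = None


_CTX = DemoForwardContext()


@contextmanager
def set_demo_forward_context(metadata: Any):
    # Install per-step metadata for the duration of one forward pass,
    # restoring whatever was there before on exit.
    prev = _CTX.dynamic
    _CTX.dynamic = metadata
    try:
        yield
    finally:
        _CTX.dynamic = prev


class DemoAttention(torch.nn.Module):

    def __init__(self, layer_name: str, scale: float):
        super().__init__()
        self.scale = scale
        self.layer_name = layer_name
        # Register this layer so a flat op can find it by name later.
        _CTX.static[layer_name] = self

    def forward(self, q: torch.Tensor, k: torch.Tensor,
                v: torch.Tensor) -> torch.Tensor:
        # The real implementation would also consult _CTX.dynamic
        # (the attention metadata) here.
        attn = torch.softmax(q @ k.transpose(-1, -2) * self.scale, dim=-1)
        return attn @ v


def demo_unified_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
                           attn_type: str, layer_name: str) -> torch.Tensor:
    # A single opaque entry point: look the layer up by name instead of
    # passing per-layer tensors and scalars through the op signature.
    assert attn_type == AttentionType.DECODER
    layer = _CTX.static[layer_name]
    return layer.forward(q, k, v)


if __name__ == "__main__":
    name = "model.layers.0.self_attn.attn"  # illustrative prefix
    layer = DemoAttention(name, scale=0.125)
    q = k = v = torch.randn(2, 4, 8)
    with set_demo_forward_context(metadata={"step": 0}):
        out = demo_unified_attention(q, k, v, AttentionType.DECODER, name)
    print(out.shape)  # torch.Size([2, 4, 8])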
diff --git a/vllm/attention/backends/torch_sdpa.py b/vllm/attention/backends/torch_sdpa.py index 3d025df26a7a1..16e044b618c40 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -141,7 +141,7 @@ def decode_metadata(self) -> Optional["TorchSDPAMetadata"]: def get_seq_lens( self, - attn_type: AttentionType, + attn_type: str, ): ''' Extract appropriate sequence lengths from attention metadata @@ -174,7 +174,7 @@ def get_seq_lens( def get_attn_bias( self, - attn_type: AttentionType, + attn_type: str, ) -> Optional[List[torch.Tensor]]: ''' Extract appropriate attention bias from attention metadata @@ -203,7 +203,7 @@ def get_attn_bias( def set_attn_bias( self, attn_bias: List[torch.Tensor], - attn_type: AttentionType, + attn_type: str, ) -> None: ''' Update appropriate attention bias field of attention metadata, @@ -229,7 +229,7 @@ def set_attn_bias( def get_seq_len_block_table_args( self, - attn_type: AttentionType, + attn_type: str, ) -> tuple: ''' The particular choice of sequence-length- and block-table-related @@ -426,7 +426,7 @@ def forward( attn_metadata: TorchSDPAMetadata, # type: ignore k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with torch SDPA and PagedAttention. @@ -574,7 +574,7 @@ def _run_sdpa_forward( key: torch.Tensor, value: torch.Tensor, attn_metadata: TorchSDPAMetadata, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> None: if self.num_kv_heads != self.num_heads: key = key.repeat_interleave(self.num_queries_per_kv, dim=1) diff --git a/vllm/attention/backends/utils.py b/vllm/attention/backends/utils.py index 12800668af223..56cc43430301f 100644 --- a/vllm/attention/backends/utils.py +++ b/vllm/attention/backends/utils.py @@ -478,7 +478,7 @@ def is_all_cross_attn_metadata_set(attn_metadata): def get_seq_len_block_table_args( attn_metadata, is_prompt: bool, - attn_type: AttentionType, + attn_type: str, ) -> tuple: ''' The particular choice of sequence-length- and block-table-related @@ -529,7 +529,7 @@ def get_seq_len_block_table_args( def get_num_prefill_decode_query_kv_tokens( attn_metadata, - attn_type: AttentionType, + attn_type: str, ) -> Tuple[int, int, int]: """ Calculate the number of prefill and decode tokens for query, key/value diff --git a/vllm/attention/backends/xformers.py b/vllm/attention/backends/xformers.py index 83d03606524dc..292575a8736bc 100644 --- a/vllm/attention/backends/xformers.py +++ b/vllm/attention/backends/xformers.py @@ -284,7 +284,7 @@ def decode_metadata(self) -> Optional["XFormersMetadata"]: def _get_attn_bias( attn_metadata: XFormersMetadata, - attn_type: AttentionType, + attn_type: str, ) -> Optional[AttentionBias]: ''' Extract appropriate attention bias from attention metadata @@ -314,7 +314,7 @@ def _get_attn_bias( def _set_attn_bias( attn_metadata: XFormersMetadata, attn_bias: List[Optional[AttentionBias]], - attn_type: AttentionType, + attn_type: str, ) -> None: ''' Update appropriate attention bias field of attention metadata, @@ -416,7 +416,7 @@ def forward( attn_metadata: "XFormersMetadata", k_scale: float = 1.0, v_scale: float = 1.0, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Forward pass with xFormers and PagedAttention. 
@@ -617,7 +617,7 @@ def _run_memory_efficient_xformers_forward( key: torch.Tensor, value: torch.Tensor, attn_metadata: XFormersMetadata, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: """Attention for 1D query of multiple prompts. Multiple prompt tokens are flattened in to `query` input. diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index 33d05cbd3fe01..8acbeaf12b0cf 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -4,12 +4,17 @@ import torch import torch.nn as nn +import vllm.envs as envs from vllm.attention import AttentionMetadata, AttentionType from vllm.attention.selector import get_attn_backend from vllm.config import CacheConfig +from vllm.forward_context import ForwardContext, get_forward_context from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) from vllm.model_executor.layers.quantization.kv_cache import BaseKVCacheMethod +from vllm.platforms import current_platform +from vllm.plugins import get_current_vllm_config +from vllm.utils import direct_register_custom_op class Attention(nn.Module): @@ -86,6 +91,18 @@ def __init__( alibi_slopes, sliding_window, kv_cache_dtype, blocksparse_params, logits_soft_cap) + # For cuda-alike (CUDA and ROCM) and cpu platforms, we control how + # torch.compile works by registering the attention as one giant + # opaque custom op. For other platforms, we directly call them + # and let torch.compile handle them. + self.use_direct_call = envs.VLLM_USE_V1 or not ( + current_platform.is_cuda_alike() or current_platform.is_cpu()) + compilation_config = get_current_vllm_config().compilation_config + if prefix in compilation_config.static_forward_context: + raise ValueError(f"Duplicate layer name: {prefix}") + compilation_config.static_forward_context[prefix] = self + self.layer_name = prefix + def forward( self, query: torch.Tensor, @@ -93,17 +110,22 @@ def forward( value: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata, - attn_type: AttentionType = AttentionType.DECODER, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: - return self.impl.forward(query, - key, - value, - kv_cache, - attn_metadata, - self._k_scale, - self._v_scale, - attn_type=attn_type) + if self.use_direct_call: + return self.impl.forward(query, + key, + value, + kv_cache, + attn_metadata, + self._k_scale, + self._v_scale, + attn_type=attn_type) + else: + return torch.ops.vllm.unified_attention(query, key, value, + kv_cache, attn_type, + self.layer_name) def extra_repr(self) -> str: s = f"head_size={self.impl.head_size}" # type: ignore @@ -112,3 +134,44 @@ def extra_repr(self) -> str: s += f", scale={self.impl.scale}" # type: ignore s += f", backend={self.impl.__class__.__name__}" return s + + +def unified_attention( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: torch.Tensor, + attn_type: str, + layer_name: str, +) -> torch.Tensor: + forward_context: ForwardContext = get_forward_context() + attn_metadata = forward_context.dynamic_forward_context + self = forward_context.static_forward_context[layer_name] + return self.impl.forward(query, + key, + value, + kv_cache, + attn_metadata, + self._k_scale, + self._v_scale, + attn_type=attn_type) + + +def unified_attention_fake( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: torch.Tensor, + attn_type: str, + layer_name: str, +) -> torch.Tensor: + return torch.empty_like(query).contiguous() + + 
+direct_register_custom_op( + op_name="unified_attention", + op_func=unified_attention, + mutates_args=["kv_cache"], + fake_impl=unified_attention_fake, + dispatch_key=current_platform.dispatch_key, +) diff --git a/vllm/config.py b/vllm/config.py index b5f2116e3557b..bb02c2ad4c7d4 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2135,8 +2135,7 @@ class CompilationConfig(BaseModel): backend: str = "" custom_ops: List[str] = Field(default_factory=list) splitting_ops: List[str] = Field(default_factory=lambda: [ - "vllm.unified_flash_attention", - "vllm.unified_flash_infer", + "vllm.unified_attention", "vllm.unified_v1_flash_attention", ]) @@ -2197,6 +2196,11 @@ def model_post_init(self, __context: Any) -> None: enabled_custom_ops: Counter[str] = PrivateAttr disabled_custom_ops: Counter[str] = PrivateAttr + # Per-model forward context + # Mainly used to store attention cls + # Map from layer name to the attention cls + static_forward_context: Dict[str, Any] = PrivateAttr + @classmethod def from_cli(cls, cli_value: str) -> "CompilationConfig": """Parse the CLI value for the compilation config.""" @@ -2228,6 +2232,7 @@ def model_post_init(self, __context: Any) -> None: self.enabled_custom_ops = Counter() self.disabled_custom_ops = Counter() + self.static_forward_context = {} def init_backend(self) -> Union[str, Callable]: if self.level == CompilationLevel.NO_COMPILATION: diff --git a/vllm/forward_context.py b/vllm/forward_context.py index 777747505e14a..aaa3e4bb3a1e8 100644 --- a/vllm/forward_context.py +++ b/vllm/forward_context.py @@ -1,21 +1,38 @@ from contextlib import contextmanager -from typing import Any +from dataclasses import dataclass +from typing import Any, Dict, Optional -_forward_context: Any = None +from vllm.config import VllmConfig -def get_forward_context() -> Any: +@dataclass +class ForwardContext: + static_forward_context: Dict[str, Any] + # TODO: extend to support per-layer dynamic forward context + dynamic_forward_context: Any + + +_forward_context: Optional[ForwardContext] = None + + +def get_forward_context() -> ForwardContext: """Get the current forward context.""" + assert _forward_context is not None, ( + "Forward context is not set. " + "Please use `set_forward_context` to set the forward context.") return _forward_context @contextmanager -def set_forward_context(context: Any): +def set_forward_context(context: Any, vllm_config: VllmConfig): """A context manager that stores the current forward context, can be attention metadata, etc.""" global _forward_context prev_context = _forward_context - _forward_context = context + _forward_context = ForwardContext( + static_forward_context=vllm_config.compilation_config. 
+ static_forward_context, + dynamic_forward_context=context) try: yield finally: diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index e58ad19cab54c..ac4c464aa10ac 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -223,6 +223,7 @@ def __init__( layer_idx: Optional[int] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.config = config @@ -274,7 +275,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -299,6 +301,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.layer_idx = layer_idx @@ -308,7 +311,8 @@ def __init__( self.self_attn = ArcticAttention(config, layer_idx, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.block_sparse_moe = ArcticMoE( config, layer_id=layer_idx, @@ -380,8 +384,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): org_num_embeddings=self.vocab_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: ArcticDecoderLayer(config, int( - prefix.split(".")[-1]), cache_config, quant_config), + lambda prefix: ArcticDecoderLayer(config, + int(prefix.split(".")[-1]), + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self._attn_implementation = config._attn_implementation self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index 3749a16a38994..a923ed36a9db2 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -116,6 +116,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = hidden_size @@ -158,7 +159,8 @@ def __init__( self.head_dim, scaling, alibi_slopes=alibi_slopes, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") else: self.rotary_emb = get_rope( self.head_dim, @@ -171,7 +173,8 @@ def __init__( self.head_dim, self.scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -195,7 +198,8 @@ def __init__(self, config: PretrainedConfig, position_embedding: str, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size rope_theta = getattr(config, "rope_theta", 10000) @@ -209,6 +213,7 @@ def __init__(self, max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) self.mlp = BaiChuanMLP( hidden_size=self.hidden_size, @@ -275,8 +280,11 @@ def __init__( ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: BaiChuanDecoderLayer(config, position_embedding, - cache_config, quant_config), + lambda prefix: BaiChuanDecoderLayer(config, + 
position_embedding, + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers", ) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/bart.py b/vllm/model_executor/models/bart.py index a50a5a5b018e1..3776490cb3465 100644 --- a/vllm/model_executor/models/bart.py +++ b/vllm/model_executor/models/bart.py @@ -126,6 +126,7 @@ def __init__( config: Optional[BartConfig] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -178,7 +179,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward(self, hidden_states: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata) -> torch.Tensor: @@ -208,6 +210,7 @@ def __init__( config: Optional[BartConfig] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -260,7 +263,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward(self, hidden_states: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata) -> torch.Tensor: @@ -290,6 +294,7 @@ def __init__( config: Optional[BartConfig] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -342,7 +347,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -384,6 +390,7 @@ def __init__( config: BartConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.embed_dim = config.d_model @@ -393,7 +400,9 @@ def __init__( num_heads=config.encoder_attention_heads, config=config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.activation_fn = get_act_fn(config.activation_function) @@ -464,6 +473,7 @@ def __init__( config: BartConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.embed_dim = config.d_model @@ -473,7 +483,9 @@ def __init__( num_heads=config.decoder_attention_heads, config=config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.activation_fn = get_act_fn(config.activation_function) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) @@ -486,6 +498,7 @@ def __init__( self.embed_dim, config.decoder_attention_heads, config=config, + prefix=f"{prefix}.encoder_attn", ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) @@ -578,7 +591,8 @@ def __init__(self, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, lora_config: Optional[LoRAConfig] = None, - embed_tokens: Optional[nn.Embedding] = None): + embed_tokens: Optional[nn.Embedding] = None, + prefix: str = ""): super().__init__() self.cache_config = 
cache_config @@ -599,9 +613,13 @@ def __init__(self, config.max_position_embeddings, embed_dim, ) - self.layers = nn.ModuleList( - [BartEncoderLayer(config,cache_config,quant_config) \ - for _ in range(config.encoder_layers)]) + self.layers = nn.ModuleList([ + BartEncoderLayer(config, + cache_config, + quant_config, + prefix=f"{prefix}.layers.{layer_idx}") + for layer_idx in range(config.encoder_layers) + ]) self.layernorm_embedding = nn.LayerNorm(embed_dim) @@ -661,6 +679,7 @@ def __init__( quant_config: Optional[QuantizationConfig] = None, lora_config: Optional[LoRAConfig] = None, embed_tokens: Optional[nn.Embedding] = None, + prefix: str = "", ): super().__init__() self.cache_config = cache_config @@ -683,8 +702,9 @@ def __init__( ) self.layers = nn.ModuleList( - [BartDecoderLayer(config,cache_config,quant_config) \ - for _ in range(config.decoder_layers)]) + [BartDecoderLayer(config,cache_config,quant_config, + prefix=f"{prefix}.layers.{layer_idx}") \ + for layer_idx in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) @@ -759,10 +779,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.encoder = BartEncoder(config, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.encoder") self.decoder = BartDecoder(config, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.decoder") def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, encoder_input_ids: torch.Tensor, diff --git a/vllm/model_executor/models/bloom.py b/vllm/model_executor/models/bloom.py index 1060d418474ef..fee74f491acc1 100644 --- a/vllm/model_executor/models/bloom.py +++ b/vllm/model_executor/models/bloom.py @@ -78,6 +78,7 @@ def __init__( config: BloomConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size @@ -116,7 +117,8 @@ def __init__( scaling, alibi_slopes=alibi_slopes, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -168,14 +170,17 @@ def __init__( config: BloomConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.hidden_size self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) - self.self_attention = BloomAttention(config, cache_config, - quant_config) + self.self_attention = BloomAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attention") self.post_attention_layernorm = nn.LayerNorm( hidden_size, eps=config.layer_norm_epsilon) self.mlp = BloomMLP(config, quant_config) @@ -242,7 +247,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # Transformer blocks self.start_layer, self.end_layer, self.h = make_layers( config.num_hidden_layers, - lambda prefix: BloomBlock(config, cache_config, quant_config), + lambda prefix: BloomBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h") # Final Layer Norm diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index 8f91abffaea90..5a6d6432112f0 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -223,6 +223,7 @@ def __init__( quant_config: Optional[QuantizationConfig] = None, bias: bool = False, cache_config: 
Optional[CacheConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -276,7 +277,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def _apply_qk_norm(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: @@ -313,6 +315,7 @@ def __init__( config: ChameleonConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -336,6 +339,7 @@ def __init__( quant_config=quant_config, bias=False, cache_config=cache_config, + prefix=f"{prefix}.self_attn", ) self.mlp = ChameleonMLP( hidden_size=self.hidden_size, @@ -386,6 +390,7 @@ def __init__( config: ChameleonConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -409,6 +414,7 @@ def __init__( quant_config=quant_config, bias=False, cache_config=cache_config, + prefix=f"{prefix}.self_attn", ) self.mlp = ChameleonMLP( hidden_size=self.hidden_size, @@ -855,7 +861,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.num_hidden_layers, lambda prefix: decoder_layer(config=config, cache_config=cache_config, - quant_config=quant_config), + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.layers", ) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 2ea592aaba9f9..e3a068908b7f3 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -230,6 +230,7 @@ def __init__( config: ChatGLMConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size @@ -285,7 +286,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -364,6 +366,7 @@ def __init__( config: ChatGLMConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.apply_residual_connection_post_layernorm = ( @@ -377,7 +380,10 @@ def __init__( eps=config.layernorm_epsilon) # Self attention. - self.self_attention = GLMAttention(config, cache_config, quant_config) + self.self_attention = GLMAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attention") self.hidden_dropout = config.hidden_dropout # Layernorm on the attention output @@ -446,7 +452,8 @@ def __init__( # Transformer layers. 
self.start_layer, self.end_layer, self.layers = make_layers( self.num_layers, - lambda prefix: GLMBlock(config, cache_config, quant_config), + lambda prefix: GLMBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) @@ -500,16 +507,22 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.num_layers = config.num_layers self.multi_query_group_num = config.multi_query_group_num self.kv_channels = config.kv_channels - self.encoder = GLMTransformer(config, cache_config, quant_config) + self.encoder = GLMTransformer(config, + cache_config, + quant_config, + prefix=f"{prefix}.encoder") self.output_layer = ParallelLMHead(config.padded_vocab_size, config.hidden_size, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.output_layer") vision_config_flag = getattr(config, 'vision_config', None) if vision_config_flag is not None: self.vision_config = Namespace(**config.vision_config) - self.vision = EVA2CLIPModel(self.config, quant_config) + self.vision = EVA2CLIPModel(self.config, + quant_config, + prefix=f"{prefix}.vision") else: self.vision = None diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index 9fd083e5a02a9..85e24ca660686 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -120,6 +120,7 @@ def __init__( config: CohereConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() tp_size = get_tensor_model_parallel_world_size() @@ -175,7 +176,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") if self.use_qk_norm: self.q_norm = LayerNorm(param_shape=(self.num_heads, self.head_dim), @@ -215,13 +217,15 @@ class CohereDecoderLayer(nn.Module): def __init__(self, config: CohereConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size self.self_attn = CohereAttention(config, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.mlp = CohereMLP(config, quant_config=quant_config) self.input_layernorm = LayerNorm(param_shape=(config.hidden_size), @@ -271,8 +275,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: CohereDecoderLayer(config, cache_config, - quant_config), + lambda prefix: CohereDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = LayerNorm(param_shape=(config.hidden_size), eps=config.layer_norm_eps) diff --git a/vllm/model_executor/models/dbrx.py b/vllm/model_executor/models/dbrx.py index eab338800249e..3932d8b52a9d1 100644 --- a/vllm/model_executor/models/dbrx.py +++ b/vllm/model_executor/models/dbrx.py @@ -154,6 +154,7 @@ def __init__( config: DbrxConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -208,7 +209,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + 
prefix=f"{prefix}.attn") def forward( self, @@ -234,10 +236,14 @@ def __init__( config: DbrxConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model - self.attn = DbrxAttention(config, cache_config, quant_config) + self.attn = DbrxAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.norm_1 = nn.LayerNorm(self.d_model) self.norm_2 = nn.LayerNorm(self.d_model) @@ -269,10 +275,14 @@ def __init__( config: DbrxConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() - self.norm_attn_norm = DbrxFusedNormAttention(config, cache_config, - quant_config) + self.norm_attn_norm = DbrxFusedNormAttention( + config, + cache_config, + quant_config, + prefix=f"{prefix}.norm_attn_norm") self.ffn = DbrxMoE(config, quant_config) def forward( @@ -308,7 +318,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.blocks = make_layers( config.n_layers, - lambda prefix: DbrxBlock(config, cache_config, quant_config), + lambda prefix: DbrxBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.blocks", ) self.norm_f = nn.LayerNorm(config.d_model, eps=1e-5) diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py index 8c5ad9904e925..32488d931ea1c 100644 --- a/vllm/model_executor/models/deepseek.py +++ b/vllm/model_executor/models/deepseek.py @@ -184,6 +184,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -236,7 +237,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -261,6 +263,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -277,6 +280,7 @@ def __init__( max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) if (config.n_routed_experts is not None and layer_idx >= config.first_k_dense_replace @@ -346,7 +350,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): lambda prefix: DeepseekDecoderLayer(config, int(prefix.split(".")[-1]), cache_config, - quant_config=quant_config), + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index d2c4ca0bf85e9..4cf4e6c358bf2 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -268,7 +268,8 @@ def __init__( self.scaling, num_kv_heads=self.num_local_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index 9d739d0479548..5ca26d53a17e7 100644 --- a/vllm/model_executor/models/exaone.py +++ 
b/vllm/model_executor/models/exaone.py @@ -174,6 +174,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.attn", ) def forward( @@ -219,7 +220,7 @@ def __init__( quant_config=quant_config, bias=bias, cache_config=cache_config, - prefix=prefix, + prefix=f"{prefix}.attention", ) def forward( diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py index 2aa4b67d99894..096ad32b38e86 100644 --- a/vllm/model_executor/models/falcon.py +++ b/vllm/model_executor/models/falcon.py @@ -84,6 +84,7 @@ def __init__( config: FalconConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() @@ -158,7 +159,8 @@ def __init__( self.head_dim, self.inv_norm_factor, num_kv_heads=self.num_kv_heads, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") elif self.use_alibi: tp_rank = get_tensor_model_parallel_rank() head_start = tp_rank * self.num_heads @@ -171,14 +173,16 @@ def __init__( self.inv_norm_factor, num_kv_heads=self.num_kv_heads, alibi_slopes=alibi_slopes, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") else: self.attn = Attention(self.num_heads, self.head_dim, scale=self.inv_norm_factor, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -241,12 +245,16 @@ def __init__( config: FalconConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.hidden_size self.num_heads = config.num_attention_heads - self.self_attention = FalconAttention(config, cache_config, - quant_config) + self.self_attention = FalconAttention( + config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attention") self.mlp = FalconMLP(config, quant_config) self.config = config @@ -357,8 +365,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # Transformer blocks self.start_layer, self.end_layer, self.h = make_layers( config.num_hidden_layers, - lambda prefix: FalconDecoderLayer(config, cache_config, - quant_config), + lambda prefix: FalconDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h") # Final Layer Norm diff --git a/vllm/model_executor/models/florence2.py b/vllm/model_executor/models/florence2.py index d3a9ff6915b84..3a5fe8e1f4144 100644 --- a/vllm/model_executor/models/florence2.py +++ b/vllm/model_executor/models/florence2.py @@ -35,10 +35,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.shared = BartScaledWordEmbedding(self.vocab_size, config.d_model) self.encoder = BartEncoder(config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.encoder") self.decoder = BartDecoder(config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.decoder") if self.config.tie_word_embeddings: self.encoder.embed_tokens.weight = self.shared.weight @@ -99,7 +101,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.config = config self.model = Florence2LanguageModel(vllm_config=vllm_config, - prefix=prefix) + prefix=f"{prefix}.model") embed_scale = math.sqrt( config.d_model) if config.scale_embedding else 1.0 @@ -198,7 +200,7 @@ def __init__(self, *, 
vllm_config: VllmConfig, prefix: str = ""): # TODO(Isotr0py): Add vision backbone self.language_model = Florence2LanguageForConditionalGeneration( vllm_config=vllm_config.with_hf_config(config.text_config), - prefix=prefix, + prefix=f"{prefix}.language_model", ) @property diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index 64e03b30bf2f1..131e9af139c2a 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -174,7 +174,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index 4ba39223cc07f..839130364ef4d 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -95,7 +95,8 @@ def __init__(self, rope_theta: float, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, - attn_logits_soft_cap: Optional[float] = None) -> None: + attn_logits_soft_cap: Optional[float] = None, + prefix: str = "") -> None: super().__init__() self.layer_idx = layer_idx self.config = config @@ -154,7 +155,8 @@ def __init__(self, num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, - logits_soft_cap=attn_logits_soft_cap) + logits_soft_cap=attn_logits_soft_cap, + prefix=f"{prefix}.attn") def forward( self, @@ -179,6 +181,7 @@ def __init__( config: Gemma2Config, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -194,6 +197,7 @@ def __init__( cache_config=cache_config, quant_config=quant_config, attn_logits_soft_cap=config.attn_logit_softcapping, + prefix=f"{prefix}.self_attn", ) self.hidden_size = config.hidden_size self.mlp = Gemma2MLP( @@ -257,8 +261,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: Gemma2DecoderLayer(int(prefix.split(".")[ - -1]), config, cache_config, quant_config), + lambda prefix: Gemma2DecoderLayer(int(prefix.split(".")[-1]), + config, + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/glm4_vision_encoder.py b/vllm/model_executor/models/glm4_vision_encoder.py index 025615b0920fd..f37ab0f82d52a 100644 --- a/vllm/model_executor/models/glm4_vision_encoder.py +++ b/vllm/model_executor/models/glm4_vision_encoder.py @@ -56,6 +56,7 @@ def __init__( self, config, quant_config: Optional[QuantizationConfig] = None, + prefix: str = '', ): super().__init__() self.hidden_size = config.hidden_size @@ -135,11 +136,14 @@ def __init__( self, config, quant_config: Optional[QuantizationConfig] = None, + prefix: str = '', ): super().__init__() self.input_layernorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.attention = Attention(config, quant_config=quant_config) + self.attention = Attention(config, + quant_config=quant_config, + prefix=f"{prefix}.attention") self.mlp = MLP(config, quant_config=quant_config) self.post_attention_layernorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) @@ -161,11 +165,14 @@ def __init__( self, config, quant_config: Optional[QuantizationConfig] 
= None, + prefix: str = '', ): super().__init__() self.layers = nn.ModuleList([ - TransformerLayer(config, quant_config=quant_config) - for _ in range(config.num_hidden_layers) + TransformerLayer(config, + quant_config=quant_config, + prefix=f"{prefix}.layer.{layer_idx}") + for layer_idx in range(config.num_hidden_layers) ]) def forward(self, hidden_states): @@ -252,12 +259,14 @@ def __init__( self, config, quant_config: Optional[QuantizationConfig] = None, + prefix: str = '', ): super().__init__() vision_config = Namespace(**config.vision_config) self.patch_embedding = PatchEmbedding(vision_config) self.transformer = Transformer(vision_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.transformer") self.linear_proj = GLU(config, in_features=config.hidden_size, quant_config=quant_config) diff --git a/vllm/model_executor/models/gpt2.py b/vllm/model_executor/models/gpt2.py index 1c61408ae1dd9..fd926ff0254d4 100644 --- a/vllm/model_executor/models/gpt2.py +++ b/vllm/model_executor/models/gpt2.py @@ -84,7 +84,8 @@ def __init__( self.head_dim, scale=self.scale, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/gpt_bigcode.py b/vllm/model_executor/models/gpt_bigcode.py index 50a143cb1b600..c64bc70688806 100644 --- a/vllm/model_executor/models/gpt_bigcode.py +++ b/vllm/model_executor/models/gpt_bigcode.py @@ -52,6 +52,7 @@ def __init__( config: GPTBigCodeConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size @@ -92,7 +93,8 @@ def __init__( scale=self.scale, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -151,6 +153,7 @@ def __init__( config: GPTBigCodeConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.hidden_size @@ -158,7 +161,10 @@ def __init__( hidden_size) self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) - self.attn = GPTBigCodeAttention(config, cache_config, quant_config) + self.attn = GPTBigCodeAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = GPTBigMLP(inner_dim, config, quant_config) @@ -210,7 +216,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) self.start_layer, self.end_layer, self.h = make_layers( config.num_hidden_layers, - lambda prefix: GPTBigCodeBlock(config, cache_config, quant_config), + lambda prefix: GPTBigCodeBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h", ) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py index d5defc60764e6..4829578a56959 100644 --- a/vllm/model_executor/models/gpt_j.py +++ b/vllm/model_executor/models/gpt_j.py @@ -53,6 +53,7 @@ def __init__( config: GPTJConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.total_num_heads = config.num_attention_heads @@ -94,7 +95,8 @@ def __init__( 
self.head_size, scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -147,12 +149,16 @@ def __init__( config: GPTJConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() inner_dim = (4 * config.n_embd if config.n_inner is None else config.n_inner) self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon) - self.attn = GPTJAttention(config, cache_config, quant_config) + self.attn = GPTJAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.mlp = GPTJMLP(inner_dim, config, quant_config) def forward( @@ -193,7 +199,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.h = make_layers( config.n_layer, - lambda prefix: GPTJBlock(config, cache_config, quant_config), + lambda prefix: GPTJBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h", ) self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py index 0bb5e2f9b95f9..731642772011c 100644 --- a/vllm/model_executor/models/gpt_neox.py +++ b/vllm/model_executor/models/gpt_neox.py @@ -52,6 +52,7 @@ def __init__( config: GPTNeoXConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.total_num_heads = config.num_attention_heads @@ -94,7 +95,8 @@ def __init__( self.head_size, scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -145,6 +147,7 @@ def __init__( config: GPTNeoXConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.use_parallel_residual = config.use_parallel_residual @@ -152,7 +155,10 @@ def __init__( eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.attention = GPTNeoXAttention(config, cache_config, quant_config) + self.attention = GPTNeoXAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attention") self.mlp = GPTNeoXMLP(config, quant_config) def forward( @@ -205,7 +211,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: GPTNeoXLayer(config, cache_config, quant_config), + lambda prefix: GPTNeoXLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) self.final_layer_norm = nn.LayerNorm(config.hidden_size, diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index c1e2e87f08ec3..bd2394e71c973 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -161,7 +161,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/granitemoe.py b/vllm/model_executor/models/granitemoe.py index a91a18816995f..51296ef0cc08e 100644 --- a/vllm/model_executor/models/granitemoe.py +++ b/vllm/model_executor/models/granitemoe.py @@ -164,7 +164,8 @@ def __init__( self.scaling, 
num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index 94b819b5d9366..906128940ff76 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -1,5 +1,5 @@ from functools import partial -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union import torch from torch import nn @@ -250,7 +250,12 @@ def forward( @support_torch_compile class InternLM2Model(nn.Module): - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + def __init__( + self, + *, + vllm_config: VllmConfig, + prefix: str = "", + layer_type: Type[InternLMDecoderLayer] = InternLMDecoderLayer): super().__init__() config = vllm_config.model_config.hf_config @@ -266,7 +271,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: InternLMDecoderLayer( + lambda prefix: layer_type( config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -316,14 +321,18 @@ def forward( class InternLM2ForCausalLM(nn.Module, SupportsPP): - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + def __init__(self, + *, + vllm_config: VllmConfig, + prefix: str = "", + model_type: Type[InternLM2Model] = InternLM2Model): super().__init__() config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config self.config = config self.quant_config = quant_config - self.model = InternLM2Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + self.model = model_type(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) self.output = ParallelLMHead(config.vocab_size, config.hidden_size, quant_config=quant_config, diff --git a/vllm/model_executor/models/internlm2_ve.py b/vllm/model_executor/models/internlm2_ve.py index f1b7c896cadfe..93ac2dcf8d587 100644 --- a/vllm/model_executor/models/internlm2_ve.py +++ b/vllm/model_executor/models/internlm2_ve.py @@ -14,8 +14,6 @@ InternLM2MLP, InternLM2Model) from vllm.sequence import IntermediateTensors -from .utils import make_layers, maybe_prefix - class InternLM2VEDecoderLayer(nn.Module): @@ -105,17 +103,9 @@ def forward( class InternLM2VEModel(InternLM2Model): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__(vllm_config=vllm_config, prefix=prefix) - - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - - self.start_layer, self.end_layer, self.layers = make_layers( - config.num_hidden_layers, - lambda prefix: InternLM2VEDecoderLayer( - config, cache_config, quant_config, prefix=prefix), - prefix=f"{prefix}.layers") + super().__init__(vllm_config=vllm_config, + prefix=prefix, + layer_type=InternLM2VEDecoderLayer) def forward( self, @@ -159,7 +149,6 @@ def forward( class InternLM2VEForCausalLM(InternLM2ForCausalLM): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__(vllm_config=vllm_config, prefix=prefix) - - self.model = InternLM2VEModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + super().__init__(vllm_config=vllm_config, + 
prefix=prefix, + model_type=InternLM2VEModel) diff --git a/vllm/model_executor/models/jais.py b/vllm/model_executor/models/jais.py index 41db85b678456..8c81dff6b5768 100644 --- a/vllm/model_executor/models/jais.py +++ b/vllm/model_executor/models/jais.py @@ -76,6 +76,7 @@ def __init__( config: JAISConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size @@ -114,7 +115,8 @@ def __init__( scale=self.scale, alibi_slopes=alibi_slopes, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -178,6 +180,7 @@ def __init__( config: JAISConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.hidden_size @@ -185,7 +188,10 @@ def __init__( hidden_size) self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) - self.attn = JAISAttention(config, cache_config, quant_config) + self.attn = JAISAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = JAISMLP(inner_dim, config, quant_config) @@ -241,7 +247,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.num_hidden_layers, lambda prefix: JAISBlock(config=config, cache_config=cache_config, - quant_config=quant_config), + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.h", ) diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py index f83f0fce7275f..099ca7e12b288 100644 --- a/vllm/model_executor/models/jamba.py +++ b/vllm/model_executor/models/jamba.py @@ -102,7 +102,8 @@ def __init__(self, config: JambaConfig, layer_idx: int, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None) -> None: + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "") -> None: super().__init__() self.config = config self.mamba = MambaMixer(hidden_size= config.hidden_size, @@ -157,6 +158,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -198,6 +200,7 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, + prefix=f"{prefix}.attn", ) num_experts = config.layers_num_experts[layer_idx] @@ -287,7 +290,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): layer_class(config, layer_idx=i, cache_config=cache_config, - quant_config=quant_config)) + quant_config=quant_config, + prefix=f"{prefix}.layers.{i}")) self.layers = nn.ModuleList(decoder_layers) self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 2b40e9ec73fad..66b29e72cfa89 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -174,6 +174,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.attn", ) def forward( diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index b92bff4d7c28c..c9a573278a136 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -192,6 
+192,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -246,7 +247,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -273,6 +275,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.config = config @@ -283,6 +286,7 @@ def __init__( self.rope_scaling = getattr(config, "rope_scaling", None) self.max_position_embeddings = getattr(config, "max_position_embeddings", 8192) + self.prefix = prefix self._init_attn_block() self._init_ffn_block() @@ -298,6 +302,7 @@ def _init_attn_block(self): max_position_embeddings=self.max_position_embeddings, cache_config=self.cache_config, quant_config=self.quant_config, + prefix=f"{self.prefix}.self_attn", ) def _init_ffn_block(self): @@ -388,8 +393,8 @@ def _init_layers( ): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: MiniCPMDecoderLayer(config, cache_config, - quant_config), + lambda prefix: MiniCPMDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: diff --git a/vllm/model_executor/models/minicpm3.py b/vllm/model_executor/models/minicpm3.py index 278c4bbe6e563..c38c31a0d4953 100644 --- a/vllm/model_executor/models/minicpm3.py +++ b/vllm/model_executor/models/minicpm3.py @@ -60,6 +60,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -119,7 +120,8 @@ def __init__( self.scaling, num_kv_heads=self.num_local_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -195,6 +197,7 @@ def _init_attn_block(self): max_position_embeddings=self.max_position_embeddings, cache_config=self.cache_config, quant_config=self.quant_config, + prefix=f"{self.prefix}.self_attn", ) @@ -209,8 +212,8 @@ def _init_layers( ): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: MiniCPM3DecoderLayer(config, cache_config, - quant_config), + lambda prefix: MiniCPM3DecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py index 0faffb4f1b00c..a5b364fe5ec85 100644 --- a/vllm/model_executor/models/mixtral.py +++ b/vllm/model_executor/models/mixtral.py @@ -166,7 +166,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/mixtral_quant.py b/vllm/model_executor/models/mixtral_quant.py index ddd6afcf6a1b6..7a9b8cd88cfd0 100644 --- a/vllm/model_executor/models/mixtral_quant.py +++ b/vllm/model_executor/models/mixtral_quant.py @@ -170,6 +170,7 @@ def __init__( rope_theta: float = 10000, quant_config: Optional[QuantizationConfig] = None, 
cache_config: Optional[CacheConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -219,7 +220,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -243,6 +245,7 @@ def __init__( config: MixtralConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -255,7 +258,9 @@ def __init__( num_kv_heads=config.num_key_value_heads, rope_theta=rope_theta, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) self.block_sparse_moe = MixtralMoE(config=config, quant_config=quant_config) self.input_layernorm = RMSNorm(config.hidden_size, @@ -311,7 +316,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, lambda prefix: MixtralDecoderLayer( - config, cache_config, quant_config=quant_config), + config, cache_config, quant_config=quant_config, prefix=prefix + ), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index 2528f741864b3..ee7b560fe1ee4 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -370,6 +370,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -427,7 +428,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") # Attention output projection. self.o_proj = RowParallelLinear( @@ -517,10 +519,14 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() # Attention block. - self.self_attn = MolmoAttention(config, cache_config, quant_config) + self.self_attn = MolmoAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attn") # MLP block. 
self.mlp = MolmoMLP(config, quant_config=quant_config) @@ -738,7 +744,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): else MolmoDecoderLayer self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: decoder_layer(config, cache_config, quant_config), + lambda prefix: decoder_layer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) diff --git a/vllm/model_executor/models/mpt.py b/vllm/model_executor/models/mpt.py index 8716e92b0f1c2..1235816413a44 100644 --- a/vllm/model_executor/models/mpt.py +++ b/vllm/model_executor/models/mpt.py @@ -50,6 +50,7 @@ def __init__( config: MPTConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.d_model = config.d_model @@ -115,7 +116,8 @@ def __init__( alibi_slopes=alibi_slopes, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -176,11 +178,15 @@ def __init__( config: MPTConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() hidden_size = config.d_model self.norm_1 = nn.LayerNorm(hidden_size) - self.attn = MPTAttention(config, cache_config, quant_config) + self.attn = MPTAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.attn") self.norm_2 = nn.LayerNorm(hidden_size) self.ffn = MPTMLP(config, quant_config) @@ -224,7 +230,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.blocks = make_layers( config.n_layers, - lambda prefix: MPTBlock(config, cache_config, quant_config), + lambda prefix: MPTBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.blocks") self.norm_f = nn.LayerNorm(config.d_model) if config.no_bias: diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index ceab299a7950a..c7b4c22b6896b 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -195,7 +195,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, diff --git a/vllm/model_executor/models/olmo.py b/vllm/model_executor/models/olmo.py index dc138e2e636ad..538e31ec91699 100644 --- a/vllm/model_executor/models/olmo.py +++ b/vllm/model_executor/models/olmo.py @@ -62,6 +62,7 @@ def __init__( config: OlmoConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.config = config @@ -101,7 +102,8 @@ def __init__( self.head_dim, scale=self.scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") # Attention output projection. self.o_proj = RowParallelLinear( @@ -184,10 +186,14 @@ class OlmoDecoderLayer(nn.Module): def __init__(self, config: OlmoConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() # Attention block. 
- self.self_attn = OlmoAttention(config, cache_config, quant_config) + self.self_attn = OlmoAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attn") # MLP block. self.mlp = OlmoMLP(config, quant_config) @@ -238,8 +244,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: OlmoDecoderLayer(config, cache_config, quant_config - ), + lambda prefix: OlmoDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False, diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py index ab87695d8e650..5b5b3ef48b035 100644 --- a/vllm/model_executor/models/olmoe.py +++ b/vllm/model_executor/models/olmoe.py @@ -102,6 +102,7 @@ def __init__( max_position_embeddings: int = 4096, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -156,7 +157,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -182,6 +184,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -199,6 +202,7 @@ def __init__( max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) self.mlp = OlmoeMoE( @@ -260,8 +264,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: OlmoeDecoderLayer(config, int( - prefix.split(".")[-1]), cache_config, quant_config), + lambda prefix: OlmoeDecoderLayer(config, + int(prefix.split(".")[-1]), + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=1e-5) diff --git a/vllm/model_executor/models/orion.py b/vllm/model_executor/models/orion.py index b01734af8ddd8..a3757b5c8808e 100644 --- a/vllm/model_executor/models/orion.py +++ b/vllm/model_executor/models/orion.py @@ -75,6 +75,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -126,7 +127,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -150,6 +152,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -166,6 +169,7 @@ def __init__( max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) self.mlp = OrionMLP( hidden_size=self.hidden_size, @@ -226,10 +230,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.start_layer, self.end_layer, self.layers = make_layers( 
config.num_hidden_layers, lambda prefix: OrionDecoderLayer( - config, - cache_config, - quant_config, - ), + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/persimmon.py b/vllm/model_executor/models/persimmon.py index 3b8199f4f1661..14dd4b5b1b4da 100644 --- a/vllm/model_executor/models/persimmon.py +++ b/vllm/model_executor/models/persimmon.py @@ -75,7 +75,8 @@ class PersimmonAttention(nn.Module): def __init__(self, config: PersimmonConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.config = config tensor_parallel_world_size = get_tensor_model_parallel_world_size() @@ -122,7 +123,8 @@ def __init__(self, self.head_dim, scale=self.scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def _split_heads(self, x: torch.Tensor) -> torch.Tensor: # [seq_length, hidden_size] -> [seq_length, num_heads, head_dim] @@ -167,12 +169,14 @@ class PersimmonDecoderLayer(nn.Module): def __init__(self, config: PersimmonConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size self.self_attn = PersimmonAttention(config=config, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.mlp = PersimmonMLP(config, quant_config=quant_config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) @@ -226,8 +230,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: PersimmonDecoderLayer(config, cache_config, - quant_config), + lambda prefix: PersimmonDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py index 0a117bf16c9b3..998d3723a0d7d 100644 --- a/vllm/model_executor/models/phi.py +++ b/vllm/model_executor/models/phi.py @@ -69,7 +69,8 @@ class PhiAttention(nn.Module): def __init__(self, config: PhiConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.total_num_heads = config.num_attention_heads self.hidden_size = config.hidden_size @@ -116,7 +117,8 @@ def __init__(self, self.head_size, scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -167,11 +169,15 @@ class PhiLayer(nn.Module): def __init__(self, config: PhiConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.self_attn = PhiAttention(config, cache_config, quant_config) + self.self_attn = 
PhiAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attn") self.mlp = PhiMLP(config, quant_config) def forward( @@ -210,7 +216,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: PhiLayer(config, cache_config, quant_config), + lambda prefix: PhiLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) diff --git a/vllm/model_executor/models/phi3_small.py b/vllm/model_executor/models/phi3_small.py index f71cbd1264c45..da7e4cdbc6940 100644 --- a/vllm/model_executor/models/phi3_small.py +++ b/vllm/model_executor/models/phi3_small.py @@ -117,6 +117,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.layer_idx = layer_idx @@ -214,15 +215,14 @@ def __init__( "homo_head": self.homo_heads } - self.attn = Attention( - self.num_heads_per_partition, - self.head_dim, - self.scale, - num_kv_heads=self.num_kv_heads_per_partion, - cache_config=cache_config, - quant_config=quant_config, - blocksparse_params=bs_params, - ) + self.attn = Attention(self.num_heads_per_partition, + self.head_dim, + self.scale, + num_kv_heads=self.num_kv_heads_per_partion, + cache_config=cache_config, + quant_config=quant_config, + blocksparse_params=bs_params, + prefix=f"{prefix}.attn") def forward( self, @@ -259,13 +259,15 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Phi3SmallSelfAttention(config, layer_idx, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.mlp = Phi3SmallMLP(config, quant_config) self.input_layernorm = nn.LayerNorm(config.hidden_size, @@ -315,7 +317,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.num_hidden_layers, lambda prefix: Phi3SmallDecoderLayer(config, int(prefix.split('.')[-1]), - cache_config, quant_config), + cache_config, + quant_config, + prefix=prefix), prefix=f"{prefix}.layers") self.final_layernorm = nn.LayerNorm(config.hidden_size, diff --git a/vllm/model_executor/models/phimoe.py b/vllm/model_executor/models/phimoe.py index e475d286bd7ea..1febd62f2f705 100644 --- a/vllm/model_executor/models/phimoe.py +++ b/vllm/model_executor/models/phimoe.py @@ -294,6 +294,7 @@ def __init__( cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, rope_scaling: Optional[dict] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -347,6 +348,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.attn", ) def forward( @@ -371,6 +373,7 @@ def __init__( config: PhiMoEConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -385,6 +388,7 @@ def __init__( cache_config=cache_config, quant_config=quant_config, rope_scaling=config.rope_scaling, + prefix=f"{prefix}.self_attn", ) self.block_sparse_moe = PhiMoE( num_experts=config.num_local_experts, @@ 
-454,8 +458,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: PhiMoEDecoderLayer(config, cache_config, - quant_config), + lambda prefix: PhiMoEDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = nn.LayerNorm(config.hidden_size, eps=config.rms_norm_eps, diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 44ce6eda42943..d3a776f665c74 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -442,6 +442,7 @@ def __init__( rope_scaling: Optional[Dict[str, Any]] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.hidden_size = hidden_size @@ -478,7 +479,8 @@ def __init__( self.head_dim, self.scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -502,6 +504,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.ln_1 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) @@ -514,7 +517,8 @@ def __init__( rope_theta=rope_theta, rope_scaling=rope_scaling, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") self.ln_2 = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) @@ -568,7 +572,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.h = make_layers( config.num_hidden_layers, - lambda prefix: QWenBlock(config, cache_config, quant_config), + lambda prefix: QWenBlock( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.h") self.ln_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 96a9bc451f4df..1091f88ab2534 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -168,6 +168,7 @@ def __init__( max_position_embeddings: int = 8192, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -220,7 +221,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -245,6 +247,7 @@ def __init__( layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -261,6 +264,7 @@ def __init__( max_position_embeddings=max_position_embeddings, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.self_attn", ) # Note: Qwen/Qwen2-57B-A14B-Instruct does not have @@ -336,7 +340,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): layer_idx=int( prefix.split(".")[-1]), cache_config=cache_config, - quant_config=quant_config), + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.layers", ) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git 
a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py index 6d6fafc5ab0eb..f58710d215056 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -167,6 +167,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + prefix=f"{prefix}.attn", ) def forward( diff --git a/vllm/model_executor/models/stablelm.py b/vllm/model_executor/models/stablelm.py index e11d2e916730a..6b2107bef0a66 100644 --- a/vllm/model_executor/models/stablelm.py +++ b/vllm/model_executor/models/stablelm.py @@ -77,7 +77,8 @@ class StablelmAttention(nn.Module): def __init__(self, config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None) -> None: + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "") -> None: super().__init__() self.config = config self.hidden_size = config.hidden_size @@ -131,7 +132,8 @@ def __init__(self, self.scaling, num_kv_heads=self.num_key_value_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -155,9 +157,13 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() - self.self_attn = StablelmAttention(config, cache_config, quant_config) + self.self_attn = StablelmAttention(config, + cache_config, + quant_config, + prefix=f"{prefix}.self_attn") self.mlp = StablelmMLP(config, quant_config) norm_eps = getattr(config, "norm_eps", getattr(config, "layer_norm_eps", 1e-05)) @@ -207,8 +213,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: StablelmDecoderLayer(config, cache_config, - quant_config), + lambda prefix: StablelmDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) norm_eps = getattr(config, "norm_eps", diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py index 74c66042226de..15e8f2af52cda 100644 --- a/vllm/model_executor/models/starcoder2.py +++ b/vllm/model_executor/models/starcoder2.py @@ -52,7 +52,8 @@ class Starcoder2Attention(nn.Module): def __init__(self, config: Starcoder2Config, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.config = config @@ -105,7 +106,8 @@ def __init__(self, self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -154,12 +156,14 @@ class Starcoder2DecoderLayer(nn.Module): def __init__(self, config: Starcoder2Config, cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None): + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Starcoder2Attention(config, cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.self_attn") self.mlp = Starcoder2MLP(config, quant_config=quant_config) self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) @@ -213,7 +217,8 @@ def __init__(self, *, vllm_config: 
VllmConfig, prefix: str = ""): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, lambda prefix: Starcoder2DecoderLayer( - config, cache_config, quant_config=quant_config), + config, cache_config, quant_config=quant_config, prefix=prefix + ), prefix=f"{prefix}.layers", ) self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon) diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py index bc37a997eabb5..25a0d474e2863 100644 --- a/vllm/model_executor/models/xverse.py +++ b/vllm/model_executor/models/xverse.py @@ -93,6 +93,7 @@ def __init__( quant_config: Optional[QuantizationConfig] = None, bias: bool = False, cache_config: Optional[CacheConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = hidden_size @@ -138,7 +139,8 @@ def __init__( self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.attn") def forward( self, @@ -162,6 +164,7 @@ def __init__( config: PretrainedConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -180,6 +183,7 @@ def __init__( quant_config=quant_config, bias=getattr(config, "bias", False), cache_config=cache_config, + prefix=f"{prefix}.self_attn", ) self.mlp = XverseMLP( hidden_size=self.hidden_size, @@ -243,8 +247,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: XverseDecoderLayer(config, cache_config, - quant_config), + lambda prefix: XverseDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers", ) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 9be9031dc3baf..cbc982752c6b4 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -20,6 +20,7 @@ class CpuPlatform(Platform): _enum = PlatformEnum.CPU device_type: str = "cpu" + dispatch_key: str = "CPU" @classmethod def get_device_name(cls, device_id: int = 0) -> str: diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index cf0d41081a5aa..70724b8be4c45 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -121,6 +121,7 @@ def device_id_to_physical_device_id(device_id: int) -> int: class CudaPlatform(Platform): _enum = PlatformEnum.CUDA device_type: str = "cuda" + dispatch_key: str = "CUDA" @classmethod def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index a8f568d31d5a7..3071136e43b85 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -13,6 +13,7 @@ class HpuPlatform(Platform): _enum = PlatformEnum.HPU device_type: str = "hpu" + dispatch_key: str = "HPU" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 07f23167d509a..3328665029039 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -57,6 +57,10 @@ def to_int(self) -> int: class Platform: _enum: PlatformEnum device_type: str + # available dispatch keys: + # check https://github.com/pytorch/pytorch/blob/313dac6c1ca0fa0cde32477509cce32089f8532a/torchgen/model.py#L134 # noqa + # use "CPU" as a fallback for platforms not 
registered in PyTorch + dispatch_key: str = "CPU" def is_cuda(self) -> bool: return self._enum == PlatformEnum.CUDA diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 33a41933e9fff..694de836e1517 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -18,6 +18,7 @@ class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO device_type: str = "openvino" + dispatch_key: str = "CPU" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 3fe8c01c15787..d2f44c3e423e3 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -36,6 +36,7 @@ class RocmPlatform(Platform): _enum = PlatformEnum.ROCM device_type: str = "cuda" + dispatch_key: str = "CUDA" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 513cfa54687dc..137af57023ea9 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -17,6 +17,7 @@ class TpuPlatform(Platform): _enum = PlatformEnum.TPU device_type: str = "tpu" + dispatch_key: str = "XLA" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index b2ee0ef2f71cd..69388a8e0f27c 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -17,6 +17,7 @@ class XPUPlatform(Platform): _enum = PlatformEnum.XPU device_type: str = "xpu" + dispatch_key: str = "XPU" @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/spec_decode/draft_model_runner.py b/vllm/spec_decode/draft_model_runner.py index cd4d7eb0e6e4e..cf166e3eb5bad 100644 --- a/vllm/spec_decode/draft_model_runner.py +++ b/vllm/spec_decode/draft_model_runner.py @@ -273,7 +273,8 @@ def execute_model( if previous_hidden_states is not None else {} # Run model - with set_forward_context(model_input.attn_metadata): + with set_forward_context(model_input.attn_metadata, + self.vllm_config): hidden_states = model_executable( input_ids=model_input.input_tokens, positions=model_input.input_positions, diff --git a/vllm/utils.py b/vllm/utils.py index 67b2629ecc933..30c371b0e3591 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1573,6 +1573,7 @@ def direct_register_custom_op( mutates_args: List[str], fake_impl: Optional[Callable] = None, target_lib: Optional[Library] = None, + dispatch_key: str = "CUDA", ): """ `torch.library.custom_op` can have significant overhead because it @@ -1601,7 +1602,7 @@ def direct_register_custom_op( schema_str = torch._custom_op.impl.infer_schema(op_func, mutates_args) my_lib = target_lib or vllm_lib my_lib.define(op_name + schema_str) - my_lib.impl(op_name, op_func, "CUDA") + my_lib.impl(op_name, op_func, dispatch_key=dispatch_key) if fake_impl is not None: my_lib._register_fake(op_name, fake_impl) diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index e73a1e60b2730..d98bb5a716e97 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -173,7 +173,8 @@ def unified_v1_flash_attention( alibi_slopes: Optional[torch.Tensor] = None, logits_soft_cap: Optional[float] = None, ) -> None: - current_metadata = get_forward_context() + context = get_forward_context() + current_metadata = context.dynamic_forward_context if current_metadata is None: # Profiling run. 
return diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 2cf55cd497659..02f9498142bb7 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -447,7 +447,7 @@ def execute_model( # Run the decoder. # Use persistent buffers for CUDA graphs. - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, self.vllm_config): hidden_states = self.model( input_ids=None, positions=self.positions[:num_input_tokens], @@ -523,7 +523,7 @@ def _dummy_run( num_tokens: int, kv_caches: List[torch.Tensor], ) -> torch.Tensor: - with set_forward_context(None): + with set_forward_context(None, self.vllm_config): hidden_states = model( input_ids=None, positions=self.positions[:num_tokens], diff --git a/vllm/worker/embedding_model_runner.py b/vllm/worker/embedding_model_runner.py index 37cfcbf13d7a3..4a55d91e71484 100644 --- a/vllm/worker/embedding_model_runner.py +++ b/vllm/worker/embedding_model_runner.py @@ -97,7 +97,7 @@ def execute_model( model_forward_end = torch.cuda.Event(enable_timing=True) model_forward_start.record() - with set_forward_context(model_input.attn_metadata): + with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_or_intermediate_states = model_executable( input_ids=model_input.input_tokens, positions=model_input.input_positions, diff --git a/vllm/worker/enc_dec_model_runner.py b/vllm/worker/enc_dec_model_runner.py index 687d2cc79360f..ae18c79c980c8 100644 --- a/vllm/worker/enc_dec_model_runner.py +++ b/vllm/worker/enc_dec_model_runner.py @@ -176,7 +176,7 @@ def execute_model( } if self.has_inner_state else {} multi_modal_kwargs = model_input.multi_modal_kwargs or {} - with set_forward_context(model_input.attn_metadata): + with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_or_intermediate_states = model_executable( input_ids=model_input.input_tokens, positions=model_input.input_positions, diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index ed0360fb7f727..13301b876217d 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1503,7 +1503,7 @@ def capture_model(self, kv_caches: List[List[torch.Tensor]]) -> None: self._update_inputs_to_capture_for_enc_dec_model( capture_inputs) - with set_forward_context(attn_metadata): + with set_forward_context(attn_metadata, self.vllm_config): graph_runner.capture(**capture_inputs) self.graph_memory_pool = graph_runner.graph.pool() self.graph_runners[virtual_engine][batch_size] = ( @@ -1649,7 +1649,7 @@ def execute_model( model_forward_end = torch.cuda.Event(enable_timing=True) model_forward_start.record() - with set_forward_context(model_input.attn_metadata): + with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_or_intermediate_states = model_executable( input_ids=model_input.input_tokens, positions=model_input.input_positions, From 97814fbf0f847a11d2e0eb339e3e7572ca69379d Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Fri, 22 Nov 2024 15:27:25 -0800 Subject: [PATCH 102/397] [v1] Refactor KVCacheManager for more hash input than token ids (#10507) Signed-off-by: rickyx Signed-off-by: Cody Yu Co-authored-by: Cody Yu --- tests/v1/core/test_prefix_caching.py | 225 +++++++++++++++++++-- vllm/v1/core/kv_cache_manager.py | 289 +++++++++++++-------------- vllm/v1/core/kv_cache_utils.py | 37 ++-- 3 files changed, 365 insertions(+), 186 deletions(-) diff --git a/tests/v1/core/test_prefix_caching.py b/tests/v1/core/test_prefix_caching.py index 
d614d3e67460f..83bfbb6ade8d7 100644 --- a/tests/v1/core/test_prefix_caching.py +++ b/tests/v1/core/test_prefix_caching.py @@ -1,8 +1,11 @@ """Compare the with and without prefix caching.""" +import pytest + from vllm.inputs import token_inputs from vllm.sampling_params import SamplingParams +from vllm.utils import cdiv from vllm.v1.core.kv_cache_manager import KVCacheManager, Request -from vllm.v1.core.kv_cache_utils import hash_block_tokens +from vllm.v1.core.kv_cache_utils import KVCacheBlock, hash_block_tokens def make_request(request_id, prompt_token_ids): @@ -31,7 +34,8 @@ def test_prefill(): # Fully cache miss # Incomplete 1 block (7 tokens) unique_token_ids = [3] * 7 - req0 = make_request("0", common_token_ids + unique_token_ids) + all_token_ids = common_token_ids + unique_token_ids + req0 = make_request("0", all_token_ids) computed_blocks = manager.get_computed_blocks(req0) assert not computed_blocks blocks = manager.allocate_slots(req0, 55, computed_blocks) @@ -40,24 +44,16 @@ def test_prefill(): # Check full block metadata parent_block_hash = None for block_id in (0, 1, 2): - block_hash = hash_block_tokens(parent_block_hash, - manager.block_pool[block_id].token_ids) + block_tokens = tuple(all_token_ids[block_id * 16:(block_id + 1) * 16]) + block_hash = hash_block_tokens(parent_block_hash, block_tokens) assert manager.block_pool[block_id].block_hash == block_hash assert manager.block_pool[block_id].ref_cnt == 1 - assert manager.block_pool[block_id].num_hashed_tokens == 16 * ( - block_id + 1) - assert manager.block_pool[block_id].token_ids == tuple([block_id] * 16) parent_block_hash = block_hash # Check partial/preallocated block metadata for block_id in (3, 4): assert manager.block_pool[block_id].block_hash is None assert manager.block_pool[block_id].ref_cnt == 1 - assert manager.block_pool[block_id].num_hashed_tokens == 0 - if block_id == 3: - assert manager.block_pool[block_id].token_ids == [3] * 7 - else: - assert not manager.block_pool[block_id].token_ids # Cache hit in the common prefix when the original block is still in use. # Incomplete 1 block (5 tokens) @@ -113,7 +109,7 @@ def test_prefill(): req3 = make_request("3", [99] * (16 * 9)) computed_blocks = manager.get_computed_blocks(req3) assert not computed_blocks - blocks = manager.allocate_slots(req2, 16 * 9, computed_blocks) + blocks = manager.allocate_slots(req3, 16 * 9, computed_blocks) # This block ID order also checks the eviction order. assert [b.block_id for b in blocks] == [9, 4, 3, 6, 5, 8, 7, 2, 1, 0] assert manager.free_block_queue.num_free_blocks == 0 @@ -148,7 +144,7 @@ def test_decode(): req0.append_output_token_ids(8) new_blocks = manager.append_slots(req0, 4) assert new_blocks is not None and len(new_blocks) == 0 - assert len(manager.block_pool[3].token_ids) == 11 + assert manager.req_to_blocks[req0.request_id][-2].block_hash is None # Append slots without allocating a new block, but start using the # preallocated block. @@ -159,8 +155,7 @@ def test_decode(): req0.append_output_token_ids(7) new_blocks = manager.append_slots(req0, 15) assert new_blocks is not None and len(new_blocks) == 0 - assert len(manager.block_pool[3].token_ids) == 16 - assert len(manager.block_pool[4].token_ids) == 10 + assert manager.req_to_blocks[req0.request_id][-2].block_hash is not None # Append slots with allocating a new block. req0.num_computed_tokens = 74 @@ -171,9 +166,6 @@ def test_decode(): new_blocks = manager.append_slots(req0, 17) # Plus one preallocated block. 
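The assertions above rely on hash_block_tokens chaining each full block's hash with the hash of its parent block, so a block's identity depends on the whole prefix rather than only its own 16 tokens. A minimal sketch of that chaining idea (not the exact vLLM implementation):

from typing import Optional, Tuple

def chained_block_hash(parent_hash: Optional[int],
                       block_tokens: Tuple[int, ...]) -> int:
    return hash((parent_hash, block_tokens))

block_size = 16
tokens = list(range(55))
parent_hash = None
for start in range(0, len(tokens) - block_size + 1, block_size):
    block = tuple(tokens[start:start + block_size])
    # Only full blocks get a hash; the trailing partial block (7 tokens here)
    # stays unhashed, matching the test above.
    parent_hash = chained_block_hash(parent_hash, block)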
assert new_blocks is not None and len(new_blocks) == 2 - assert len(manager.block_pool[4].token_ids) == 16 - assert len(manager.block_pool[5].token_ids) == 11 - assert len(manager.block_pool[6].token_ids) == 0 def test_evict(): @@ -217,3 +209,198 @@ def test_evict(): blocks = manager.allocate_slots(req2, 3, computed_blocks) assert [b.block_id for b in blocks] == [6, 5] assert manager.free_block_queue.num_free_blocks == 6 + + +def test_hash_block_correct_reuse(): + """ + This tests when a previously cached block is reused as a new block, + its hash metadata should be correctly reset. + """ + block_size = 16 + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=1, + sliding_window=False, + enable_caching=True, + num_preallocate_tokens=0, + ) + + # Allocate 1 block and cache it. + num_tokens = block_size * 1 + req = make_request("0", list(range(num_tokens))) + computed_blocks = manager.get_computed_blocks(req) + assert not computed_blocks + blocks = manager.allocate_slots(req, num_tokens, computed_blocks) + assert len(blocks) == 1 + + # Deallocate the block. + manager.free(req) + + # Allocate a new block that's not full, make sure hash info on the + # block is cleared. + req = make_request("1", list(range(num_tokens - 1))) + computed_blocks = manager.get_computed_blocks(req) + assert not computed_blocks + blocks = manager.allocate_slots(req, num_tokens - 1, computed_blocks) + assert len(blocks) == 1 + + assert manager.block_pool[blocks[0].block_id].block_hash is None + + +def test_computed_blocks_not_evicted(): + """ + Test that the computed blocks are not evicted when getting new blocks + for a request if there are any other free blocks. + """ + block_size = 16 + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=2, + sliding_window=False, + enable_caching=True, + num_preallocate_tokens=0, + ) + + # Allocate a block and cache it. + num_tokens = block_size * 1 + req0 = make_request("0", list(range(num_tokens))) + computed_blocks = manager.get_computed_blocks(req0) + assert not computed_blocks + blocks = manager.allocate_slots(req0, num_tokens, computed_blocks) + assert len(blocks) == 1 + assert blocks[0].block_id == 0 + + # Allocate another block. + req1 = make_request("1", list(range(num_tokens, num_tokens * 2))) + computed_blocks = manager.get_computed_blocks(req1) + assert not computed_blocks + blocks = manager.allocate_slots(req1, num_tokens, computed_blocks) + assert len(blocks) == 1 + assert blocks[0].block_id == 1 + + # Free the blocks. + manager.free(req0) + manager.free(req1) + + # Now if we have a cache hit on the first block, we should evict the second + # cached block rather than the first one. + req2 = make_request("2", list(range(num_tokens * 2))) + computed_blocks = manager.get_computed_blocks(req2) + assert len(computed_blocks) == 1 + assert computed_blocks[0].block_id == 0 + + blocks = manager.allocate_slots(req2, num_tokens * 2 - num_tokens, + computed_blocks) + assert len(blocks) == 1 + assert blocks[0].block_id == 1 + + +def test_basic_prefix_caching_disabled(): + """ + This tests that the prefix caching is disabled. 
+ """ + block_size = 4 + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=4, + sliding_window=False, + enable_caching=False, + num_preallocate_tokens=0, + ) + + req1 = make_request("1", list(range(10))) # 2 blocks and some more + + computed_blocks = manager.get_computed_blocks(req1) + assert not computed_blocks + blocks = manager.allocate_slots(req1, 10, computed_blocks) + assert len(blocks) == 3 + + # Free the blocks. + manager.free(req1) + + # No caching. + req2 = make_request("2", list(range(16))) # shared prefix + computed_blocks = manager.get_computed_blocks(req2) + assert not computed_blocks + blocks = manager.allocate_slots(req2, 16, computed_blocks) + assert len(blocks) == 4 + + # New requests should not have any blocks. + req3 = make_request("3", list(range(4))) + computed_blocks = manager.get_computed_blocks(req3) + assert not computed_blocks + blocks = manager.allocate_slots(req3, 4, computed_blocks) + assert not blocks + + +@pytest.mark.parametrize("num_preallocate_tokens", list(range(0, 8))) +@pytest.mark.parametrize("block_size", [4]) +def test_preallocate_blocks(num_preallocate_tokens: int, block_size: int): + """ + This tests that the preallocated blocks are correctly added. + """ + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=10, + sliding_window=False, + enable_caching=True, + num_preallocate_tokens=num_preallocate_tokens, + ) + num_preallocated_blocks = cdiv(num_preallocate_tokens, block_size) + + req = make_request("0", list(range(block_size * 30))) + computed_blocks = manager.get_computed_blocks(req) + assert not computed_blocks + # Just ask for 1 block. + blocks = manager.allocate_slots(req, block_size, computed_blocks) + assert len(blocks) == 1 + num_preallocated_blocks + + # Append slots to the block. + req.num_computed_tokens = block_size * len(blocks) # Assume all used. + blocks = manager.append_slots(req, block_size) # Append 1 block. + assert len(blocks) == 1 + num_preallocated_blocks + + +def test_cache_blocks(): + """ + This is a unit test that tests the correctness of the _cache_full_blocks + function of KVCacheManager. + """ + block_size = 4 + manager = KVCacheManager( + block_size=block_size, + num_gpu_blocks=5, + sliding_window=False, + enable_caching=True, + num_preallocate_tokens=0, + ) + # Req: + # Block 0: [0, 1, 2, 3] + # Block 1: [4, 5, 6, 7] + # Block 2: [8, 9, 10, 11] + # Block 3: [12, 13] + req = make_request("0", list(range(14))) + + # Test that blocks are cached correctly for 2 full blocks from the start. + blocks = [KVCacheBlock(block_id=i) for i in range(2)] + + manager._cache_full_blocks( + request=req, + blk_start_idx=0, + full_blocks=blocks, + prev_block=None, + ) + + assert len(manager.cached_block_hash_to_block) == 2 + assert all([block.block_hash is not None for block in blocks]) + + # Test that blocks that don't start from the beginning are cached correctly. 
+ blocks = [KVCacheBlock(block_id=2)] + manager._cache_full_blocks( + request=req, + blk_start_idx=2, + full_blocks=blocks, + prev_block=None, + ) + assert len(manager.cached_block_hash_to_block) == 3 + assert blocks[0].block_hash is not None diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 38f1c03a4d3ac..8eb3fb976eb87 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -79,6 +79,9 @@ def get_computed_blocks(self, request: Request) -> List[KVCacheBlock]: return [] computed_blocks = [] + + # TODO(rickyx): potentially we could cache this so we don't have to + # recompute it every time. block_hashes = hash_request_tokens(self.block_size, request.all_token_ids) @@ -120,47 +123,45 @@ def append_slots( # slots, but we cannot allocate new blocks due to the limit. return None - # When caching is enabled, assign token IDs to already allocated blocks. - new_token_ids = None - parent_block = None - if self.enable_caching: - # Figure out the token IDs to add to the blocks. - new_token_ids = request.all_token_ids[ - request.num_computed_tokens:request.num_computed_tokens + - num_tokens] - - # Find the last full block index. - # TODO: This may be optimized by calculating the computed tokens. - last_full_block_idx = len(req_blocks) - 1 - while (last_full_block_idx >= 0 - and req_blocks[last_full_block_idx].block_hash is None): - last_full_block_idx -= 1 - - parent_block = (req_blocks[last_full_block_idx] - if last_full_block_idx >= 0 else None) - token_id_idx = self._add_token_ids_to_blocks( - blocks=req_blocks[last_full_block_idx + 1:], - token_ids=new_token_ids, - parent_block=parent_block) - - new_token_ids = new_token_ids[token_id_idx:] - parent_block = req_blocks[-1] - - # No new block is needed. When caching is enabled, we make sure - # token_id_idx is equal to len(new_token_ids), meaning that all tokens - # are added to allocated blocks. - if num_required_blocks <= len(req_blocks): - assert not self.enable_caching or token_id_idx == num_tokens, \ - f"{token_id_idx=} != {num_tokens=}" - return [] + if num_new_blocks <= 0: + # No new block is needed. + new_blocks = [] + else: + # Get new blocks from the free block pool considering + # preallocated blocks. + num_new_blocks = min( + num_new_blocks + self.num_preallocate_blocks, + self.free_block_queue.num_free_blocks, + ) + + new_blocks = self._get_new_blocks(num_new_blocks) + req_blocks.extend(new_blocks) + + if not self.enable_caching: + return new_blocks + + num_computed_full_blocks = (request.num_computed_tokens // + self.block_size) + + # NOTE(rickyx): We are assuming the `num_tokens` are actual + # tokens rather than lookahead slots (e.g. for speculative decoding). + # TODO(rickyx): When supporting speculative decoding, we will need to + # differentiate between them so that we can know how many blocks are + # full after appending the actual tokens. + num_full_blocks_after_append = (request.num_computed_tokens + + num_tokens) // self.block_size + assert num_full_blocks_after_append <= len(req_blocks) + + new_full_blocks = req_blocks[ + num_computed_full_blocks:num_full_blocks_after_append] + self._cache_full_blocks( + request=request, + blk_start_idx=num_computed_full_blocks, + full_blocks=new_full_blocks, + prev_block=req_blocks[num_computed_full_blocks - 1] + if num_computed_full_blocks >= 1 else None, + ) - # Allocate new blocks considering preallocated blocks, and - # add token IDs to them if caching is enabled. 
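The new append_slots bookkeeping above only hashes blocks that became full during this step. A worked example of the index arithmetic, using assumed numbers:

block_size = 16
num_computed_tokens = 30   # tokens already processed for the request
num_new_tokens = 20        # tokens appended in this step

num_computed_full_blocks = num_computed_tokens // block_size            # 1
num_full_blocks_after_append = (num_computed_tokens
                                + num_new_tokens) // block_size         # 3

# Blocks 1 and 2 just became full, so only req_blocks[1:3] need their hashes
# computed and cached; block 0 was cached earlier and block 3 is still partial.
newly_full_indices = list(range(num_computed_full_blocks,
                                num_full_blocks_after_append))          # [1, 2]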
- num_new_blocks = min(num_new_blocks + self.num_preallocate_blocks, - self.free_block_queue.num_free_blocks) - new_blocks = self._get_new_blocks(num_new_blocks, new_token_ids, - parent_block) - req_blocks.extend(new_blocks) return new_blocks def allocate_slots( @@ -184,11 +185,20 @@ def allocate_slots( raise ValueError( f"num_tokens must be greater than 0, got {num_tokens}") - # If a computed block of a request is an eviction candidate (in the - # free queue and ref_cnt == 0), it cannot be counted as a free block - # when allocating this request. - num_evictable_computed_blocks = len( - [blk for blk in computed_blocks if blk.ref_cnt == 0]) + # Touch the computed blocks to make sure they won't be evicted. + num_evictable_computed_blocks = 0 + if self.enable_caching: + self._touch(computed_blocks) + + # If a computed block of a request is an eviction candidate (in the + # free queue and ref_cnt == 0), it cannot be counted as a free block + # when allocating this request. + num_evictable_computed_blocks = len( + [blk for blk in computed_blocks if blk.ref_cnt == 0]) + else: + assert not computed_blocks, ( + "Computed blocks should be empty when " + "prefix caching is disabled") num_required_blocks = cdiv(num_tokens, self.block_size) if (num_required_blocks > self.free_block_queue.num_free_blocks - @@ -201,35 +211,28 @@ def allocate_slots( num_new_blocks = min( num_required_blocks + self.num_preallocate_blocks, self.free_block_queue.num_free_blocks - - num_evictable_computed_blocks) - - num_computed_tokens = len(computed_blocks) * self.block_size + num_evictable_computed_blocks, + ) - # When caching is enabled, get the new token IDs and the parent block - # ID to generate cache keys. - new_token_ids = None - parent_block = None - if self.enable_caching: - # Touch the computed blocks to make sure they won't be evicted. - self._touch(computed_blocks) + # Concatenate the computed block IDs and the new block IDs. + new_blocks = self._get_new_blocks(num_new_blocks) + self.req_to_blocks[request.request_id] = computed_blocks + new_blocks - # Get the token IDs for the blocks being allocated for hashing. - new_token_ids = request.all_token_ids[ - num_computed_tokens:num_computed_tokens + num_tokens] - if not new_token_ids: - raise RuntimeError( - "Failed to infer the token IDs for allocation. " - f"#all_tokens={len(request.all_token_ids)} < " - f"#computed_tokens={num_computed_tokens}") + if not self.enable_caching: + return new_blocks - # Get the parent block ID to construct the block chain. - parent_block = computed_blocks[-1] if computed_blocks else None + num_computed_tokens = len(computed_blocks) * self.block_size + num_full_blocks = (num_computed_tokens + num_tokens) // self.block_size - new_blocks = self._get_new_blocks(num_new_blocks, new_token_ids, - parent_block) + self._cache_full_blocks( + request=request, + blk_start_idx=len(computed_blocks), + # The new full blocks are the full blocks that are not computed. + full_blocks=self.req_to_blocks[request.request_id] + [len(computed_blocks):num_full_blocks], + prev_block=computed_blocks[-1] if computed_blocks else None, + ) - # Concatenate the computed block IDs and the new block IDs. 
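In the allocate_slots path above, computed blocks whose ref_cnt is still 0 sit in the free queue, so they look free even though the request is about to reuse them; the capacity check subtracts them first. A compact sketch of that check, with hypothetical helper names:

from typing import List

def has_capacity(num_tokens: int, block_size: int, num_free_blocks: int,
                 computed_block_ref_cnts: List[int]) -> bool:
    num_required_blocks = -(num_tokens // -block_size)  # ceiling division
    num_evictable_computed = sum(1 for ref in computed_block_ref_cnts
                                 if ref == 0)
    # Evictable computed blocks are excluded from the free pool so that
    # reusing them is not double-counted as freshly available space.
    return num_required_blocks <= num_free_blocks - num_evictable_computed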
- self.req_to_blocks[request.request_id] = computed_blocks + new_blocks return new_blocks def free(self, request: Request) -> None: @@ -248,24 +251,17 @@ def free(self, request: Request) -> None: blocks = reversed(blocks) for block in blocks: - block.ref_cnt -= 1 + block.decr_ref() if block.ref_cnt == 0: self.free_block_queue.append(block) - def _get_new_blocks( - self, - num_blocks: int, - token_ids: Optional[List[int]] = None, - parent_block: Optional[int] = None) -> List[KVCacheBlock]: - """Get new blocks from the free block pool, and add token IDs to - allocated blocks if caching is enabled. + def _get_new_blocks(self, num_blocks: int) -> List[KVCacheBlock]: + """Get new blocks from the free block pool. + Note that we do not check block cache in this function. Args: num_blocks: The number of blocks to allocate. - token_ids: The token IDs in the blocks. None if caching is disabled. - parent_block: The parent block. Used to include block chain - in the block hash. Returns: A list of new block. @@ -274,56 +270,38 @@ def _get_new_blocks( raise ValueError( f"Cannot get {num_blocks} free blocks from the pool") - # First allocate blocks. ret: List[KVCacheBlock] = [] idx = 0 while idx < num_blocks: + # First allocate blocks. curr_block = self.free_block_queue.popleft() assert curr_block.ref_cnt == 0 - # Evict blocks from the cache. + # If the block is cached, evict it. if self.enable_caching: - block_hash = curr_block.block_hash - if (block_hash is not None - and block_hash in self.cached_block_hash_to_block): - if len(self.cached_block_hash_to_block[block_hash]) == 1: - del self.cached_block_hash_to_block[block_hash] - else: - del self.cached_block_hash_to_block[block_hash][ - curr_block.block_id] - curr_block.reset() - - curr_block.ref_cnt = 1 + self._evict_cached_block(curr_block) + + curr_block.incr_ref() ret.append(curr_block) idx += 1 - # Then assign token IDs to the allocated blocks. - if self.enable_caching: - assert token_ids is not None - token_id_idx = self._add_token_ids_to_blocks( - blocks=ret, token_ids=token_ids, parent_block=parent_block) - assert token_id_idx == len(token_ids) - return ret - def _cache_full_block(self, - block: KVCacheBlock, - parent_block: Optional[KVCacheBlock] = None) -> None: - """Cache a full block for prefix caching. + def _evict_cached_block(self, block: KVCacheBlock) -> None: + """ + If a block is cached in `cached_block_hash_to_block`, we reset its hash + metadata and evict it from the cache. Args: - block: The block to cache. - parent_block: The parent block. None if this is the first block. + block: The block to evict. 
""" - parent_block_hash = (parent_block.block_hash - if parent_block is not None else None) - assert len(block.token_ids) == self.block_size - block.token_ids = tuple(block.token_ids) - block_hash = hash_block_tokens(parent_block_hash, block.token_ids) - block.block_hash = block_hash - block.num_hashed_tokens = self.block_size + ( - parent_block.num_hashed_tokens if parent_block is not None else 0) - self.cached_block_hash_to_block[block_hash][block.block_id] = block + block_hash = block.block_hash + if block_hash and block_hash in self.cached_block_hash_to_block: + block.reset_hash() + del self.cached_block_hash_to_block[block_hash][block.block_id] + + if len(self.cached_block_hash_to_block[block_hash]) == 0: + del self.cached_block_hash_to_block[block_hash] def _get_cached_block(self, block_hash: BlockHashType) -> Optional[KVCacheBlock]: @@ -355,43 +333,50 @@ def _touch(self, blocks: List[KVCacheBlock]) -> None: # candidate), so remove it. if block.ref_cnt == 0: self.free_block_queue.remove(block) - block.ref_cnt += 1 - - def _add_token_ids_to_blocks( - self, - blocks: List[KVCacheBlock], - token_ids: List[int], - parent_block: Optional[KVCacheBlock] = None) -> int: - """Add token IDs to a list of allocated blocks. - If a block becomes full after adding token IDs, cache it. - Return the token ID index that has not been added to the blocks - if the blocks are not enough to hold all the token IDs. + block.incr_ref() - Args: - blocks: A list of blocks to add token IDs. - token_ids: A list of token IDs to add. - parent_block: The parent block. None if this is the - first block. + def _cache_full_blocks( + self, + request: Request, + blk_start_idx: int, + full_blocks: List[KVCacheBlock], + prev_block: Optional[KVCacheBlock], + ) -> None: + """Cache a list of full blocks for prefix caching. - Returns: - The starting token ID index that has not been added to the blocks - due to insufficient given blocks. + This function takes a list of blocks that will have their block hash + metadata to be updated and cached. Given a request, it computes the + block hashes for the blocks starting from `blk_start_idx` to the end + of the request's full blocks, updating the metadata for each block + and caching them in the `cached_block_hash_to_block`. + + Args: + request: The request to cache the blocks. + blk_start_idx: The index of the first block in the request's blocks + to cache. + full_blocks: The list of blocks to update hash metadata. + prev_block: The previous block in the chain. """ - token_id_start = 0 - for curr_block in blocks: - # If all token IDs are added, then the rest of the blocks are - # preallocated blocks, so we only need to update the - # parent_block_id. FIXME - if token_id_start == len(token_ids): - continue - - # Add token IDs to the empty slots in the block. - empty_slots = self.block_size - len(curr_block.token_ids) - token_id_end = min(token_id_start + empty_slots, len(token_ids)) - curr_block.token_ids.extend(token_ids[token_id_start:token_id_end]) - # Cache the block if it becomes full. - if len(curr_block.token_ids) == self.block_size: - self._cache_full_block(curr_block, parent_block) - parent_block = curr_block - token_id_start = token_id_end - return token_id_start + # Update the new blocks with the block hashes through the chain. 
+ prev_block_hash = (prev_block.block_hash + if prev_block is not None else None) + for i, blk in enumerate(full_blocks): + blk_idx = blk_start_idx + i + + block_tokens = request.all_token_ids[blk_idx * + self.block_size:(blk_idx + + 1) * + self.block_size] + assert len(block_tokens) == self.block_size, ( + f"Expected {self.block_size} tokens, got {len(block_tokens)} " + f"at {blk_idx}th block for request " + f"{request.request_id}({request})") + + # Compute the hash of the current block. + block_hash = hash_block_tokens(prev_block_hash, + tuple(block_tokens)) + + # Update and added the full block to the cache. + blk.block_hash = block_hash + self.cached_block_hash_to_block[block_hash][blk.block_id] = blk + prev_block_hash = block_hash diff --git a/vllm/v1/core/kv_cache_utils.py b/vllm/v1/core/kv_cache_utils.py index 33dbfb7377bfd..fb666c364bfb2 100644 --- a/vllm/v1/core/kv_cache_utils.py +++ b/vllm/v1/core/kv_cache_utils.py @@ -1,6 +1,6 @@ """KV-Cache Utilities.""" -from dataclasses import dataclass, field -from typing import List, Optional, Tuple, Union +from dataclasses import dataclass +from typing import List, Optional, Tuple from vllm.logger import init_logger @@ -16,27 +16,34 @@ class KVCacheBlock: block_id: int # Reference count. ref_cnt: int = 0 - # Token IDs in the block. When the block is full, the type of token_ids - # should be Tuple[int] for fast matching. - token_ids: Union[List[int], Tuple[int]] = field(default_factory=list) # The hash of the block composed of (block hash, tuple of token IDs). # It is only available when the block is full. - block_hash: Optional[BlockHashType] = None - # The number of hashed tokens. More hashed tokens means the block - # is closer to the end of a prompt and more likely to be evicted. - num_hashed_tokens: int = 0 + _block_hash: Optional[BlockHashType] = None # Used to construct a doubly linked list for free blocks. # These two attributes should only be manipulated by FreeKVCacheBlockQueue. prev_free_block: Optional["KVCacheBlock"] = None next_free_block: Optional["KVCacheBlock"] = None - def reset(self): - """Reset the block metadata.""" - self.ref_cnt = 0 - self.token_ids = [] - self.block_hash = None - self.num_hashed_tokens = 0 + def incr_ref(self): + self.ref_cnt += 1 + + def decr_ref(self): + self.ref_cnt -= 1 + + @property + def block_hash(self) -> Optional[BlockHashType]: + return self._block_hash + + @block_hash.setter + def block_hash(self, block_hash: BlockHashType): + assert self.block_hash is None, ( + "The block already has a hash. 
This should not happen.") + self._block_hash = block_hash + + def reset_hash(self): + """Reset the block hash when the block is evicted.""" + self._block_hash = None class FreeKVCacheBlockQueue: From 948c859571af9588e344079cc0e79bbf8597cb18 Mon Sep 17 00:00:00 2001 From: zixuanzhang226 Date: Fri, 22 Nov 2024 16:16:14 -0800 Subject: [PATCH 103/397] support bitsandbytes quantization with qwen model (#10549) Signed-off-by: Ubuntu --- vllm/model_executor/models/qwen.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index d3a776f665c74..8f001200308fe 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -1028,6 +1028,18 @@ class QWenLLM(QWenBaseModel): embedding_modules = {} embedding_padding_modules = [] + default_bitsandbytes_target_modules = [ + ".c_attn.", + ".c_proj.", + ".w1.", + ".w2.", + ] + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "w2": ("gate_up_proj", 0), + "w1": ("gate_up_proj", 1), + } + class QWenVL(QWenBaseModel, SupportsMultiModal): packed_modules_mapping = { From 28598f3939f9a04800f514e7fe62ab9bb8f617ec Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 22 Nov 2024 19:22:53 -0500 Subject: [PATCH 104/397] [Core] remove temporary local variables in LLMEngine.__init__ (#10577) Signed-off-by: Russell Bryant --- vllm/engine/llm_engine.py | 143 ++++++++++++++++++-------------------- 1 file changed, 66 insertions(+), 77 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 2a5eaf1340762..fb21b2dedeb74 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -231,19 +231,18 @@ def __init__( use_cached_outputs: bool = False, ) -> None: - # TODO: remove the local variables and use self.* throughout the class. 
- model_config = self.model_config = vllm_config.model_config - cache_config = self.cache_config = vllm_config.cache_config - lora_config = self.lora_config = vllm_config.lora_config - parallel_config = self.parallel_config = vllm_config.parallel_config - scheduler_config = self.scheduler_config = vllm_config.scheduler_config - device_config = self.device_config = vllm_config.device_config - speculative_config = self.speculative_config = vllm_config.speculative_config # noqa - load_config = self.load_config = vllm_config.load_config - decoding_config = self.decoding_config = vllm_config.decoding_config or DecodingConfig( # noqa + self.model_config = vllm_config.model_config + self.cache_config = vllm_config.cache_config + self.lora_config = vllm_config.lora_config + self.parallel_config = vllm_config.parallel_config + self.scheduler_config = vllm_config.scheduler_config + self.device_config = vllm_config.device_config + self.speculative_config = vllm_config.speculative_config # noqa + self.load_config = vllm_config.load_config + self.decoding_config = vllm_config.decoding_config or DecodingConfig( # noqa ) - prompt_adapter_config = self.prompt_adapter_config = vllm_config.prompt_adapter_config # noqa - observability_config = self.observability_config = vllm_config.observability_config or ObservabilityConfig( # noqa + self.prompt_adapter_config = vllm_config.prompt_adapter_config # noqa + self.observability_config = vllm_config.observability_config or ObservabilityConfig( # noqa ) logger.info( @@ -265,54 +264,43 @@ def __init__( "mm_processor_kwargs=%s, pooler_config=%r," "compilation_config=%r", VLLM_VERSION, - model_config.model, - speculative_config, - model_config.tokenizer, - model_config.skip_tokenizer_init, - model_config.tokenizer_mode, - model_config.revision, - model_config.override_neuron_config, - model_config.tokenizer_revision, - model_config.trust_remote_code, - model_config.dtype, - model_config.max_model_len, - load_config.download_dir, - load_config.load_format, - parallel_config.tensor_parallel_size, - parallel_config.pipeline_parallel_size, - parallel_config.disable_custom_all_reduce, - model_config.quantization, - model_config.enforce_eager, - cache_config.cache_dtype, - model_config.quantization_param_path, - device_config.device, - decoding_config, - observability_config, - model_config.seed, - model_config.served_model_name, - scheduler_config.num_scheduler_steps, - scheduler_config.chunked_prefill_enabled, - scheduler_config.multi_step_stream_outputs, - cache_config.enable_prefix_caching, - model_config.use_async_output_proc, + self.model_config.model, + self.speculative_config, + self.model_config.tokenizer, + self.model_config.skip_tokenizer_init, + self.model_config.tokenizer_mode, + self.model_config.revision, + self.model_config.override_neuron_config, + self.model_config.tokenizer_revision, + self.model_config.trust_remote_code, + self.model_config.dtype, + self.model_config.max_model_len, + self.load_config.download_dir, + self.load_config.load_format, + self.parallel_config.tensor_parallel_size, + self.parallel_config.pipeline_parallel_size, + self.parallel_config.disable_custom_all_reduce, + self.model_config.quantization, + self.model_config.enforce_eager, + self.cache_config.cache_dtype, + self.model_config.quantization_param_path, + self.device_config.device, + self.decoding_config, + self.observability_config, + self.model_config.seed, + self.model_config.served_model_name, + self.scheduler_config.num_scheduler_steps, + 
self.scheduler_config.chunked_prefill_enabled, + self.scheduler_config.multi_step_stream_outputs, + self.cache_config.enable_prefix_caching, + self.model_config.use_async_output_proc, use_cached_outputs, - model_config.mm_processor_kwargs, - model_config.pooler_config, + self.model_config.mm_processor_kwargs, + self.model_config.pooler_config, vllm_config.compilation_config, ) # TODO(woosuk): Print more configs in debug mode. - self.model_config = model_config - self.cache_config = cache_config - self.lora_config = lora_config - self.parallel_config = parallel_config - self.scheduler_config = scheduler_config - self.device_config = device_config - self.speculative_config = speculative_config - self.load_config = load_config - self.decoding_config = decoding_config or DecodingConfig() - self.prompt_adapter_config = prompt_adapter_config - self.observability_config = observability_config or ObservabilityConfig( - ) + self.log_stats = log_stats self.use_cached_outputs = use_cached_outputs @@ -334,15 +322,15 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: self.seq_counter = Counter() self.generation_config_fields = _load_generation_config_dict( - model_config) + self.model_config) - self.input_preprocessor = InputPreprocessor(model_config, + self.input_preprocessor = InputPreprocessor(self.model_config, self.tokenizer, mm_registry) self.input_registry = input_registry self.input_processor = input_registry.create_input_processor( - model_config) + self.model_config) self.model_executor = executor_class(vllm_config=vllm_config, ) @@ -354,36 +342,36 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: from vllm.model_executor.model_loader import ( get_architecture_class_name) usage_message.report_usage( - get_architecture_class_name(model_config), + get_architecture_class_name(self.model_config), usage_context, extra_kvs={ # Common configuration "dtype": - str(model_config.dtype), + str(self.model_config.dtype), "tensor_parallel_size": - parallel_config.tensor_parallel_size, + self.parallel_config.tensor_parallel_size, "block_size": - cache_config.block_size, + self.cache_config.block_size, "gpu_memory_utilization": - cache_config.gpu_memory_utilization, + self.cache_config.gpu_memory_utilization, # Quantization "quantization": - model_config.quantization, + self.model_config.quantization, "kv_cache_dtype": - str(cache_config.cache_dtype), + str(self.cache_config.cache_dtype), # Feature flags "enable_lora": - bool(lora_config), + bool(self.lora_config), "enable_prompt_adapter": - bool(prompt_adapter_config), + bool(self.prompt_adapter_config), "enable_prefix_caching": - cache_config.enable_prefix_caching, + self.cache_config.enable_prefix_caching, "enforce_eager": - model_config.enforce_eager, + self.model_config.enforce_eager, "disable_custom_all_reduce": - parallel_config.disable_custom_all_reduce, + self.parallel_config.disable_custom_all_reduce, }) if self.tokenizer: @@ -402,7 +390,7 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: for _ in range(self.parallel_config.pipeline_parallel_size) ] - if model_config.use_async_output_proc: + if self.model_config.use_async_output_proc: process_model_outputs = weak_bind(self._process_model_outputs) self.async_callbacks = [ @@ -422,11 +410,11 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: # GPU and CPU blocks, which are profiled in the distributed executor. 
self.scheduler = [ Scheduler( - scheduler_config, cache_config, lora_config, - parallel_config.pipeline_parallel_size, + self.scheduler_config, self.cache_config, self.lora_config, + self.parallel_config.pipeline_parallel_size, self.async_callbacks[v_id] - if model_config.use_async_output_proc else None) - for v_id in range(parallel_config.pipeline_parallel_size) + if self.model_config.use_async_output_proc else None) + for v_id in range(self.parallel_config.pipeline_parallel_size) ] # Metric Logging. @@ -448,7 +436,8 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: "prometheus": PrometheusStatLogger( local_interval=_LOCAL_LOGGING_INTERVAL_SEC, - labels=dict(model_name=model_config.served_model_name), + labels=dict( + model_name=self.model_config.served_model_name), max_model_len=self.model_config.max_model_len), } self.stat_loggers["prometheus"].info("cache_config", From d345f409b7478c0e547b238916ec9e90b6156bbc Mon Sep 17 00:00:00 2001 From: Zhonghua Deng Date: Sat, 23 Nov 2024 09:16:15 +0800 Subject: [PATCH 105/397] [V1] EngineCore supports profiling (#10564) Signed-off-by: Abatom --- vllm/v1/engine/__init__.py | 6 ++++++ vllm/v1/engine/async_llm.py | 4 ++-- vllm/v1/engine/core.py | 14 ++++++++++++-- vllm/v1/engine/core_client.py | 28 +++++++++++++++++++++++----- vllm/v1/worker/gpu_worker.py | 25 +++++++++++++++++++++++++ 5 files changed, 68 insertions(+), 9 deletions(-) diff --git a/vllm/v1/engine/__init__.py b/vllm/v1/engine/__init__.py index edfb8bd7c2fc1..967124fd850ea 100644 --- a/vllm/v1/engine/__init__.py +++ b/vllm/v1/engine/__init__.py @@ -68,6 +68,11 @@ class EngineCoreOutputs(msgspec.Struct, outputs: List[EngineCoreOutput] +@dataclass +class EngineCoreProfile: + is_start: bool + + class EngineCoreRequestType(enum.Enum): """ Request types defined as hex byte strings, so it can be sent over sockets @@ -75,3 +80,4 @@ class EngineCoreRequestType(enum.Enum): """ ADD = b'\x00' ABORT = b'\x01' + PROFILE = b'\x02' diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 09bff9655a882..c44ebb2a85ba0 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -346,10 +346,10 @@ async def check_health(self) -> None: logger.debug("Called check_health.") async def start_profile(self) -> None: - raise ValueError("Not supported on V1 yet.") + await self.engine_core.profile(True) async def stop_profile(self) -> None: - raise ValueError("Not supported on V1 yet.") + await self.engine_core.profile(False) @property def is_running(self) -> bool: diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 35ed131d50de9..1a978fbe7355f 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -1,4 +1,5 @@ import multiprocessing +import pickle import queue import threading import time @@ -16,7 +17,8 @@ from vllm.usage.usage_lib import UsageContext from vllm.v1.core.scheduler import Scheduler from vllm.v1.engine import (EngineCoreOutput, EngineCoreOutputs, - EngineCoreRequest, EngineCoreRequestType) + EngineCoreProfile, EngineCoreRequest, + EngineCoreRequestType) from vllm.v1.engine.mm_input_mapper import MMInputMapper from vllm.v1.executor.gpu_executor import GPUExecutor from vllm.v1.request import Request, RequestStatus @@ -126,6 +128,9 @@ def step(self) -> List[EngineCoreOutput]: scheduler_output, output) return engine_core_outputs + def profile(self, is_start=True): + self.model_executor.worker.profile(is_start) + class EngineCoreProc(EngineCore): """ZMQ-wrapper for running EngineCore in background process.""" @@ -312,11 
+317,14 @@ def _log_stats(self): self._last_logging_time = now def _handle_client_request( - self, request: Union[EngineCoreRequest, List[str]]) -> None: + self, request: Union[EngineCoreRequest, EngineCoreProfile, + List[str]]) -> None: """Handle EngineCoreRequest or EngineCoreABORT from Client.""" if isinstance(request, EngineCoreRequest): self.add_request(request) + elif isinstance(request, EngineCoreProfile): + self.model_executor.worker.profile(request.is_start) else: # TODO: make an EngineCoreAbort wrapper assert isinstance(request, list) @@ -341,6 +349,8 @@ def process_input_socket(self, input_path: str): request = decoder_add_req.decode(request_data) elif request_type == EngineCoreRequestType.ABORT.value: request = decoder_abort_req.decode(request_data) + elif request_type == EngineCoreRequestType.PROFILE.value: + request = pickle.loads(request_data) else: raise ValueError(f"Unknown RequestType: {request_type}") diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index 09801e20e16ca..835963f7ee86c 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -9,7 +9,8 @@ from vllm.logger import init_logger from vllm.utils import get_open_zmq_ipc_path from vllm.v1.engine import (EngineCoreOutput, EngineCoreOutputs, - EngineCoreRequest, EngineCoreRequestType) + EngineCoreProfile, EngineCoreRequest, + EngineCoreRequestType) from vllm.v1.engine.core import EngineCore, EngineCoreProc from vllm.v1.serial_utils import PickleEncoder @@ -58,6 +59,9 @@ def get_output(self) -> List[EngineCoreOutput]: def add_request(self, request: EngineCoreRequest) -> None: raise NotImplementedError + async def profile(self, is_start=True) -> None: + raise NotImplementedError + def abort_requests(self, request_ids: List[str]) -> None: raise NotImplementedError @@ -95,6 +99,9 @@ def add_request(self, request: EngineCoreRequest) -> None: def abort_requests(self, request_ids: List[str]) -> None: self.engine_core.abort_requests(request_ids) + async def profile(self, is_start=True) -> None: + self.engine_core.profile(is_start) + class MPClient(EngineCoreClient): """ @@ -177,8 +184,10 @@ def get_output(self) -> List[EngineCoreOutput]: engine_core_outputs = self.decoder.decode(frame.buffer).outputs return engine_core_outputs - def _send_input(self, request_type: EngineCoreRequestType, - request: Union[EngineCoreRequest, List[str]]) -> None: + def _send_input( + self, request_type: EngineCoreRequestType, + request: Union[EngineCoreRequest, EngineCoreProfile, + List[str]]) -> None: # (RequestType, SerializedRequest) msg = (request_type.value, self.encoder.encode(request)) @@ -190,6 +199,10 @@ def add_request(self, request: EngineCoreRequest) -> None: def abort_requests(self, request_ids: List[str]) -> None: self._send_input(EngineCoreRequestType.ABORT, request_ids) + async def profile(self, is_start=True) -> None: + self._send_input(EngineCoreRequestType.PROFILE, + EngineCoreProfile(is_start)) + class AsyncMPClient(MPClient): """Asyncio-compatible client for multi-proc EngineCore.""" @@ -205,8 +218,9 @@ async def get_output_async(self) -> List[EngineCoreOutput]: return engine_core_outputs async def _send_input( - self, request_type: EngineCoreRequestType, - request: Union[EngineCoreRequest, List[str]]) -> None: + self, request_type: EngineCoreRequestType, + request: Union[EngineCoreRequest, EngineCoreProfile, + List[str]]) -> None: msg = (request_type.value, self.encoder.encode(request)) await self.input_socket.send_multipart(msg, copy=False) @@ -217,3 +231,7 @@ async def 
add_request_async(self, request: EngineCoreRequest) -> None: async def abort_requests_async(self, request_ids: List[str]) -> None: if len(request_ids) > 0: await self._send_input(EngineCoreRequestType.ABORT, request_ids) + + async def profile(self, is_start=True) -> None: + await self._send_input(EngineCoreRequestType.PROFILE, + EngineCoreProfile(is_start)) diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index 7973349f14a5d..d33b55a8a9f9a 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -6,6 +6,7 @@ import torch import torch.distributed +import vllm.envs as envs from vllm.config import CacheConfig, ModelConfig, ParallelConfig, VllmConfig from vllm.distributed import (ensure_model_parallel_initialized, init_distributed_environment, @@ -56,6 +57,22 @@ def __init__( init_cached_hf_modules() self.model_runner = GPUModelRunner(vllm_config) + # Torch profiler. Enabled and configured through env vars: + # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace + if envs.VLLM_TORCH_PROFILER_DIR: + torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR + logger.info("Profiling enabled. Traces will be saved to: %s", + torch_profiler_trace_dir) + self.profiler = torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.CUDA, + ], + with_stack=True, + on_trace_ready=torch.profiler.tensorboard_trace_handler( + torch_profiler_trace_dir, use_gzip=True)) + else: + self.profiler = None def initialize(self): if self.device_config.device.type == "cuda": @@ -184,6 +201,14 @@ def execute_model( # TODO(woosuk): Send the output to the engine process. return output + def profile(self, is_start=True): + if self.profiler is None: + raise RuntimeError("Profiler is not enabled.") + if is_start: + self.profiler.start() + else: + self.profiler.stop() + def init_worker_distributed_environment( parallel_config: ParallelConfig, From d559979c548c4bee6eca089d5e6dc318630bf465 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 22 Nov 2024 17:34:03 -0800 Subject: [PATCH 106/397] [bugfix] fix cpu tests (#10585) Signed-off-by: youkaichao --- vllm/worker/cpu_embedding_model_runner.py | 4 +++- vllm/worker/cpu_enc_dec_model_runner.py | 4 +++- vllm/worker/cpu_model_runner.py | 18 ++++++++++-------- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/vllm/worker/cpu_embedding_model_runner.py b/vllm/worker/cpu_embedding_model_runner.py index d0b8fec48d74f..978de73df6b70 100644 --- a/vllm/worker/cpu_embedding_model_runner.py +++ b/vllm/worker/cpu_embedding_model_runner.py @@ -3,6 +3,7 @@ import torch +from vllm.forward_context import set_forward_context from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.multimodal import MultiModalKwargs from vllm.pooling_params import PoolingParams @@ -64,7 +65,8 @@ def execute_model( intermediate_tensors, } - hidden_states = model_executable(**execute_model_kwargs) + with set_forward_context(model_input.attn_metadata, self.vllm_config): + hidden_states = model_executable(**execute_model_kwargs) # Only perform pooling in the driver worker. 
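The worker-side profiler above is gated by VLLM_TORCH_PROFILER_DIR and driven by explicit start/stop calls. A standalone sketch of the same torch.profiler pattern, using an arbitrary example output directory:

import torch

profiler = torch.profiler.profile(
    activities=[
        torch.profiler.ProfilerActivity.CPU,
        torch.profiler.ProfilerActivity.CUDA,
    ],
    with_stack=True,
    on_trace_ready=torch.profiler.tensorboard_trace_handler(
        "./vllm_profile", use_gzip=True),
)

profiler.start()
# ... run the forward passes to be profiled ...
profiler.stop()  # the trace is written out when profiling stops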
if not self.is_driver_worker: diff --git a/vllm/worker/cpu_enc_dec_model_runner.py b/vllm/worker/cpu_enc_dec_model_runner.py index d040831870bd8..1f8e2d2d88a23 100644 --- a/vllm/worker/cpu_enc_dec_model_runner.py +++ b/vllm/worker/cpu_enc_dec_model_runner.py @@ -4,6 +4,7 @@ import torch from vllm.attention import AttentionMetadata +from vllm.forward_context import set_forward_context from vllm.model_executor import SamplingMetadata from vllm.model_executor.layers.sampler import SamplerOutput from vllm.multimodal import MultiModalKwargs @@ -303,7 +304,8 @@ def execute_model( intermediate_tensors, } - hidden_states = model_executable(**execute_model_kwargs) + with set_forward_context(model_input.attn_metadata, self.vllm_config): + hidden_states = model_executable(**execute_model_kwargs) # Compute the logits. logits = self.model.compute_logits(hidden_states, diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index 66bd844c94901..2cf573625401a 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -10,6 +10,7 @@ from vllm.attention import AttentionMetadata, get_attn_backend from vllm.config import VllmConfig +from vllm.forward_context import set_forward_context from vllm.logger import init_logger from vllm.model_executor import SamplingMetadata from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding @@ -487,14 +488,15 @@ def execute_model( multimodal_kwargs = MultiModalKwargs.as_kwargs( model_input.multi_modal_kwargs, device=self.device) - hidden_states = model_executable( - input_ids=model_input.input_tokens, - positions=model_input.input_positions, - kv_caches=kv_caches, - attn_metadata=model_input.attn_metadata, - intermediate_tensors=intermediate_tensors, - **multimodal_kwargs, - ) + with set_forward_context(model_input.attn_metadata, self.vllm_config): + hidden_states = model_executable( + input_ids=model_input.input_tokens, + positions=model_input.input_positions, + kv_caches=kv_caches, + attn_metadata=model_input.attn_metadata, + intermediate_tensors=intermediate_tensors, + **multimodal_kwargs, + ) # Compute the logits. logits = self.model.compute_logits(hidden_states, From 9195dbdbcadb681db67181a664521bd6ef98deee Mon Sep 17 00:00:00 2001 From: Travis Johnson Date: Fri, 22 Nov 2024 19:17:38 -0700 Subject: [PATCH 107/397] [Bugfix][Frontend] Update Llama Chat Templates to also support Non-Tool use (#10164) Signed-off-by: Travis Johnson --- .../tool_chat_template_llama3.1_json.jinja | 46 +++++++-- .../tool_chat_template_llama3.2_json.jinja | 96 ++++++++++++++----- tests/entrypoints/test_chat_utils.py | 4 +- 3 files changed, 110 insertions(+), 36 deletions(-) diff --git a/examples/tool_chat_template_llama3.1_json.jinja b/examples/tool_chat_template_llama3.1_json.jinja index c24a7e51335ef..033830936a56b 100644 --- a/examples/tool_chat_template_llama3.1_json.jinja +++ b/examples/tool_chat_template_llama3.1_json.jinja @@ -19,10 +19,18 @@ {#- This block extracts the system message, so we can slot it into the right place. #} {%- if messages[0]['role'] == 'system' %} - {%- set system_message = messages[0]['content']|trim %} + {%- if messages[0]['content'] is string %} + {%- set system_message = messages[0]['content']|trim %} + {%- else %} + {%- set system_message = messages[0]['content'][0]['text']|trim %} + {%- endif %} {%- set messages = messages[1:] %} {%- else %} - {%- set system_message = "You are a helpful assistant with tool calling capabilities. 
Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." %} + {%- if tools is not none %} + {%- set system_message = "You are a helpful assistant with tool calling capabilities. Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." %} + {%- else %} + {%- set system_message = "" %} + {%- endif %} {%- endif %} {#- System message #} @@ -33,8 +41,8 @@ {{- "Cutting Knowledge Date: December 2023\n" }} {{- "Today Date: " + date_string + "\n\n" }} {%- if tools is not none and not tools_in_user_message %} - {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} - {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call. " }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. ' }} {{- "Do not use variables.\n\n" }} {%- for t in tools %} {{- t | tojson(indent=4) }} @@ -48,7 +56,11 @@ {%- if tools_in_user_message and not tools is none %} {#- Extract the first user message so we can plug it in here #} {%- if messages | length != 0 %} - {%- set first_user_message = messages[0]['content']|trim %} + {%- if messages[0]['content'] is string %} + {%- set first_user_message = messages[0]['content']|trim %} + {%- else %} + {%- set first_user_message = messages[0]['content'] | selectattr('type', 'equalto', 'text') | map(attribute='text') | map('trim') | join('\n') %} + {%- endif %} {%- set messages = messages[1:] %} {%- else %} {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} @@ -56,7 +68,7 @@ {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} {{- "Given the following functions, please respond with a JSON for a function call " }} {{- "with its proper arguments that best answers the given prompt.\n\n" }} - {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. 
' }} {{- "Do not use variables.\n\n" }} {%- for t in tools %} {{- t | tojson(indent=4) }} @@ -67,7 +79,17 @@ {%- for message in messages %} {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} - {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }} + {%- if message['content'] is string %} + {{- message['content'] | trim}} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'text' %} + {{- content['text'] | trim }} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- '<|eot_id|>' }} {%- elif 'tool_calls' in message %} {%- if not message.tool_calls|length == 1 %} {{- raise_exception("This model only supports single tool-calls at once!") }} @@ -81,10 +103,14 @@ {{- "<|eot_id|>" }} {%- elif message.role == "tool" or message.role == "ipython" %} {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} - {%- if message.content is mapping %} - {{- message.content | tojson }} - {%- else %} + {%- if message.content is string %} {{- { "output": message.content } | tojson }} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'text' %} + {{- { "output": content['text'] } | tojson }} + {%- endif %} + {%- endfor %} {%- endif %} {{- "<|eot_id|>" }} {%- endif %} diff --git a/examples/tool_chat_template_llama3.2_json.jinja b/examples/tool_chat_template_llama3.2_json.jinja index 7e24777726a35..39f902c1c3c40 100644 --- a/examples/tool_chat_template_llama3.2_json.jinja +++ b/examples/tool_chat_template_llama3.2_json.jinja @@ -16,38 +16,70 @@ {%- set tools = none %} {%- endif %} +{#- Find out if there are any images #} +{% set image_ns = namespace(has_images=false) %} +{%- for message in messages %} + {%- for content in message['content'] %} + {%- if content['type'] == 'image' %} + {%- set image_ns.has_images = true %} + {%- endif %} + {%- endfor %} +{%- endfor %} + + {#- This block extracts the system message, so we can slot it into the right place. #} {%- if messages[0]['role'] == 'system' %} - {%- set system_message = messages[0]['content']|trim %} + {%- if messages[0]['content'] is string %} + {%- set system_message = messages[0]['content']|trim %} + {%- else %} + {#- Support vLLM's transforming of a content string to JSON. #} + {%- set system_message = messages[0]['content'][0]['text']|trim %} + {%- endif %} {%- set messages = messages[1:] %} {%- else %} - {%- set system_message = "You are a helpful assistant with tool calling capabilities. Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." %} + {%- if tools is not none %} + {%- set system_message = "You are a helpful assistant with tool calling capabilities. Only reply with a tool call if the function exists in the library provided by the user. If it doesn't exist, just reply directly in natural language. When you receive a tool call response, use the output to format an answer to the original user question." 
%} + {%- else %} + {%- set system_message = "" %} + {%- endif %} {%- endif %} -{#- System message #} -{{- "<|start_header_id|>system<|end_header_id|>\n\n" }} -{%- if tools is not none %} - {{- "Environment: ipython\n" }} +{#- Including an image is not compatible with a system message #} +{%- if image_ns.has_images and not system_message == "" %} + {{- raise_exception("Prompting with images is incompatible with system messages and tool use.") }} {%- endif %} -{{- "Cutting Knowledge Date: December 2023\n" }} -{{- "Today Date: " + date_string + "\n\n" }} -{%- if tools is not none and not tools_in_user_message %} - {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} - {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} - {{- "Do not use variables.\n\n" }} - {%- for t in tools %} - {{- t | tojson(indent=4) }} - {{- "\n\n" }} - {%- endfor %} + + +{#- System message, if there are no images #} +{%- if not image_ns.has_images %} + {{- "<|start_header_id|>system<|end_header_id|>\n\n" }} + {%- if tools is not none %} + {{- "Environment: ipython\n" }} + {%- endif %} + {{- "Cutting Knowledge Date: December 2023\n" }} + {{- "Today Date: " + date_string + "\n\n" }} + {%- if tools is not none and not tools_in_user_message %} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call. " }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. ' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} + {%- endif %} + {{- system_message }} + {{- "<|eot_id|>" }} {%- endif %} -{{- system_message }} -{{- "<|eot_id|>" }} {#- Custom tools are passed in a user message with some extra guidance #} {%- if tools_in_user_message and not tools is none %} {#- Extract the first user message so we can plug it in here #} {%- if messages | length != 0 %} - {%- set first_user_message = messages[0]['content']|trim %} + {%- if messages[0]['content'] is string %} + {%- set first_user_message = messages[0]['content']|trim %} + {%- else %} + {%- set first_user_message = messages[0]['content'] | selectattr('type', 'equalto', 'text') | map(attribute='text') | map('trim') | join('\n') %} + {%- endif %} {%- set messages = messages[1:] %} {%- else %} {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} @@ -55,7 +87,7 @@ {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} {{- "Given the following functions, please respond with a JSON for a function call " }} {{- "with its proper arguments that best answers the given prompt.\n\n" }} - {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}. 
' }} {{- "Do not use variables.\n\n" }} {%- for t in tools %} {{- t | tojson(indent=4) }} @@ -66,7 +98,19 @@ {%- for message in messages %} {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} - {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }} + {%- if message['content'] is string %} + {{- message['content'] | trim}} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'image' %} + {{- '<|image|>' }} + {%- elif content['type'] == 'text' %} + {{- content['text'] | trim }} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- '<|eot_id|>' }} {%- elif 'tool_calls' in message %} {%- if not message.tool_calls|length == 1 %} {{- raise_exception("This model only supports single tool-calls at once!") }} @@ -80,10 +124,14 @@ {{- "<|eot_id|>" }} {%- elif message.role == "tool" or message.role == "ipython" %} {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} - {%- if message.content is mapping %} - {{- message.content | tojson }} - {%- else %} + {%- if message.content is string %} {{- { "output": message.content } | tojson }} + {%- else %} + {%- for content in message['content'] %} + {%- if content['type'] == 'text' %} + {{- { "output": content['text'] } | tojson }} + {%- endif %} + {%- endfor %} {%- endif %} {{- "<|eot_id|>" }} {%- endif %} diff --git a/tests/entrypoints/test_chat_utils.py b/tests/entrypoints/test_chat_utils.py index 72477e048eafa..996e60bfee592 100644 --- a/tests/entrypoints/test_chat_utils.py +++ b/tests/entrypoints/test_chat_utils.py @@ -766,8 +766,8 @@ def test_resolve_content_format_hf_defined(model, expected_format): ("tool_chat_template_granite_20b_fc.jinja", "string"), ("tool_chat_template_hermes.jinja", "string"), ("tool_chat_template_internlm2_tool.jinja", "string"), - ("tool_chat_template_llama3.1_json.jinja", "string"), - ("tool_chat_template_llama3.2_json.jinja", "string"), + ("tool_chat_template_llama3.1_json.jinja", "openai"), + ("tool_chat_template_llama3.2_json.jinja", "openai"), ("tool_chat_template_mistral_parallel.jinja", "string"), ("tool_chat_template_mistral.jinja", "string")], ) From ebda51968b12b85c8b5b82727b2b7713dfc44f88 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 22 Nov 2024 21:23:51 -0500 Subject: [PATCH 108/397] [Core] Fix broken log configuration (#10458) Signed-off-by: Russell Bryant --- examples/logging_configuration.md | 2 +- vllm/logger.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/examples/logging_configuration.md b/examples/logging_configuration.md index 0d278b0392403..9ac8b13cd5eaf 100644 --- a/examples/logging_configuration.md +++ b/examples/logging_configuration.md @@ -118,7 +118,7 @@ configuration for the root vLLM logger and for the logger you wish to silence: { "formatters": { "vllm": { - "class": "vllm.logging.NewLineFormatter", + "class": "vllm.logging_utils.NewLineFormatter", "datefmt": "%m-%d %H:%M:%S", "format": "%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s" } diff --git a/vllm/logger.py b/vllm/logger.py index 9e16e591315ba..538db0dcf19aa 100644 --- a/vllm/logger.py +++ b/vllm/logger.py @@ -50,7 +50,7 @@ def _configure_vllm_root_logger() -> None: - logging_config: Optional[Dict] = None + logging_config: Dict = {} if not VLLM_CONFIGURE_LOGGING and VLLM_LOGGING_CONFIG_PATH: raise RuntimeError( @@ -75,6 +75,11 @@ def _configure_vllm_root_logger() -> None: 
type(custom_config).__name__) logging_config = custom_config + for formatter in logging_config.get("formatters", {}).values(): + # This provides backwards compatibility after #10134. + if formatter.get("class") == "vllm.logging.NewLineFormatter": + formatter["class"] = "vllm.logging_utils.NewLineFormatter" + if logging_config: dictConfig(logging_config) From 978b39744b22e90d49a0f5367c3d933ed26d66c8 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Fri, 22 Nov 2024 22:14:03 -0500 Subject: [PATCH 109/397] [Misc] Add pynccl wrappers for all_gather and reduce_scatter (#9432) --- tests/distributed/test_pynccl.py | 69 +++++++++++++++++++ .../device_communicators/pynccl.py | 42 +++++++++++ .../device_communicators/pynccl_wrapper.py | 44 ++++++++++++ 3 files changed, 155 insertions(+) diff --git a/tests/distributed/test_pynccl.py b/tests/distributed/test_pynccl.py index e0e424439e3a5..f702d7c46ea73 100644 --- a/tests/distributed/test_pynccl.py +++ b/tests/distributed/test_pynccl.py @@ -150,6 +150,75 @@ def worker_fn_with_cudagraph(): assert a.mean().cpu().item() == pynccl_comm.world_size**1 +@worker_fn_wrapper +def all_gather_worker_fn(): + pynccl_comm = PyNcclCommunicator(get_world_group().cpu_group, + device=get_world_group().device) + + rank = pynccl_comm.rank + world_size = pynccl_comm.world_size + device = f'cuda:{pynccl_comm.rank}' + + num_elems = 1000 + tensor = torch.arange(num_elems, dtype=torch.float32, + device=device) + rank * num_elems + result = torch.zeros(num_elems * world_size, + dtype=torch.float32, + device=device) + + expected = torch.cat([ + torch.arange(num_elems, dtype=torch.float32) + r * num_elems + for r in range(world_size) + ]).to(device) + + with pynccl_comm.change_state(enable=True): + pynccl_comm.all_gather(result, tensor) + torch.testing.assert_close(result, expected, rtol=1e-5, atol=1e-8) + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +def test_pynccl_all_gather(): + distributed_run(all_gather_worker_fn, 2) + + +@worker_fn_wrapper +def reduce_scatter_worker_fn(): + pynccl_comm = PyNcclCommunicator(get_world_group().cpu_group, + device=get_world_group().device) + + rank = pynccl_comm.rank + world_size = pynccl_comm.world_size + device = f'cuda:{pynccl_comm.rank}' + + num_elems = 1000 + tensor = torch.arange(num_elems, dtype=torch.float32, + device=device) + rank * num_elems + assert (num_elems % world_size == 0) + result = torch.zeros(num_elems // world_size, + dtype=torch.float32, + device=device) + + # Calculate expected result for this rank's chunk + scattered_size = num_elems // world_size + all_tensors = [ + torch.arange(num_elems, dtype=torch.float32) + r * num_elems + for r in range(world_size) + ] + expected = sum(tensor[rank * scattered_size:(rank + 1) * scattered_size] + for tensor in all_tensors).to(device) + + with pynccl_comm.change_state(enable=True): + pynccl_comm.reduce_scatter(result, tensor) + torch.testing.assert_close(result, expected, rtol=1e-5, atol=1e-8) + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +def test_pynccl_reduce_scatter(): + distributed_run(reduce_scatter_worker_fn, 2) + + @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need at least 2 GPUs to run the test.") def test_pynccl_with_cudagraph(): diff --git a/vllm/distributed/device_communicators/pynccl.py b/vllm/distributed/device_communicators/pynccl.py index 7c6f48e88637b..7411304eb18fa 100644 --- a/vllm/distributed/device_communicators/pynccl.py 
+++ b/vllm/distributed/device_communicators/pynccl.py @@ -131,6 +131,48 @@ def all_reduce(self, ncclRedOpTypeEnum.from_torch(op), self.comm, cudaStream_t(stream.cuda_stream)) + def all_gather(self, + output_tensor: torch.Tensor, + input_tensor: torch.Tensor, + stream=None): + if self.disabled: + return + # nccl communicator created on a specific device + # will only work on tensors on the same device + # otherwise it will cause "illegal memory access" + assert input_tensor.device == self.device, ( + f"this nccl communicator is created to work on {self.device}, " + f"but the input tensor is on {input_tensor.device}") + if stream is None: + stream = self.stream + self.nccl.ncclAllGather( + buffer_type(input_tensor.data_ptr()), + buffer_type(output_tensor.data_ptr()), input_tensor.numel(), + ncclDataTypeEnum.from_torch(input_tensor.dtype), self.comm, + cudaStream_t(stream.cuda_stream)) + + def reduce_scatter(self, + output_tensor: torch.Tensor, + input_tensor: torch.Tensor, + op: ReduceOp = ReduceOp.SUM, + stream=None): + if self.disabled: + return + # nccl communicator created on a specific device + # will only work on tensors on the same device + # otherwise it will cause "illegal memory access" + assert input_tensor.device == self.device, ( + f"this nccl communicator is created to work on {self.device}, " + f"but the input tensor is on {input_tensor.device}") + if stream is None: + stream = self.stream + self.nccl.ncclReduceScatter( + buffer_type(input_tensor.data_ptr()), + buffer_type(output_tensor.data_ptr()), output_tensor.numel(), + ncclDataTypeEnum.from_torch(input_tensor.dtype), + ncclRedOpTypeEnum.from_torch(op), self.comm, + cudaStream_t(stream.cuda_stream)) + def send(self, tensor: torch.Tensor, dst: int, stream=None): if self.disabled: return diff --git a/vllm/distributed/device_communicators/pynccl_wrapper.py b/vllm/distributed/device_communicators/pynccl_wrapper.py index 7619c98f22148..ff88f72470b27 100644 --- a/vllm/distributed/device_communicators/pynccl_wrapper.py +++ b/vllm/distributed/device_communicators/pynccl_wrapper.py @@ -151,6 +151,28 @@ class NCCLLibrary: ncclRedOp_t, ncclComm_t, cudaStream_t ]), + # ncclResult_t ncclAllGather( + # const void* sendbuff, void* recvbuff, size_t count, + # ncclDataType_t datatype, ncclComm_t comm, + # cudaStream_t stream); + # note that cudaStream_t is a pointer type, so the last argument + # is a pointer + Function("ncclAllGather", ncclResult_t, [ + buffer_type, buffer_type, ctypes.c_size_t, ncclDataType_t, + ncclComm_t, cudaStream_t + ]), + + # ncclResult_t ncclReduceScatter( + # const void* sendbuff, void* recvbuff, size_t count, + # ncclDataType_t datatype, ncclRedOp_t op, ncclComm_t comm, + # cudaStream_t stream); + # note that cudaStream_t is a pointer type, so the last argument + # is a pointer + Function("ncclReduceScatter", ncclResult_t, [ + buffer_type, buffer_type, ctypes.c_size_t, ncclDataType_t, + ncclRedOp_t, ncclComm_t, cudaStream_t + ]), + # ncclResult_t ncclSend( # const void* sendbuff, size_t count, ncclDataType_t datatype, # int dest, ncclComm_t comm, cudaStream_t stream); @@ -258,6 +280,28 @@ def ncclAllReduce(self, sendbuff: buffer_type, recvbuff: buffer_type, datatype, op, comm, stream)) + def ncclReduceScatter(self, sendbuff: buffer_type, recvbuff: buffer_type, + count: int, datatype: int, op: int, comm: ncclComm_t, + stream: cudaStream_t) -> None: + # `datatype` actually should be `ncclDataType_t` + # and `op` should be `ncclRedOp_t` + # both are aliases of `ctypes.c_int` + # when we pass int to a function, it 
will be converted to `ctypes.c_int` + # by ctypes automatically + self.NCCL_CHECK(self._funcs["ncclReduceScatter"](sendbuff, recvbuff, + count, datatype, op, + comm, stream)) + + def ncclAllGather(self, sendbuff: buffer_type, recvbuff: buffer_type, + count: int, datatype: int, comm: ncclComm_t, + stream: cudaStream_t) -> None: + # `datatype` actually should be `ncclDataType_t` + # which is an aliases of `ctypes.c_int` + # when we pass int to a function, it will be converted to `ctypes.c_int` + # by ctypes automatically + self.NCCL_CHECK(self._funcs["ncclAllGather"](sendbuff, recvbuff, count, + datatype, comm, stream)) + def ncclSend(self, sendbuff: buffer_type, count: int, datatype: int, dest: int, comm: ncclComm_t, stream: cudaStream_t) -> None: self.NCCL_CHECK(self._funcs["ncclSend"](sendbuff, count, datatype, From 4aba6e3d1a0cc5cec45efdee0adeaa09278f7518 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 22 Nov 2024 20:13:54 -0800 Subject: [PATCH 110/397] [core] gemma2 full context length support (#10584) Signed-off-by: youkaichao --- .../test_basic_correctness.py | 25 +++++++++++----- vllm/attention/layer.py | 12 ++++++-- vllm/config.py | 29 +++++++++++++------ vllm/model_executor/models/gemma2.py | 13 +++++---- 4 files changed, 55 insertions(+), 24 deletions(-) diff --git a/tests/basic_correctness/test_basic_correctness.py b/tests/basic_correctness/test_basic_correctness.py index 7f16baa65a644..fcba253d159f3 100644 --- a/tests/basic_correctness/test_basic_correctness.py +++ b/tests/basic_correctness/test_basic_correctness.py @@ -14,11 +14,12 @@ from vllm.platforms import current_platform from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata +from ..conftest import VllmRunner from ..models.utils import check_outputs_equal from ..utils import multi_gpu_test MODELS = [ - "facebook/opt-125m", + "google/gemma-2-2b-it", "meta-llama/Llama-3.2-1B", ] @@ -42,8 +43,6 @@ def test_vllm_gc_ed(): @pytest.mark.parametrize("enforce_eager", [False, True]) def test_models( hf_runner, - vllm_runner, - example_prompts, model: str, backend: str, dtype: str, @@ -54,15 +53,27 @@ def test_models( if backend == "FLASHINFER" and current_platform.is_rocm(): pytest.skip("Flashinfer does not support ROCm/HIP.") + if backend == "XFORMERS" and model == "google/gemma-2-2b-it": + pytest.skip( + "XFORMERS does not support gemma2 with full context length.") + os.environ["VLLM_ATTENTION_BACKEND"] = backend + # 5042 tokens for gemma2 + # gemma2 has alternating sliding window size of 4096 + # we need a prompt with more than 4096 tokens to test the sliding window + prompt = "The following numbers of the sequence " + ", ".join( + str(i) for i in range(1024)) + " are:" + example_prompts = [prompt] + with hf_runner(model, dtype=dtype) as hf_model: hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens) - with vllm_runner(model, - dtype=dtype, - enforce_eager=enforce_eager, - gpu_memory_utilization=0.7) as vllm_model: + with VllmRunner(model, + max_model_len=8192, + dtype=dtype, + enforce_eager=enforce_eager, + gpu_memory_utilization=0.7) as vllm_model: vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens) check_outputs_equal( diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index 8acbeaf12b0cf..cb4dedf481c77 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -40,18 +40,26 @@ def __init__( quant_config: Optional[QuantizationConfig] = None, blocksparse_params: Optional[Dict[str, Any]] = None, logits_soft_cap: Optional[float] = None, + 
per_layer_sliding_window: Optional[int] = None, prefix: str = "", ) -> None: super().__init__() + if per_layer_sliding_window is not None: + # per-layer sliding window + sliding_window = per_layer_sliding_window + elif cache_config is not None: + # model-level sliding window + sliding_window = cache_config.sliding_window + else: + sliding_window = None + if cache_config is not None: kv_cache_dtype = cache_config.cache_dtype block_size = cache_config.block_size - sliding_window = cache_config.sliding_window is_attention_free = cache_config.is_attention_free else: kv_cache_dtype = "auto" block_size = 16 - sliding_window = None is_attention_free = False if num_kv_heads is None: num_kv_heads = num_heads diff --git a/vllm/config.py b/vllm/config.py index bb02c2ad4c7d4..730b069e076fb 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -233,15 +233,26 @@ def __init__( (self.hf_text_config.model_type in ["gemma2"])) if (not self.disable_sliding_window and has_interleaved_attention): - sliding_window_len_min = get_min_sliding_window( - self.hf_text_config.sliding_window) - - print_warning_once( - f"{self.hf_text_config.model_type} has interleaved attention, " - "which is currently not supported by vLLM. Disabling sliding " - "window and capping the max length to the sliding window size " - f"({sliding_window_len_min}).") - self.disable_sliding_window = True + if envs.VLLM_ATTENTION_BACKEND == "XFORMERS": + sliding_window_len_min = get_min_sliding_window( + self.hf_text_config.sliding_window) + + print_warning_once( + f"{self.hf_text_config.model_type} has interleaved " + "attention, which is currently not supported by the " + "XFORMERS backend. Disabling sliding window and capping " + "the max length to the sliding window size " + f"({sliding_window_len_min}).") + self.disable_sliding_window = True + else: + # for a model with interleaved attention, + # the scheduler and the model treat it as full attention + # (i.e., not dropping any tokens outside the window). + # only the attention layer itself is aware of the sliding + # window, and use the window size to compute the attention. + self.hf_text_config.interleaved_sliding_window = sliding_window + delattr(self.hf_text_config, "sliding_window") + sliding_window = None self.max_model_len = _get_and_verify_max_len( hf_config=self.hf_text_config, diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index 839130364ef4d..9309cced61bb3 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -143,12 +143,12 @@ def __init__(self, is_neox_style=True, ) - # FIXME(woosuk): While Gemma 2 uses sliding window attention for every - # odd layer, vLLM currently ignores it and uses global attention for - # all layers. - use_sliding_window = (layer_idx % 2 == 1 - and config.sliding_window is not None) - del use_sliding_window # Unused. 
+ # reference: + # https://github.com/huggingface/transformers/blob/54be2d7ae87e873482b984cc956e165ca4dc0ba3/src/transformers/models/gemma2/modeling_gemma2.py#L312 # noqa + use_sliding_window = (layer_idx % 2 == 0 and + config.interleaved_sliding_window is not None) + sliding_window = config.interleaved_sliding_window if \ + use_sliding_window else None self.attn = Attention(self.num_heads, self.head_dim, self.scaling, @@ -156,6 +156,7 @@ def __init__(self, cache_config=cache_config, quant_config=quant_config, logits_soft_cap=attn_logits_soft_cap, + per_layer_sliding_window=sliding_window, prefix=f"{prefix}.attn") def forward( From 7d8ffb344f3b9a571d94073644b829eb4baa0a65 Mon Sep 17 00:00:00 2001 From: Varun Vinayak Shenoy Date: Fri, 22 Nov 2024 21:13:29 -0800 Subject: [PATCH 111/397] [Bugfix] Internal Server Error when tool_choice is incorrect. (#10567) Signed-off-by: Varun Shenoy --- tests/entrypoints/openai/test_chat.py | 14 ++++++++++++++ vllm/entrypoints/openai/protocol.py | 12 ++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/tests/entrypoints/openai/test_chat.py b/tests/entrypoints/openai/test_chat.py index 843d15e768093..8d23a2be6f9bb 100644 --- a/tests/entrypoints/openai/test_chat.py +++ b/tests/entrypoints/openai/test_chat.py @@ -829,6 +829,20 @@ async def test_inconsistent_tool_choice_and_tools(client: openai.AsyncOpenAI, "name": "nondefined_function_name" } }) + with pytest.raises(openai.BadRequestError): + await client.chat.completions.create( + model=MODEL_NAME, + messages=messages, + max_completion_tokens=1000, + tools=[{ + "type": "function", + "function": { + "name": "dummy_function_name", + "description": "This is a dummy function", + "parameters": sample_json_schema + } + }], + tool_choice={}) @pytest.mark.asyncio diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index 9db5951e5fe5b..f343732174014 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -478,17 +478,17 @@ def check_tool_usage(cls, data): # it matches a valid tool if isinstance(data["tool_choice"], dict): valid_tool = False - specified_function = data["tool_choice"]["function"] + specified_function = data["tool_choice"].get("function") if not specified_function: raise ValueError( - "Incorrectly formatted `tool_choice`. Should be like " - "`{\"type\": \"function\"," + "Expected field `function` in `tool_choice`." + " Correct usage: `{\"type\": \"function\"," " \"function\": {\"name\": \"my_function\"}}`") - specified_function_name = specified_function["name"] + specified_function_name = specified_function.get("name") if not specified_function_name: raise ValueError( - "Incorrectly formatted `tool_choice`. Should be like " - "`{\"type\": \"function\", " + "Expected field `name` in `function` in `tool_choice`." 
+ "Correct usage: `{\"type\": \"function\", " "\"function\": {\"name\": \"my_function\"}}`") for tool in data["tools"]: if tool["function"]["name"] == specified_function_name: From cfea9c04ef43420be594f23fc1773009d1fe88c3 Mon Sep 17 00:00:00 2001 From: Chen Wu <72850361+CNTRYROA@users.noreply.github.com> Date: Sat, 23 Nov 2024 13:13:59 +0800 Subject: [PATCH 112/397] [Model] Fix Baichuan BNB online quantization (#10572) Signed-off-by: Chen Wu --- vllm/model_executor/models/baichuan.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index a923ed36a9db2..39cb5a8b2cbbe 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -350,6 +350,21 @@ class BaiChuanBaseForCausalLM(nn.Module, SupportsLoRA, SupportsPP): embedding_modules = {} embedding_padding_modules = [] + # BitandBytes specific attributes + default_bitsandbytes_target_modules = [ + ".W_pack.", + ".o_proj.", + ".down_proj.", + ".up_proj.", + ".gate_proj.", + ".up_proj.", + ] + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "gate_proj": ("gate_up_proj", 0), + "up_proj": ("gate_up_proj", 1), + } + def __init__( self, *, From 02a43f82a97e37581b48f1c177d3393aca4fe3f2 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Sat, 23 Nov 2024 00:14:19 -0500 Subject: [PATCH 113/397] Update default max_num_batch_tokens for chunked prefill to 2048 (#10544) --- vllm/config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 730b069e076fb..42a44f5415e9f 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1133,9 +1133,9 @@ def __post_init__(self) -> None: # max_num_batched_tokens. self.max_num_batched_tokens = max(self.max_model_len, 2048) else: - # It is the values that have the best balance between ITL - # and TTFT on A100. Note it is not optimized for throughput. - self.max_num_batched_tokens = 512 + # This value is chosen to have a balance between ITL + # and TTFT. Note it is not optimized for throughput. + self.max_num_batched_tokens = 2048 else: # If max_model_len is too short, use 2048 as the default value # for higher throughput. 
From 7c25fe45a6ef4fb5be148217cc7110e88e186446 Mon Sep 17 00:00:00 2001 From: kliuae <17350011+kliuae@users.noreply.github.com> Date: Sat, 23 Nov 2024 13:14:49 +0800 Subject: [PATCH 114/397] [AMD] Add support for GGUF quantization on ROCm (#10254) --- .buildkite/run-amd-test.sh | 1 - CMakeLists.txt | 2 +- csrc/ops.h | 2 + csrc/quantization/gguf/ggml-common.h | 17 +- csrc/quantization/gguf/gguf_kernel.cu | 6 +- csrc/quantization/gguf/mmq.cuh | 70 +++---- csrc/quantization/gguf/mmvq.cuh | 4 +- csrc/quantization/gguf/vecdotq.cuh | 286 +++++++++++++------------- csrc/torch_bindings.cpp | 2 + vllm/_custom_ops.py | 53 ++--- vllm/config.py | 2 +- 11 files changed, 234 insertions(+), 211 deletions(-) diff --git a/.buildkite/run-amd-test.sh b/.buildkite/run-amd-test.sh index 902e162720b89..3515ccd65667e 100755 --- a/.buildkite/run-amd-test.sh +++ b/.buildkite/run-amd-test.sh @@ -85,7 +85,6 @@ if [[ $commands == *" kernels "* ]]; then --ignore=kernels/test_encoder_decoder_attn.py \ --ignore=kernels/test_flash_attn.py \ --ignore=kernels/test_flashinfer.py \ - --ignore=kernels/test_gguf.py \ --ignore=kernels/test_int8_quant.py \ --ignore=kernels/test_machete_gemm.py \ --ignore=kernels/test_mamba_ssm.py \ diff --git a/CMakeLists.txt b/CMakeLists.txt index bfe435937e3bb..ff34225537cdd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -196,6 +196,7 @@ set(VLLM_EXT_SRC "csrc/quantization/gptq/q_gemm.cu" "csrc/quantization/compressed_tensors/int8_quant_kernels.cu" "csrc/quantization/fp8/common.cu" + "csrc/quantization/gguf/gguf_kernel.cu" "csrc/cuda_utils_kernels.cu" "csrc/prepare_inputs/advance_step.cu" "csrc/torch_bindings.cpp") @@ -237,7 +238,6 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") "csrc/mamba/causal_conv1d/causal_conv1d.cu" "csrc/quantization/aqlm/gemm_kernels.cu" "csrc/quantization/awq/gemm_kernels.cu" - "csrc/quantization/gguf/gguf_kernel.cu" "csrc/custom_all_reduce.cu" "csrc/permute_cols.cu" "csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu") diff --git a/csrc/ops.h b/csrc/ops.h index 672e608e9c47e..ea001190bc202 100644 --- a/csrc/ops.h +++ b/csrc/ops.h @@ -128,6 +128,7 @@ torch::Tensor awq_dequantize(torch::Tensor _kernel, int64_t thx, int64_t thy); torch::Tensor permute_cols(torch::Tensor const& A, torch::Tensor const& perm); +#endif torch::Tensor ggml_dequantize(torch::Tensor W, int64_t type, int64_t m, int64_t n); @@ -138,6 +139,7 @@ torch::Tensor ggml_mul_mat_vec_a8(torch::Tensor W, torch::Tensor X, torch::Tensor ggml_mul_mat_a8(torch::Tensor W, torch::Tensor X, int64_t type, int64_t row); +#ifndef USE_ROCM bool cutlass_scaled_mm_supports_fp8(int64_t cuda_device_capability); void cutlass_scaled_mm(torch::Tensor& out, torch::Tensor const& a, diff --git a/csrc/quantization/gguf/ggml-common.h b/csrc/quantization/gguf/ggml-common.h index fba94fd1d157b..d42205a6571db 100644 --- a/csrc/quantization/gguf/ggml-common.h +++ b/csrc/quantization/gguf/ggml-common.h @@ -1,7 +1,7 @@ // copied from https://github.com/ggerganov/llama.cpp/blob/b2899/ggml-common.h #define QK_K 256 #define K_QUANTS_PER_ITERATION 2 -#define WARP_SIZE 32 +#define WARP_SIZE_GGUF 32 #define K_SCALE_SIZE 12 #define CUDA_DEQUANTIZE_BLOCK_SIZE 256 #define CUDA_QUANTIZE_BLOCK_SIZE 256 @@ -1112,4 +1112,19 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) { #endif return c; } + +static __device__ __forceinline__ uint32_t __vcmpeq4(const uint32_t a, const uint32_t b) { + uint32_t neq = a^b; + return !(neq & 0xff000000) * 0xff000000 | + !(neq & 0x00ff0000) * 0x00ff0000 | + !(neq & 0x0000ff00) * 0x0000ff00 | + 
!(neq & 0x000000ff) * 0x000000ff; +} + +static __device__ __forceinline__ uint32_t __vsub4(const uint32_t a, const uint32_t b) { + return (static_cast(((a & 0xff000000) >> 24) - ((b & 0xff000000) >> 24)) << 24) + + (static_cast(((a & 0x00ff0000) >> 16) - ((b & 0x00ff0000) >> 16)) << 16) + + (static_cast(((a & 0x0000ff00) >> 8) - ((b & 0x0000ff00) >> 8)) << 8) + + (static_cast(((a & 0x000000ff) >> 0) - ((b & 0x000000ff) >> 0)) << 0); +} #endif // defined(USE_ROCM) diff --git a/csrc/quantization/gguf/gguf_kernel.cu b/csrc/quantization/gguf/gguf_kernel.cu index 37e4de4e14dd3..5f0eaf5a973fb 100644 --- a/csrc/quantization/gguf/gguf_kernel.cu +++ b/csrc/quantization/gguf/gguf_kernel.cu @@ -4,6 +4,8 @@ #include #include +#include "cuda_compat.h" + #include "ggml-common.h" #include "vecdotq.cuh" #include "dequantize.cuh" @@ -32,8 +34,8 @@ static __global__ void quantize_q8_1(const half* __restrict__ x, #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { - amax = fmaxf(amax, __shfl_xor_sync(0xffffffff, amax, mask, 32)); - sum += __shfl_xor_sync(0xffffffff, sum, mask, 32); + amax = fmaxf(amax, VLLM_SHFL_XOR_SYNC_WIDTH(amax, mask, 32)); + sum += VLLM_SHFL_XOR_SYNC_WIDTH(sum, mask, 32); } const float d = amax / 127; diff --git a/csrc/quantization/gguf/mmq.cuh b/csrc/quantization/gguf/mmq.cuh index d13efd5965313..c935faa07df0c 100644 --- a/csrc/quantization/gguf/mmq.cuh +++ b/csrc/quantization/gguf/mmq.cuh @@ -10,7 +10,7 @@ static __device__ __forceinline__ void mul_mat_q( const int blocks_per_row_x = ncols_x / qk; const int blocks_per_col_y = nrows_y / QK8_1; - const int blocks_per_warp = WARP_SIZE / qi; + const int blocks_per_warp = WARP_SIZE_GGUF / qi; const int & ncols_dst = ncols_y; @@ -27,10 +27,10 @@ static __device__ __forceinline__ void mul_mat_q( allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc); - __shared__ int tile_y_qs[mmq_x * WARP_SIZE]; - __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1]; + __shared__ int tile_y_qs[mmq_x * WARP_SIZE_GGUF]; + __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE_GGUF/QI8_1]; - float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}}; + float sum[mmq_y/WARP_SIZE_GGUF][mmq_x/nwarps] = {{0.0f}}; for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) { @@ -39,26 +39,26 @@ static __device__ __forceinline__ void mul_mat_q( #pragma unroll for (int ir = 0; ir < qr; ++ir) { - const int kqs = ir*WARP_SIZE + threadIdx.x; + const int kqs = ir*WARP_SIZE_GGUF + threadIdx.x; const int kbxd = kqs / QI8_1; #pragma unroll for (int i = 0; i < mmq_x; i += nwarps) { const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd]; - const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE; + const int index_y = (threadIdx.y + i) * WARP_SIZE_GGUF + kqs % WARP_SIZE_GGUF; tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1); } #pragma unroll for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) { - const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x; - const int kby = threadIdx.x % (WARP_SIZE/QI8_1); + const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE_GGUF/QI8_1)) % mmq_x; + const int kby = threadIdx.x % (WARP_SIZE_GGUF/QI8_1); const int col_y_eff = min(col_y_0 + ids, ncols_y-1); // if the sum is not needed it's faster to transform the scale to f32 ahead of time - const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + 
ir*(WARP_SIZE/QI8_1) + kby].ds; - half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby]; + const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE_GGUF/QI8_1) + kby].ds; + half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE_GGUF/QI8_1) + kby]; if (need_sum) { *dsi_dst = *dsi_src; } else { @@ -70,12 +70,12 @@ static __device__ __forceinline__ void mul_mat_q( __syncthreads(); // #pragma unroll // unrolling this loop causes too much register pressure - for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) { + for (int k = ir*WARP_SIZE_GGUF/qr; k < (ir+1)*WARP_SIZE_GGUF/qr; k += vdr) { #pragma unroll for (int j = 0; j < mmq_x; j += nwarps) { #pragma unroll - for (int i = 0; i < mmq_y; i += WARP_SIZE) { - sum[i/WARP_SIZE][j/nwarps] += vec_dot( + for (int i = 0; i < mmq_y; i += WARP_SIZE_GGUF) { + sum[i/WARP_SIZE_GGUF][j/nwarps] += vec_dot( tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds, threadIdx.x + i, threadIdx.y + j, k); } @@ -93,12 +93,12 @@ static __device__ __forceinline__ void mul_mat_q( } #pragma unroll - for (int i = 0; i < mmq_y; i += WARP_SIZE) { + for (int i = 0; i < mmq_y; i += WARP_SIZE_GGUF) { const int row_dst = row_dst_0 + threadIdx.x + i; if (row_dst >= nrows_dst) { continue; } - dst[col_dst*nrows_dst + row_dst] = __float2half(sum[i/WARP_SIZE][j/nwarps]); + dst[col_dst*nrows_dst + row_dst] = __float2half(sum[i/WARP_SIZE_GGUF][j/nwarps]); } } } @@ -115,7 +115,7 @@ static __device__ __forceinline__ void mul_mat_q( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q4_0, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q4_0, 2) #endif mul_mat_q4_0( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -140,7 +140,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -165,7 +165,7 @@ static void ggml_mul_mat_q4_0_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q4_1, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q4_1, 2) #endif mul_mat_q4_1( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -190,7 +190,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -215,7 +215,7 @@ static void ggml_mul_mat_q4_1_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q5_0, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q5_0, 2) #endif mul_mat_q5_0( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -240,7 +240,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -265,7 
+265,7 @@ static void ggml_mul_mat_q5_0_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q5_1, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q5_1, 2) #endif mul_mat_q5_1( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -289,7 +289,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -314,7 +314,7 @@ static void ggml_mul_mat_q5_1_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q8_0, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q8_0, 2) #endif mul_mat_q8_0( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -338,7 +338,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -363,7 +363,7 @@ static void ggml_mul_mat_q8_0_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q2_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q2_K, 2) #endif mul_mat_q2_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -387,7 +387,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -412,7 +412,7 @@ static void ggml_mul_mat_q2_K_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q3_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q3_K, 2) #endif mul_mat_q3_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -438,7 +438,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -463,7 +463,7 @@ static void ggml_mul_mat_q3_K_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q4_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q4_K, 2) #endif mul_mat_q4_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -487,7 +487,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -512,7 +512,7 @@ static void ggml_mul_mat_q4_K_q8_1_cuda( template static 
__global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q5_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q5_K, 2) #endif mul_mat_q5_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -537,7 +537,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; @@ -562,7 +562,7 @@ static void ggml_mul_mat_q5_K_q8_1_cuda( template static __global__ void #if defined(USE_ROCM) -__launch_bounds__(WARP_SIZE*NWARPS_Q6_K, 2) +__launch_bounds__(WARP_SIZE_GGUF*NWARPS_Q6_K, 2) #endif mul_mat_q6_K( const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, @@ -586,7 +586,7 @@ static void ggml_mul_mat_q6_K_q8_1_cuda( const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y; const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x; const dim3 block_nums(block_num_x, block_num_y, 1); - const dim3 block_dims(WARP_SIZE, nwarps, 1); + const dim3 block_dims(WARP_SIZE_GGUF, nwarps, 1); if (nrows_x % mmq_y == 0) { const bool need_check = false; diff --git a/csrc/quantization/gguf/mmvq.cuh b/csrc/quantization/gguf/mmvq.cuh index b221ae7896138..b01e939808a3f 100644 --- a/csrc/quantization/gguf/mmvq.cuh +++ b/csrc/quantization/gguf/mmvq.cuh @@ -28,8 +28,8 @@ static __global__ void mul_mat_vec_q(const void * __restrict__ vx, const void * // sum up partial sums and write back result #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); + for (int mask = WARP_SIZE/2; mask > 0; mask >>= 1) { + tmp += VLLM_SHFL_XOR_SYNC(tmp, mask); } if (threadIdx.x == 0) { diff --git a/csrc/quantization/gguf/vecdotq.cuh b/csrc/quantization/gguf/vecdotq.cuh index d5af345a6b26f..e00422637c65b 100644 --- a/csrc/quantization/gguf/vecdotq.cuh +++ b/csrc/quantization/gguf/vecdotq.cuh @@ -43,7 +43,7 @@ static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * template static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl( const int * v, const int * u, const float & d4, const half2 & ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -68,7 +68,7 @@ template static __device__ __forceinline__ float vec_dot_q4_0_q8_1_imp template static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl( const int * v, const int * u, const half2 & dm4, const half2 & ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -95,7 +95,7 @@ template static __device__ __forceinline__ float vec_dot_q4_1_q8_1_imp template static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl( const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -128,7 +128,7 @@ template static __device__ __forceinline__ float vec_dot_q5_0_q8_1_imp template static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl( const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) { -#if defined __CUDA_ARCH__ 
&& __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -162,7 +162,7 @@ template static __device__ __forceinline__ float vec_dot_q5_1_q8_1_imp template static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl( const int * v, const int * u, const float & d8_0, const float & d8_1) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -176,7 +176,7 @@ template static __device__ __forceinline__ float vec_dot_q8_0_q8_1_imp template static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl( const int * v, const int * u, const half2 & dm8, const half2 & ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; @@ -202,7 +202,7 @@ template static __device__ __forceinline__ float vec_dot_q8_1_q8_1_imp static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -230,7 +230,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq( static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales, const half2 & dm2, const float & d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi_d = 0; int sumi_m = 0; @@ -267,7 +267,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq( static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales, const int & scale_offset, const float & d3, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf = 0.0f; @@ -301,7 +301,7 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq( static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d3, const float & d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM int sumi = 0; #pragma unroll @@ -326,7 +326,7 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq( static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -351,7 +351,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq( static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { -#if defined __CUDA_ARCH__ && 
__CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -382,7 +382,7 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq( static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -413,7 +413,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq( static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc, const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; float sumf_m = 0.0f; @@ -445,7 +445,7 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq( static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales, const float & d, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf = 0.0f; #pragma unroll @@ -465,7 +465,7 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq( static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq( const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc, const float & d6, const float * __restrict__ d8) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM float sumf_d = 0.0f; #pragma unroll @@ -507,8 +507,8 @@ static __device__ __forceinline__ float vec_dot_q4_0_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0]; + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ float tile_x_d[mmq_y * (WARP_SIZE_GGUF/QI4_0) + mmq_y/QI4_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; } @@ -529,11 +529,11 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); - // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d; + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); + // x_dmf[i * (WARP_SIZE_GGUF/QI4_0) + i / QI4_0 + kbx] = bxi->d; } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI4_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -543,7 +543,7 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI4_0) + i / QI4_0 + kbxd] = __half2float(bxi->d); } } @@ -559,13 +559,13 @@ static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat( 
#pragma unroll for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) { - u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; - u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE]; + u[2*l+0] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l) % WARP_SIZE_GGUF]; + u[2*l+1] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l + QI4_0) % WARP_SIZE_GGUF]; } return vec_dot_q4_0_q8_1_impl - (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0], - y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); + (&x_ql[i * (WARP_SIZE_GGUF + 1) + k], u, x_dmf[i * (WARP_SIZE_GGUF/QI4_0) + i/QI4_0 + k/QI4_0], + y_ds[j * (WARP_SIZE_GGUF/QI8_1) + (2*k/QI8_1) % (WARP_SIZE_GGUF/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q4_1_q8_1( @@ -587,8 +587,8 @@ static __device__ __forceinline__ float vec_dot_q4_1_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1]; + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE_GGUF) + + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI4_1) + mmq_y/QI4_1]; *x_ql = tile_x_qs; *x_dm = tile_x_dm; } @@ -608,10 +608,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_1; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI4_1; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -621,7 +621,7 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI4_1) + i / QI4_1 + kbxd] = bxi->dm; } } @@ -634,13 +634,13 @@ static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat( #pragma unroll for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) { - u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; - u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE]; + u[2*l+0] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l) % WARP_SIZE_GGUF]; + u[2*l+1] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l + QI4_1) % WARP_SIZE_GGUF]; } return vec_dot_q4_1_q8_1_impl - (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1], - y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); + (&x_ql[i * (WARP_SIZE_GGUF + 1) + k], u, x_dm[i * (WARP_SIZE_GGUF/QI4_1) + i/QI4_1 + k/QI4_1], + y_ds[j * (WARP_SIZE_GGUF/QI8_1) + (2*k/QI8_1) % (WARP_SIZE_GGUF/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q5_0_q8_1( @@ -664,8 +664,8 @@ static __device__ __forceinline__ float vec_dot_q5_0_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; - __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0]; + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE_GGUF) + mmq_y]; + __shared__ float tile_x_d[mmq_y * (WARP_SIZE_GGUF/QI5_0) + mmq_y/QI5_0]; *x_ql = tile_x_ql; *x_dm = (half2 *) tile_x_d; @@ -697,7 +697,7 @@ template static __device__ __forceinlin qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 qs0 = __vsubss4(qs0, 0x10101010); // subtract 16 - x_ql[i * (2*WARP_SIZE + 1) + 
2*k+0] = qs0; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2*k+0] = qs0; int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 @@ -706,10 +706,10 @@ template static __device__ __forceinlin qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 - x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2*k+1] = qs1; } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_0; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI5_0; const int kbxd = k % blocks_per_tile_x_row; float * x_dmf = (float *) x_dm; @@ -722,7 +722,7 @@ template static __device__ __forceinlin } const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI5_0) + i / QI5_0 + kbxd] = __half2float(bxi->d); } } @@ -730,7 +730,7 @@ static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); - const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0; + const int index_bx = i * (WARP_SIZE_GGUF/QI5_0) + i/QI5_0 + k/QI5_0; const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; @@ -738,12 +738,12 @@ static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat( #pragma unroll for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) { - u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; - u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE]; + u[2*l+0] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l) % WARP_SIZE_GGUF]; + u[2*l+1] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l + QI5_0) % WARP_SIZE_GGUF]; } return vec_dot_q8_0_q8_1_impl - (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); + (&x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE_GGUF/QI8_1) + (2*k/QI8_1) % (WARP_SIZE_GGUF/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q5_1_q8_1( @@ -767,8 +767,8 @@ static __device__ __forceinline__ float vec_dot_q5_1_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1]; + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI5_1) + mmq_y/QI5_1]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -801,7 +801,7 @@ template static __device__ __forceinlin qs0 |= (qh << 18) & 0x00100000; // 2 -> 20 qs0 |= (qh << 25) & 0x10000000; // 3 -> 28 - x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2*k+0] = qs0; int qs1 = (ql >> 4) & 0x0F0F0F0F; qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4 @@ -809,10 +809,10 @@ template static __device__ __forceinlin qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 - x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2*k+1] = qs1; } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_1; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI5_1; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -825,7 +825,7 @@ template static __device__ __forceinlin 
const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI5_1) + i / QI5_1 + kbxd] = bxi->dm; } } @@ -833,18 +833,18 @@ static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); - const int index_bx = i * (WARP_SIZE/QI5_1) + + i/QI5_1 + k/QI5_1; + const int index_bx = i * (WARP_SIZE_GGUF/QI5_1) + + i/QI5_1 + k/QI5_1; int u[2*VDR_Q5_1_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) { - u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; - u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE]; + u[2*l+0] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l) % WARP_SIZE_GGUF]; + u[2*l+1] = y_qs[j * WARP_SIZE_GGUF + (kyqs + l + QI5_1) % WARP_SIZE_GGUF]; } return vec_dot_q8_1_q8_1_impl - (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); + (&x_ql[i * (2*WARP_SIZE_GGUF + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE_GGUF/QI8_1) + (2*k/QI8_1) % (WARP_SIZE_GGUF/QI8_1)]); } static __device__ __forceinline__ float vec_dot_q8_0_q8_1( @@ -865,8 +865,8 @@ static __device__ __forceinline__ float vec_dot_q8_0_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0]; + __shared__ int tile_x_qs[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ float tile_x_d[mmq_y * (WARP_SIZE_GGUF/QI8_0) + mmq_y/QI8_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; @@ -889,10 +889,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_int8(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI8_0; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI8_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -903,7 +903,7 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI8_0) + i / QI8_0 + kbxd] = __half2float(bxi->d); } } @@ -914,8 +914,8 @@ static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat( const float * y_df = (const float *) y_ds; return vec_dot_q8_0_q8_1_impl - (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0], - y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]); + (&x_ql[i * (WARP_SIZE_GGUF + 1) + k], &y_qs[j * WARP_SIZE_GGUF + k], x_dmf[i * (WARP_SIZE_GGUF/QI8_0) + i/QI8_0 + k/QI8_0], + y_df[j * (WARP_SIZE_GGUF/QI8_1) + k/QI8_1]); } static __device__ __forceinline__ float vec_dot_q2_K_q8_1( @@ -942,9 +942,9 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * 
(WARP_SIZE/QI2_K) + mmq_y/QI2_K]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI2_K) + mmq_y/QI2_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/4) + mmq_y/4]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -967,10 +967,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI2_K; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI2_K; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll @@ -981,18 +981,18 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI2_K) + i / QI2_K + kbxd] = bxi->dm; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { - int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); + int i = i0 + i_offset * 4 + k / (WARP_SIZE_GGUF/4); if (need_check) { i = min(i, i_max); } - const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4); - x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4)); + const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/4)) / (QI2_K/4); + x_sc[i * (WARP_SIZE_GGUF/4) + i / 4 + k % (WARP_SIZE_GGUF/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4)); } } @@ -1005,7 +1005,7 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat( int v[QR2_K*VDR_Q2_K_Q8_1_MMQ]; - const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2); + const int kqsx = i * (WARP_SIZE_GGUF + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2); const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2)); #pragma unroll @@ -1013,10 +1013,10 @@ static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat( v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303; } - const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4; + const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE_GGUF/4) + i/4 + kbx*4]) + ky/4; - const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE; - return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]); + const int index_y = j * WARP_SIZE_GGUF + (QR2_K*k) % WARP_SIZE_GGUF; + return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE_GGUF/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q3_K_q8_1( @@ -1047,10 +1047,10 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K]; - __shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4]; + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI3_K) + mmq_y/QI3_K]; + __shared__ int tile_x_qh[mmq_y * (WARP_SIZE_GGUF/2) + mmq_y/2]; + __shared__ 
int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/4) + mmq_y/4]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -1073,10 +1073,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI3_K; + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI3_K; const int kbxd = k % blocks_per_tile_x_row; float * x_dmf = (float *) x_dm; @@ -1087,27 +1087,27 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI3_K) + i / QI3_K + kbxd] = __half2float(bxi->d); } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) { - int i = i0 + i_offset * 2 + k / (WARP_SIZE/2); + int i = i0 + i_offset * 2 + k / (WARP_SIZE_GGUF/2); if (need_check) { i = min(i, i_max); } - const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2); + const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/2)) / (QI3_K/2); // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted - x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2)); + x_qh[i * (WARP_SIZE_GGUF/2) + i / 2 + k % (WARP_SIZE_GGUF/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2)); } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) { - int i = i0 + i_offset * 4 + k / (WARP_SIZE/4); + int i = i0 + i_offset * 4 + k / (WARP_SIZE_GGUF/4); if (need_check) { i = min(i, i_max); } - const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4); + const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/4)) / (QI3_K/4); const int ksc = k % (QI3_K/4); @@ -1121,7 +1121,7 @@ template static __device__ __forceinlin const int sc = __vsubss4(sc_low | sc_high, 0x20202020); - x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc; + x_sc[i * (WARP_SIZE_GGUF/4) + i / 4 + k % (WARP_SIZE_GGUF/4)] = sc; } } @@ -1134,24 +1134,24 @@ static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat( const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; - const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4; + const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE_GGUF/4) + i/4 + kbx*4)) + ky/4; int v[QR3_K*VDR_Q3_K_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) { - const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2); + const int kqsx = i * (WARP_SIZE_GGUF + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2); const int shift = 2 * ((ky % 32) / 8); const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303; - const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8); + const int vh = x_qh[i * (WARP_SIZE_GGUF/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8); const int vlh = (vh << 2) & 0x04040404; v[l] = __vsubss4(vll, vlh); } - const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE; - return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]); + const int index_y = j * WARP_SIZE_GGUF + (k*QR3_K) % WARP_SIZE_GGUF; + return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, 
x_dmf[i * (WARP_SIZE_GGUF/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q4_K_q8_1( @@ -1200,9 +1200,9 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + __shared__ int tile_x_ql[mmq_y * (WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI4_K) + mmq_y/QI4_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -1225,10 +1225,10 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx; - x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); + x_ql[i * (WARP_SIZE_GGUF + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256 + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI4_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll @@ -1238,27 +1238,27 @@ template static __device__ __forceinlin i = min(i, i_max); } const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI4_K) + i / QI4_K + kbxd] = bxi->dm; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { - int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + int i = (i0 + i_offset * 8 + k / (WARP_SIZE_GGUF/8)) % mmq_y; if (need_check) { i = min(i, i_max); } - const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8); + const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/8)) / (QI4_K/8); const int * scales = (const int *) bxi->scales; - const int ksc = k % (WARP_SIZE/8); + const int ksc = k % (WARP_SIZE_GGUF/8); // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits - x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; + x_sc[i * (WARP_SIZE_GGUF/8) + i / 8 + ksc] = scales8; } } @@ -1267,11 +1267,11 @@ static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat( const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { (void)x_qh; - const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8); + const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE_GGUF/8) + i/8 + k/16]) + 2*((k % 16) / 8); - const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE; - return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8, - x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]); + const int index_y = j * WARP_SIZE_GGUF + (QR4_K*k) % WARP_SIZE_GGUF; + return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE_GGUF + 1) + k], &y_qs[index_y], sc, sc+8, + x_dm[i * (WARP_SIZE_GGUF/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q5_K_q8_1( @@ -1321,9 +1321,9 @@ static __device__ __forceinline__ float vec_dot_q5_K_q8_1( } template static __device__ 
__forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI5_K) + mmq_y/QI5_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -1360,11 +1360,11 @@ template static __device__ __forceinlin const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0; const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4); - x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0; - x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + kq0] = ql0 | qh0; + x_ql[i * (2*WARP_SIZE_GGUF + 1) + kq1] = ql1 | qh1; } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256 + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI5_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll @@ -1376,40 +1376,40 @@ template static __device__ __forceinlin } const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm; + x_dm[i * (WARP_SIZE_GGUF/QI5_K) + i / QI5_K + kbxd] = bxi->dm; } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { - int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + int i = (i0 + i_offset * 8 + k / (WARP_SIZE_GGUF/8)) % mmq_y; if (need_check) { i = min(i, i_max); } - const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8); + const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/8)) / (QI5_K/8); const int * scales = (const int *) bxi->scales; - const int ksc = k % (WARP_SIZE/8); + const int ksc = k % (WARP_SIZE_GGUF/8); // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m8 int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits - x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8; + x_sc[i * (WARP_SIZE_GGUF/8) + i / 8 + ksc] = scales8; } } static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { - const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8); + const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE_GGUF/8) + i/8 + k/16]) + 2 * ((k % 16) / 8); - const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k; - const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE; + const int index_x = i * (QR5_K*WARP_SIZE_GGUF + 1) + QR5_K*k; + const int index_y = j * WARP_SIZE_GGUF + (QR5_K*k) % WARP_SIZE_GGUF; return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8, - x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]); + x_dm[i * (WARP_SIZE_GGUF/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_q6_K_q8_1( @@ -1439,9 +1439,9 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1( } template static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int 
** x_qh, int ** x_sc) { - __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y]; - __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K]; - __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8]; + __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE_GGUF) + mmq_y]; + __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE_GGUF/QI6_K) + mmq_y/QI6_K]; + __shared__ int tile_x_sc[mmq_y * (WARP_SIZE_GGUF/8) + mmq_y/8]; *x_ql = tile_x_ql; *x_dm = tile_x_dm; @@ -1478,11 +1478,11 @@ template static __device__ __forceinlin const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0; const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2); - x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); - x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); + x_ql[i * (2*WARP_SIZE_GGUF + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); + x_ql[i * (2*WARP_SIZE_GGUF + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); } - const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 + const int blocks_per_tile_x_row = WARP_SIZE_GGUF / QI6_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 float * x_dmf = (float *) x_dm; @@ -1496,20 +1496,20 @@ template static __device__ __forceinlin const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd; - x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = __half2float(bxi->d); + x_dmf[i * (WARP_SIZE_GGUF/QI6_K) + i / QI6_K + kbxd] = __half2float(bxi->d); } #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) { - int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y; + int i = (i0 + i_offset * 8 + k / (WARP_SIZE_GGUF/8)) % mmq_y; if (need_check) { i = min(i, i_max); } - const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4; + const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE_GGUF/8)) / 4; - x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8)); + x_sc[i * (WARP_SIZE_GGUF/8) + i / 8 + k % (WARP_SIZE_GGUF/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8)); } } @@ -1519,11 +1519,11 @@ static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat( const float * x_dmf = (const float *) x_dm; const float * y_df = (const float *) y_ds; - const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]); + const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE_GGUF/8) + i/8 + k/8]); - const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k; - const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE; - return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]); + const int index_x = i * (QR6_K*WARP_SIZE_GGUF + 1) + QR6_K*k; + const int index_y = j * WARP_SIZE_GGUF + (QR6_K*k) % WARP_SIZE_GGUF; + return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE_GGUF/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]); } static __device__ __forceinline__ float vec_dot_iq2_xxs_q8_1( @@ -1582,7 +1582,7 @@ static __device__ __forceinline__ float vec_dot_iq2_xs_q8_1( static __device__ __forceinline__ float vec_dot_iq2_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq2_s * bq2 = (const block_iq2_s *) vbq; const int ib32 = iqs; @@ -1619,7 +1619,7 @@ static __device__ __forceinline__ float vec_dot_iq2_s_q8_1( static __device__ 
__forceinline__ float vec_dot_iq3_xxs_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq3_xxs * bq2 = (const block_iq3_xxs *) vbq; const int ib32 = iqs; @@ -1646,7 +1646,7 @@ static __device__ __forceinline__ float vec_dot_iq3_xxs_q8_1( static __device__ __forceinline__ float vec_dot_iq3_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq3_s * bq2 = (const block_iq3_s *) vbq; const int ib32 = iqs; @@ -1671,7 +1671,7 @@ static __device__ __forceinline__ float vec_dot_iq3_s_q8_1( static __device__ __forceinline__ float vec_dot_iq1_s_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq1_s * bq1 = (const block_iq1_s *) vbq; const int qs_packed = get_int_b2(bq1->qs, iqs); @@ -1703,7 +1703,7 @@ static __device__ __forceinline__ float vec_dot_iq1_s_q8_1( static __device__ __forceinline__ float vec_dot_iq1_m_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq1_m * bq1 = (const block_iq1_m *) vbq; @@ -1763,7 +1763,7 @@ static __device__ __forceinline__ void get_int_from_table_16(const uint32_t & q4 static __device__ __forceinline__ float vec_dot_iq4_nl_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq4_nl * bq = (const block_iq4_nl *) vbq; @@ -1788,7 +1788,7 @@ static __device__ __forceinline__ float vec_dot_iq4_nl_q8_1( static __device__ __forceinline__ float vec_dot_iq4_xs_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) { -#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 +#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 610 || defined USE_ROCM const block_iq4_xs * bq4 = (const block_iq4_xs *) vbq; const uint8_t * values = (const uint8_t *)kvalues_iq4nl; diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index 3dccdf61abf3b..4e64b9c92773a 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -258,6 +258,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { "awq_marlin_repack(Tensor b_q_weight, SymInt size_k, " "SymInt size_n, int num_bits) -> Tensor"); // conditionally compiled so impl registrations are in source file +#endif // Dequantization for GGML. ops.def("ggml_dequantize(Tensor W, int type, SymInt m, SymInt n) -> Tensor"); @@ -274,6 +275,7 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { "ggml_mul_mat_a8(Tensor W, Tensor X, int type, SymInt row) -> Tensor"); ops.impl("ggml_mul_mat_a8", torch::kCUDA, &ggml_mul_mat_a8); +#ifndef USE_ROCM // fp8_marlin Optimized Quantized GEMM for FP8 weight-only. 
ops.def( "fp8_marlin_gemm(Tensor a, Tensor b_q_weight, Tensor b_scales, " diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 41892e4dddf7e..c192c9a7b0e4d 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -344,31 +344,6 @@ def _gptq_marlin_gemm_fake(a: torch.Tensor, is_zp_float: bool = False) -> torch.Tensor: return torch.empty((size_m, size_n), device=a.device, dtype=a.dtype) - @register_fake("_C::ggml_dequantize") - def _ggml_dequantize_fake(W: torch.Tensor, quant_type: int, - m: torch.SymInt, - n: torch.SymInt) -> torch.Tensor: - return torch.empty((m, n), dtype=torch.float16, device=W.device) - - @register_fake("_C::ggml_mul_mat_vec_a8") - def _ggml_mul_mat_vec_a8_fake( - W: torch.Tensor, - X: torch.Tensor, - quant_type: int, - row: torch.SymInt, - ) -> torch.Tensor: - return torch.empty((1, row), dtype=torch.float16, device=W.device) - - @register_fake("_C::ggml_mul_mat_a8") - def _ggml_mul_mat_a8_fake( - W: torch.Tensor, - X: torch.Tensor, - quant_type: int, - row: torch.SymInt, - ) -> torch.Tensor: - batch = X.size(0) - return torch.empty((batch, row), dtype=torch.float16, device=W.device) - @register_fake("_C::marlin_qqq_gemm") def _marlin_qqq_gemm_fake(a: torch.Tensor, b_q_weight: torch.Tensor, s_tok: torch.Tensor, s_ch: torch.Tensor, @@ -468,6 +443,34 @@ def machete_prepack_B_fake( memory_format=torch.contiguous_format) +if hasattr(torch.ops._C, "ggml_dequantize"): + + @register_fake("_C::ggml_dequantize") + def _ggml_dequantize_fake(W: torch.Tensor, quant_type: int, + m: torch.SymInt, + n: torch.SymInt) -> torch.Tensor: + return torch.empty((m, n), dtype=torch.float16, device=W.device) + + @register_fake("_C::ggml_mul_mat_vec_a8") + def _ggml_mul_mat_vec_a8_fake( + W: torch.Tensor, + X: torch.Tensor, + quant_type: int, + row: torch.SymInt, + ) -> torch.Tensor: + return torch.empty((1, row), dtype=torch.float16, device=W.device) + + @register_fake("_C::ggml_mul_mat_a8") + def _ggml_mul_mat_a8_fake( + W: torch.Tensor, + X: torch.Tensor, + quant_type: int, + row: torch.SymInt, + ) -> torch.Tensor: + batch = X.size(0) + return torch.empty((batch, row), dtype=torch.float16, device=W.device) + + # cutlass def cutlass_scaled_mm_supports_fp8(cuda_device_capability: int) -> bool: return torch.ops._C.cutlass_scaled_mm_supports_fp8(cuda_device_capability) diff --git a/vllm/config.py b/vllm/config.py index 42a44f5415e9f..f163665e2c063 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -387,7 +387,7 @@ def _verify_quantization(self) -> None: supported_quantization = QUANTIZATION_METHODS rocm_supported_quantization = [ "awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors", - "fbgemm_fp8" + "fbgemm_fp8", "gguf" ] optimized_quantization_methods = [ "fp8", "marlin", "modelopt", "gptq_marlin_24", "gptq_marlin", From 4634a89d18569ef0ee2d7dd2d535377a1f460188 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Fri, 22 Nov 2024 21:15:55 -0800 Subject: [PATCH 115/397] Prefix Cache Aware Scheduling [1/n] (#10128) Signed-off-by: rickyx --- tests/core/block/test_prefix_caching_block.py | 181 +++++++++- tests/core/test_scheduler.py | 179 +++++++++- tests/core/utils.py | 51 ++- tests/prefix_caching/test_prefix_caching.py | 106 +++++- vllm/core/block/cpu_gpu_block_allocator.py | 15 +- vllm/core/block/interfaces.py | 36 +- vllm/core/block/naive_block.py | 11 +- vllm/core/block/prefix_caching_block.py | 258 ++++++++----- vllm/core/block_manager.py | 23 +- vllm/core/interfaces.py | 4 + vllm/core/placeholder_block_space_manager.py | 3 + vllm/core/scheduler.py | 338 +++++++++++++----- 
vllm/sequence.py | 3 + 13 files changed, 967 insertions(+), 241 deletions(-) diff --git a/tests/core/block/test_prefix_caching_block.py b/tests/core/block/test_prefix_caching_block.py index d325b9606843e..bbeb4b3a58f2a 100644 --- a/tests/core/block/test_prefix_caching_block.py +++ b/tests/core/block/test_prefix_caching_block.py @@ -5,9 +5,14 @@ import pytest +from tests.core.utils import create_dummy_sequence +from vllm.core.block.cpu_gpu_block_allocator import CpuGpuBlockAllocator from vllm.core.block.interfaces import Block, BlockAllocator -from vllm.core.block.prefix_caching_block import (PrefixCachingBlock, +from vllm.core.block.prefix_caching_block import (ComputedBlocksTracker, + PrefixCachingBlock, PrefixCachingBlockAllocator) +from vllm.sequence import Logprob +from vllm.utils import Device class TestPrefixCachingBlock: @@ -726,18 +731,71 @@ def test_touch_block(): token_ids=common_token_ids, allocator=allocator, ) - block_ids = [block.block_id for block in blocks] + block_hashes = [block.content_hash for block in blocks] # The allocated blocks should be marked as touched # but not computed. - computed_block_ids = allocator.get_computed_block_ids( - [], block_ids, skip_last_block_id=False) + computed_block_ids = allocator.find_cached_blocks_prefix( + block_hashes) assert len(computed_block_ids) == 0 allocator.mark_blocks_as_computed([]) - computed_block_ids = allocator.get_computed_block_ids( - [], block_ids, skip_last_block_id=False) + computed_block_ids = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes) assert len(computed_block_ids) == common_blocks + @staticmethod + def test_find_cached_blocks_prefix(): + """ + This test verifies the behavior of find_cached_blocks_prefix. + """ + block_size = 4 + num_blocks = 8 + total_test_blocks = 12 + allocator = PrefixCachingBlockAllocator(num_blocks=num_blocks, + block_size=block_size) + + token_ids = list(range(total_test_blocks * block_size)) + block_tokens_seq1 = token_ids[:num_blocks * block_size] + blocks_seq1 = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=block_tokens_seq1, + allocator=allocator, + ) + block_hashes_seq1 = [block.content_hash for block in blocks_seq1] + allocator.mark_blocks_as_computed([]) + + # All blocks should be cached. + cached_blocks_seq1 = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes_seq1) + assert len(cached_blocks_seq1) == num_blocks + + # Free the first sequence. + for block in blocks_seq1: + allocator.free(block) + + # All blocks should be still be cached if not required to be allocated. + cached_blocks = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes_seq1) + assert len(cached_blocks) == num_blocks + + block_tokens_seq2 = token_ids[num_blocks * block_size:] + blocks_seq2 = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=block_tokens_seq2, + allocator=allocator, + ) + block_hashes_seq2 = [block.content_hash for block in blocks_seq2] + allocator.mark_blocks_as_computed([]) + cached_blocks = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes_seq2) + assert len(cached_blocks) == len(blocks_seq2) + + # Half of the blocks from seq1 should still be cached. 
+ num_evicted_blocks = len(blocks_seq2) + cached_blocks = allocator.find_cached_blocks_prefix( + block_hashes=block_hashes_seq1) + assert len(cached_blocks) == len(blocks_seq1) - num_evicted_blocks + @staticmethod def create_immutable_chain( block_size: int, @@ -762,3 +820,114 @@ def create_immutable_chain( blocks.append(prev_block) return blocks + + +class TestComputedBlocksTracker: + + @staticmethod + def _get_mock_allocator(): + return MagicMock(spec=PrefixCachingBlockAllocator) + + @staticmethod + def test_get_num_cached_tokens(): + """ + Test it correctly computes the number of cached tokens for a given + sequence: + + - The cache token count is derived from the number of cached blocks. + - The cache token count is updated when the allocator is updated. + - When a sequence is removed, the cache token count should be updated + accordingly. + + # TODO(rickyx): This behaviour for prefill sequence is a hack until + we fix the computed blocks tracking. + - The cache token count for prefill sequence doesn't change while + the sequence is in continuous prefill (chunked prefill). + """ + block_size = 4 + mock_allocator = TestComputedBlocksTracker._get_mock_allocator() + tracker = ComputedBlocksTracker( + allocator=mock_allocator, + block_size=block_size, + enable_caching=True, + ) + + # Not yet allocated. + tokens = [0, 1, 2, 3, 4, 5] + seq1 = create_dummy_sequence(request_id=0, + token_ids=tokens, + block_size=block_size) + mock_allocator.find_cached_blocks_prefix.return_value = [] + assert tracker.get_num_cached_tokens(seq1) == 0 + + mock_allocator.find_cached_blocks_prefix.return_value = [ + None + ] # 1 block cached. + # Result is cached for prefill sequence. + assert tracker.get_num_cached_tokens(seq1) == 0 + + # Mark the sequence as non-prefill. + seq1.data.update_num_computed_tokens(len(tokens)) # 6 tokens computed. + assert not seq1.is_prefill() + + # Recomputes for decoding sequence. + assert tracker.get_num_cached_tokens(seq1) == 4 + + # Append new tokens to the sequence. + num_new_tokens = 3 + for i in range(num_new_tokens): + seq1.append_token_id(i, {i: Logprob(logprob=0.0)}) + + assert tracker.get_num_cached_tokens(seq1) == 4 + + # Update the allocator. + mock_allocator.find_cached_blocks_prefix.return_value = [ + None + ] * 2 # 2 blocks cached. + assert tracker.get_num_cached_tokens(seq1) == 8 + + # Remove the sequence. + tracker.remove_seq(seq1.seq_id) + + # Re-create the sequence with the same request id to simulate recompute. + seq1 = create_dummy_sequence(request_id=0, + token_ids=tokens, + block_size=block_size) + mock_allocator.find_cached_blocks_prefix.return_value = [ + ] # no cached block + assert tracker.get_num_cached_tokens(seq1) == 0 + + @staticmethod + def test_correct_block_hash(): + """ + Test that the block hash is correctly computed for a sequence (should + match the underlying block allocator's block hash). So the number of + cached tokens is correctly retrieved. + """ + block_size = 4 + allocator = CpuGpuBlockAllocator.create( + allocator_type="prefix_caching", + num_gpu_blocks=16, + num_cpu_blocks=16, + block_size=block_size, + ) + gpu_allocator = allocator._allocators[Device.GPU] + + tracker = ComputedBlocksTracker( + allocator=allocator, + block_size=block_size, + enable_caching=True, + ) + + tokens = list(range(block_size * 4)) # 4 blocks. 
+ seq = create_dummy_sequence(request_id=0, + token_ids=tokens, + block_size=block_size) + _ = TestPrefixCachingBlockAllocator.create_immutable_chain( + block_size=block_size, + token_ids=tokens, + allocator=gpu_allocator, + ) + allocator.mark_blocks_as_computed([]) + + assert tracker.get_num_cached_tokens(seq) == len(tokens) diff --git a/tests/core/test_scheduler.py b/tests/core/test_scheduler.py index 5ff32be611592..8f6de84e566e7 100644 --- a/tests/core/test_scheduler.py +++ b/tests/core/test_scheduler.py @@ -12,9 +12,9 @@ from vllm.lora.request import LoRARequest from vllm.sequence import SequenceGroup -from .utils import (append_new_token, append_new_token_seq_group, - create_dummy_prompt, get_sequence_groups, - schedule_and_update_computed_tokens) +from .utils import (append_new_token, append_new_token_seq, + append_new_token_seq_group, create_dummy_prompt, + get_sequence_groups, schedule_and_update_computed_tokens) def test_scheduler_add_seq_group(): @@ -305,6 +305,8 @@ def initialize_scheduler( block_size=4, num_cpu_blocks=8, num_gpu_blocks=8, + enable_prefix_caching=False, + enable_chunked_prefill=False, ): block_size = block_size scheduler_config = SchedulerConfig( @@ -312,8 +314,15 @@ def initialize_scheduler( max_num_batched_tokens=max_token_budget, max_num_seqs=max_num_seqs, max_model_len=max_model_len, + enable_chunked_prefill=enable_chunked_prefill, + ) + cache_config = CacheConfig( + block_size, + 1.0, + 1, + "auto", + enable_prefix_caching=enable_prefix_caching, ) - cache_config = CacheConfig(block_size, 1.0, 1, "auto") cache_config.num_cpu_blocks = num_cpu_blocks cache_config.num_gpu_blocks = num_gpu_blocks scheduler = Scheduler(scheduler_config, cache_config, lora_config) @@ -800,3 +809,165 @@ def test_scheduling_budget(): assert budget.num_curr_seqs == 0 budget.subtract_num_seqs(seq_group.request_id, 2) assert budget.num_curr_seqs == 0 + + +@pytest.mark.parametrize("enable_prefix_caching", [True, False]) +def test_prefix_caching_aware_prefills(enable_prefix_caching): + """ + Test the below scenario: + + For 3 sequences, seqA, seqB, seqC, share the first block as prefix. + + The test verifies the below scenarios: + 1. SeqA is first scheduled. + 2. SeqB and SeqC can be prefilled together in a single schedule round + even though there are not enough token budgets to prefill both without + considering prefix caching. + """ + + block_size = 4 + max_num_batched_tokens = 12 + max_seq_group = 3 + scheduler = initialize_scheduler( + block_size=block_size, + num_cpu_blocks=16, + num_gpu_blocks=16, + max_token_budget=max_num_batched_tokens, + max_num_seqs=max_seq_group, + max_model_len=max_num_batched_tokens, + enable_prefix_caching=enable_prefix_caching, + ) + + seqA_tokens = list(range(8)) + num_shared_tokens = 4 + seqB_tokens = seqA_tokens[:num_shared_tokens] + list(range( + 12, 16)) # Shared prefix first 4. + seqC_tokens = seqA_tokens[:num_shared_tokens] + list(range( + 16, 20)) # Shared prefix first 4. + + seqA, seqA_group = create_dummy_prompt("0", + prompt_tokens=seqA_tokens, + block_size=block_size) + seqB, seqB_group = create_dummy_prompt("1", + prompt_tokens=seqB_tokens, + block_size=block_size) + seqC, seqC_group = create_dummy_prompt("2", + prompt_tokens=seqC_tokens, + block_size=block_size) + + # Schedule seqA prefill. 
+ scheduler.add_seq_group(seqA_group) + metas, out, _ = scheduler.schedule() + assert (len(out.scheduled_seq_groups) == 1 + and out.scheduled_seq_groups[0].seq_group == seqA_group) + assert out.scheduled_seq_groups[0].token_chunk_size == len(seqA_tokens) + + # Schedule seqA decode. + append_new_token_seq_group(len(seqA_tokens), seqA_group, 999) + metas, out, _ = scheduler.schedule() + + assert len(out.scheduled_seq_groups) == 1 + assert out.scheduled_seq_groups[0].seq_group == seqA_group + assert out.scheduled_seq_groups[0].token_chunk_size == 1 + + # Schedule seqB and seqC prefills should work with prefix caching. + scheduler.add_seq_group(seqB_group) + scheduler.add_seq_group(seqC_group) + metas, out, _ = scheduler.schedule() + + if enable_prefix_caching: + assert len(out.scheduled_seq_groups) == 2 + assert set([ + out.scheduled_seq_groups[0].seq_group, + out.scheduled_seq_groups[1].seq_group, + ]) == set([seqB_group, seqC_group]) + assert len(metas) == 2 + for meta in metas: + assert meta.token_chunk_size == 8 + assert (len(meta.computed_block_nums) == num_shared_tokens // + block_size) # 1 Block for the 8 tokens. + else: + assert len(out.scheduled_seq_groups) == 1 + assert len(metas) == 1 + assert metas[0].token_chunk_size == 8 + assert len(metas[0].computed_block_nums) == 0 # No blocks computed. + + +def test_no_multiple_partial_prefills_with_chunked_prefill_and_prefix_caching( +): + """ + This test verifies that we don't schedule new prefills if there's already + a continuous prefill in progress even though the new prefills with shared + prefix can fit in the token budget: + + - SeqA is being chunked prefill. + - SeqB with the same prompt shouldn't be scheduled for prefill even though + there's enough token budget to prefill the cached tokens. + - Neither should seqC be scheduled. + + - When seqA is in decoding phase, seqB and seqC can be scheduled. + - Entire seqB should be prefilled since it's a full prefix cache hit. + - SeqC would be partially prefilled with the prefix shared, and the + remaining unique tokens would be prefilled (rounded down to be + block-size aligned). + """ + + block_size = 2 + max_num_batched_tokens = 4 + max_seq_group = 3 + scheduler = initialize_scheduler( + block_size=block_size, + num_cpu_blocks=16, + num_gpu_blocks=16, + max_token_budget=max_num_batched_tokens, + max_num_seqs=max_seq_group, + max_model_len=100, + enable_prefix_caching=True, + enable_chunked_prefill=True, + ) + + seqA_tokens = list(range(8)) + seqB_tokens = seqA_tokens + seqC_shared_prefix_len = 4 + seqC_tokens = seqA_tokens[:seqC_shared_prefix_len] + list(range(12, 20)) + + seqA, seqA_group = create_dummy_prompt("0", + prompt_tokens=seqA_tokens, + block_size=block_size) + seqB, seqB_group = create_dummy_prompt("1", + prompt_tokens=seqB_tokens, + block_size=block_size) + + # Chunked prefill seqA. + scheduler.add_seq_group(seqA_group) + metas, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + assert out.scheduled_seq_groups[0].seq_group == seqA_group + assert out.scheduled_seq_groups[0].token_chunk_size == 4 + + # seqB should not be scheduled with ongoing prefills. + scheduler.add_seq_group(seqB_group) + metas, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 1 + assert out.scheduled_seq_groups[0].seq_group == seqA_group + assert out.scheduled_seq_groups[0].token_chunk_size == 4 + + # both seqB and seqC can now be scheduled with seqA is over. + # seqA is in decoding phase. 
+ append_new_token_seq(seqA, 999) + seqC, seqC_group = create_dummy_prompt("2", + prompt_tokens=seqC_tokens, + block_size=block_size) + scheduler.add_seq_group(seqC_group) + metas, out = schedule_and_update_computed_tokens(scheduler) + assert len(out.scheduled_seq_groups) == 3 + + metas = {meta.request_id: meta for meta in metas} + assert metas[seqA_group.request_id].token_chunk_size == 1 # Decode + assert (metas[seqB_group.request_id].token_chunk_size == 8 + ) # Fully cached prefill + assert ( + metas[seqC_group.request_id].token_chunk_size == 6 + ), "A partial prefix of C (4 tokens) should be prefilled, with the " + "remaining tokens fit into 3 token budget (4-1 from the seqA). It will " + "then be rounded down to 2 tokens on block size, thus 6 tokens in total." diff --git a/tests/core/utils.py b/tests/core/utils.py index cd0caa4704e11..277368b57b938 100644 --- a/tests/core/utils.py +++ b/tests/core/utils.py @@ -1,17 +1,20 @@ import time -from typing import List, Optional +from collections import defaultdict +from typing import Any, Dict, List, Optional from typing import Sequence as GenericSequence from typing import Tuple from vllm import SamplingParams +from vllm.core.scheduler import Scheduler, SchedulerOutputs from vllm.inputs import EncoderDecoderInputs, token_inputs from vllm.lora.request import LoRARequest -from vllm.sequence import Logprob, Sequence, SequenceGroup +from vllm.sequence import (Logprob, Sequence, SequenceGroup, + SequenceGroupMetadata) def create_dummy_prompt( request_id: str, - prompt_length: int, + prompt_length: int = -1, block_size: Optional[int] = None, lora_request: Optional[LoRARequest] = None, best_of: int = 1, @@ -26,6 +29,7 @@ def create_dummy_prompt( # Create dummy prompt sequence with tokens 0...block_size-1 # and prompt "0 ... block_size". prompt_tokens = list(range(prompt_length)) + prompt_str = " ".join([str(t) for t in prompt_tokens]) prompt = Sequence(int(request_id), inputs=token_inputs(prompt_tokens, prompt=prompt_str), @@ -42,6 +46,15 @@ def create_dummy_prompt( return prompt, seq_group +def create_dummy_sequence(request_id: int, token_ids: List[int], + block_size: int) -> Sequence: + return Sequence( + seq_id=request_id, + inputs=token_inputs(token_ids), + block_size=block_size, + ) + + def create_dummy_prompt_encoder_decoder( request_id: str, decoder_prompt_length: int, @@ -194,12 +207,40 @@ def append_new_token(out, token_id: int): def schedule_and_update_computed_tokens(scheduler): metas, out, _ = scheduler.schedule() - for s, meta in zip(out.scheduled_seq_groups, metas): - s.seq_group.update_num_computed_tokens(meta.token_chunk_size) + for s in out.scheduled_seq_groups: + s.seq_group.update_num_computed_tokens(s.token_chunk_size) return metas, out +def append_new_token_seq(seq: Sequence, token_id: int): + seq.append_token_id(token_id, {token_id: Logprob(token_id)}) + + def append_new_token_seq_group(token_chunk_size, seq_group, token_id: int): seq_group.update_num_computed_tokens(token_chunk_size) for seq in seq_group.get_seqs(): seq.append_token_id(token_id, {token_id: Logprob(token_id)}) + + +class SchedulerProxy: + """ + A proxy class to forward calls to the scheduler. 
+ """ + + def __init__(self, scheduler: Scheduler): + self.scheduler_ = scheduler + self.call_history: Dict[str, List[Any]] = defaultdict(list) + + def __getattr__(self, name: str) -> Any: + + def wrapper(*args, **kwargs): + result = getattr(self.scheduler_, name)(*args, **kwargs) + self.call_history[name].append((args, kwargs, result)) + return result + + return wrapper + + def last_schedule_ret( + self, ) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs, Any]: + _, _, ret = self.call_history["schedule"][-1] + return ret diff --git a/tests/prefix_caching/test_prefix_caching.py b/tests/prefix_caching/test_prefix_caching.py index 50723dbb610ac..8d16710f14585 100644 --- a/tests/prefix_caching/test_prefix_caching.py +++ b/tests/prefix_caching/test_prefix_caching.py @@ -2,10 +2,15 @@ Run `pytest tests/prefix_caching/test_prefix_caching.py`. """ + import pytest +from tests.conftest import VllmRunner +from tests.core.utils import SchedulerProxy, create_dummy_prompt from tests.kernels.utils import override_backend_env_variable from vllm import SamplingParams, TokensPrompt +from vllm.core.scheduler import Scheduler +from vllm.engine.llm_engine import LLMEngine from ..models.utils import check_outputs_equal @@ -27,6 +32,7 @@ @pytest.mark.parametrize("dtype", ["half"]) @pytest.mark.parametrize("max_tokens", [5]) @pytest.mark.parametrize("cached_position", [0, 1]) +@pytest.mark.parametrize("enable_chunked_prefill", [True, False]) @pytest.mark.parametrize("block_size", [16]) def test_mixed_requests( hf_runner, @@ -37,6 +43,7 @@ def test_mixed_requests( dtype: str, max_tokens: int, cached_position: int, + enable_chunked_prefill: bool, block_size: int, monkeypatch, ) -> None: @@ -55,6 +62,7 @@ def test_mixed_requests( model, dtype=dtype, enable_prefix_caching=True, + enable_chunked_prefill=enable_chunked_prefill, block_size=block_size, ) as vllm_model: # Run the first prompt so the cache is populated @@ -72,13 +80,13 @@ def test_mixed_requests( block_size) * block_size else: expected_num_cached_tokens = 0 - assert req_outputs[ - i].num_cached_tokens == expected_num_cached_tokens + assert ( + req_outputs[i].num_cached_tokens == expected_num_cached_tokens) - vllm_outputs = [ - (output.prompt_token_ids + list(output.outputs[0].token_ids), - output.prompt + output.outputs[0].text) for output in req_outputs - ] + vllm_outputs = [( + output.prompt_token_ids + list(output.outputs[0].token_ids), + output.prompt + output.outputs[0].text, + ) for output in req_outputs] check_outputs_equal( outputs_0_lst=hf_outputs, @@ -105,3 +113,89 @@ def test_unstable_prompt_sequence( for prompt in UNSTABLE_PROMPT_SEQUENCE: vllm_model.generate(TokensPrompt(prompt_token_ids=prompt), SamplingParams(max_tokens=1)) + + +@pytest.mark.parametrize("model", MODELS) +def test_fully_cached_prefill_needs_uncached_token(model): + block_size = 16 + max_num_batched_tokens = 16 + num_output_tokens = 5 + # Make a vllm engine + runner = VllmRunner( + model_name=model, + gpu_memory_utilization=0.7, + enable_chunked_prefill=True, + enforce_eager=True, + enable_prefix_caching=True, + block_size=block_size, + max_num_batched_tokens=max_num_batched_tokens, + max_num_seqs=max_num_batched_tokens, + ) + engine: LLMEngine = runner.model.llm_engine + + scheduler: Scheduler = SchedulerProxy(engine.scheduler[0]) # type: ignore + engine.scheduler[0] = scheduler + + # SeqA + seqA_tokens = list(range(2 * block_size)) + seqA, seq_groupA = create_dummy_prompt( + request_id="0", + prompt_tokens=seqA_tokens, + max_tokens=num_output_tokens, + 
block_size=block_size, + ) + + scheduler.add_seq_group(seq_groupA) + + assert seqA.data.get_num_computed_tokens() == 0 + + # Prefill seqA + while not seqA.is_finished(): + engine.step() + + # seqB + seqB_tokens = [t + 1 for t in seqA_tokens] # shift by 1 + seqB, seq_groupB = create_dummy_prompt( + request_id="1", + prompt_tokens=seqB_tokens, + max_tokens=num_output_tokens, + block_size=block_size, + ) + + # seqC is the same as seqA + seqC, seq_groupC = create_dummy_prompt( + request_id="2", + prompt_tokens=seqA_tokens, + max_tokens=num_output_tokens, + block_size=block_size, + ) + + scheduler.add_seq_group(seq_groupB) + scheduler.add_seq_group(seq_groupC) + + # Even seqC is fully cached, it should not be prefilled since we + # require at least 1 uncached token. + engine.step() + + sched_metas, sched_out, _ = scheduler.last_schedule_ret() + assert len(sched_out.scheduled_seq_groups) == 1 + assert (sched_out.scheduled_seq_groups[0].seq_group.request_id == + seq_groupB.request_id) + assert (sched_out.scheduled_seq_groups[0].token_chunk_size == + max_num_batched_tokens) + + # When seqB is finished, seqC could be prefilled. + while not seqB.is_finished(): + engine.step() + sched_metas, sched_out, _ = scheduler.last_schedule_ret() + assert len(sched_out.scheduled_seq_groups) == 1 + assert (sched_out.scheduled_seq_groups[0].seq_group.request_id == + seq_groupB.request_id) + + engine.step() + sched_metas, sched_out, _ = scheduler.last_schedule_ret() + assert len(sched_out.scheduled_seq_groups) == 1 + assert (sched_out.scheduled_seq_groups[0].seq_group.request_id == + seq_groupC.request_id) + assert sched_out.scheduled_seq_groups[0].token_chunk_size == len( + seqA_tokens) diff --git a/vllm/core/block/cpu_gpu_block_allocator.py b/vllm/core/block/cpu_gpu_block_allocator.py index 9727f6e19b84e..3197af3c2b7a4 100644 --- a/vllm/core/block/cpu_gpu_block_allocator.py +++ b/vllm/core/block/cpu_gpu_block_allocator.py @@ -306,14 +306,6 @@ def mark_blocks_as_computed(self, block_ids: List[int]) -> None: device = Device.GPU return self._allocators[device].mark_blocks_as_computed(block_ids) - def get_computed_block_ids(self, prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool) -> List[int]: - # Prefix caching only supported on GPU. - device = Device.GPU - return self._allocators[device].get_computed_block_ids( - prev_computed_block_ids, block_ids, skip_last_block_id) - def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: # Prefix caching only supported on GPU. 
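Taken together, the hunk above (which drops get_computed_block_ids) and the hunk below (which adds find_cached_blocks_prefix) move the device-aware allocator from a block-id interface to a hash-based prefix query: callers pass a sequence's content block hashes and get back the prefix of hashes already cached on the GPU allocator. A minimal sketch of the caller-side arithmetic, assuming a block size of 16 and a hypothetical FakeAllocator standing in for the real prefix-caching allocator (illustrative only, not vLLM code):

    from typing import List

    class FakeAllocator:
        """Stand-in allocator: pretends the given hashes are cached."""

        def __init__(self, cached: List[int]):
            self._cached = set(cached)

        def find_cached_blocks_prefix(self, block_hashes: List[int]) -> List[int]:
            # Return the longest prefix of block_hashes that is fully cached.
            prefix: List[int] = []
            for h in block_hashes:
                if h not in self._cached:
                    break
                prefix.append(h)
            return prefix

    block_size = 16
    block_hashes = [101, 202, 303]          # one content hash per full prompt block
    allocator = FakeAllocator(cached=[101, 202])
    cached_prefix = allocator.find_cached_blocks_prefix(block_hashes)
    num_cached_tokens = len(cached_prefix) * block_size
    print(num_cached_tokens)                # 32

The same cached-blocks-to-cached-tokens conversion shows up later in this patch, in ComputedBlocksTracker.get_num_cached_tokens and in BlockSpaceManager.get_common_computed_block_ids.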
@@ -342,6 +334,13 @@ def get_and_reset_swaps(self) -> List[Tuple[int, int]]: self._swap_mapping.clear() return list(mapping.items()) + def find_cached_blocks_prefix( + self, + block_hashes: List[int], + device: Device = Device.GPU, + ) -> List[int]: + return self._allocators[device].find_cached_blocks_prefix(block_hashes) + class NullBlock(Block): """ diff --git a/vllm/core/block/interfaces.py b/vllm/core/block/interfaces.py index 72bbab1dcea5d..06f4851af3466 100644 --- a/vllm/core/block/interfaces.py +++ b/vllm/core/block/interfaces.py @@ -159,12 +159,6 @@ def mark_blocks_as_accessed(self, block_ids: List[int], def mark_blocks_as_computed(self, block_ids: List[int]) -> None: pass - @abstractmethod - def get_computed_block_ids(self, prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool) -> List[int]: - pass - @abstractmethod def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: @@ -192,6 +186,13 @@ def get_prefix_cache_hit_rate(self) -> float: class NoFreeBlocksError(ValueError): pass + @abstractmethod + def find_cached_blocks_prefix( + self, + block_hashes: List[int], + ) -> List[int]: + pass + class DeviceAwareBlockAllocator(ABC): @@ -207,9 +208,12 @@ def allocate_immutable_block(self, prev_block: Optional[Block], pass @abstractmethod - def allocate_immutable_blocks(self, prev_block: Optional[Block], - block_token_ids: List[List[int]], - device: Device) -> List[Block]: + def allocate_immutable_blocks( + self, + prev_block: Optional[Block], + block_token_ids: List[List[int]], + device: Device, + ) -> List[Block]: pass @abstractmethod @@ -246,12 +250,6 @@ def mark_blocks_as_accessed(self, block_ids: List[int], def mark_blocks_as_computed(self, block_ids: List[int]) -> None: pass - @abstractmethod - def get_computed_block_ids(self, prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool) -> List[int]: - pass - @abstractmethod def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: @@ -284,3 +282,11 @@ def allocate_or_get_null_block(self) -> Block: def get_prefix_cache_hit_rate(self, device: Device) -> float: """Prefix cache hit rate. -1 means not supported or disabled.""" pass + + @abstractmethod + def find_cached_blocks_prefix( + self, + block_hashes: List[int], + device: Device = Device.GPU, + ) -> List[int]: + pass diff --git a/vllm/core/block/naive_block.py b/vllm/core/block/naive_block.py index 9341a518d11c6..a2af5ad6362c1 100644 --- a/vllm/core/block/naive_block.py +++ b/vllm/core/block/naive_block.py @@ -262,13 +262,6 @@ def mark_blocks_as_computed(self, block_ids: List[int]) -> None: """ pass - def get_computed_block_ids(self, prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool) -> List[int]: - """No prefix caching here => return empty list - """ - return [] - def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: """Determine blocks that can be skipped in prefill. @@ -329,6 +322,10 @@ def swap_in(self, blocks: List[Block]) -> None: def get_prefix_cache_hit_rate(self) -> float: return -1 + def find_cached_blocks_prefix(self, block_hashes: List[int]) -> List[int]: + # Not applicable for naive block allocator. 
+ return [] + class NaiveBlock(Block): """An implementation of the Block class that does not support prefix diff --git a/vllm/core/block/prefix_caching_block.py b/vllm/core/block/prefix_caching_block.py index 57527e39b9bdd..b736167f6ceb4 100644 --- a/vllm/core/block/prefix_caching_block.py +++ b/vllm/core/block/prefix_caching_block.py @@ -1,13 +1,18 @@ """Token blocks.""" +import sys +from bisect import bisect_left from os.path import commonprefix -from typing import Dict, FrozenSet, Iterable, List, Optional, Set, Tuple +from typing import (Callable, Dict, FrozenSet, Iterable, List, Optional, Set, + Tuple) from vllm.core.block.common import (CacheMetricData, CopyOnWriteTracker, get_all_blocks_recursively) -from vllm.core.block.interfaces import Block, BlockAllocator, BlockId, Device +from vllm.core.block.interfaces import (Block, BlockAllocator, BlockId, Device, + DeviceAwareBlockAllocator) from vllm.core.block.naive_block import (BlockPool, NaiveBlock, NaiveBlockAllocator) from vllm.core.evictor import EvictionPolicy, Evictor, make_evictor +from vllm.sequence import Sequence PrefixHash = int @@ -534,26 +539,6 @@ def block_is_computed(self, block_id: int) -> bool: else: return block_id in self.evictor - def get_computed_block_ids(self, - prev_computed_block_ids: List[int], - block_ids: List[int], - skip_last_block_id: bool = True) -> List[int]: - prev_prefix_size = len(prev_computed_block_ids) - cur_size = len(block_ids) - if skip_last_block_id: - cur_size -= 1 - - # Sanity checks - assert cur_size >= 0 - assert prev_prefix_size <= cur_size - - ret = prev_computed_block_ids - for i in range(prev_prefix_size, cur_size): - block_id = block_ids[i] - if self.block_is_computed(block_id): - ret.append(block_id) - return ret - def get_common_computed_block_ids( self, computed_seq_block_ids: List[List[int]]) -> List[int]: """Return the block ids that are common for a given sequence group. @@ -634,6 +619,47 @@ def swap_in(self, blocks: List[Block]) -> None: block.block_id = block_id # Assign block_id + def find_cached_blocks_prefix(self, block_hashes: List[int]) -> List[int]: + """ + Given a list of block hashes, return the prefix of the block hashes that + are all cached. + + Since a block's block hash includes the hashes of all previous blocks, + and we only allocate/deallocate blocks in the entire sequence, so if a + block is cached, then all previous blocks are also cached. With this + property, we can use binary search to find the prefix of cached blocks. + + Args: + block_hashes (List[int]): The list of block hashes. + + Returns: + List[int]: The prefix of the `block_hashes` that are cached. + """ + + def _block_is_cached(block_hash: PrefixHash) -> bool: + if block_hash not in self._cached_blocks: + return False + + cached_block_id = self._cached_blocks[block_hash] + # We only consider the blocks that are marked as computed. + return self.block_is_computed(cached_block_id) + + def _bisect_left(a, x, key: Callable[[PrefixHash], bool]) -> int: + + # python <= 3.10 don't have the key argument + if sys.version_info < (3, 10): + a = [key(e) for e in a] + return bisect_left(a, x) + else: + return bisect_left(a, x, key=key) + + # Look for the first block that's not cached, and returns the prefix + # i.e. blocks that are cached. + idx = _bisect_left(block_hashes, + True, + key=lambda x: not _block_is_cached(x)) + return block_hashes[:idx] + class PrefixCachingBlock(Block): """A block implementation that supports prefix caching. 
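The docstring of find_cached_blocks_prefix above relies on a monotonicity property: because each block's hash folds in the hashes of all previous blocks, and blocks are allocated and freed for the whole sequence at once, "is cached" can only flip from true to false once along the hash list, which is what makes the binary search valid. A self-contained sketch of the same idea, assuming a plain set of cached hashes in place of the allocator's _cached_blocks/evictor bookkeeping (illustrative only, not vLLM code; the key= argument of bisect_left needs Python 3.10+, which is why the hunk above keeps a fallback for older interpreters):

    from bisect import bisect_left
    from typing import List, Set

    def longest_cached_prefix(block_hashes: List[int], cached: Set[int]) -> List[int]:
        # Under the prefix-caching invariant, "h not in cached" is monotone over
        # the list (False..False, True..True), so bisect_left locates the first
        # miss in O(log N) probes instead of a linear scan.
        first_miss = bisect_left(block_hashes, True, key=lambda h: h not in cached)
        return block_hashes[:first_miss]

    # First two blocks of the sequence are cached, the rest are not.
    print(longest_cached_prefix([11, 22, 33, 44], cached={11, 22}))  # [11, 22]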
@@ -843,86 +869,126 @@ def hash_block_tokens(is_first_block: bool, prev_block_hash: Optional[int], class ComputedBlocksTracker: - """Handles caching of per-sequence computed block ids. - When a sequence appears for the first time, it traverses all of the - blocks and detects the prefix of blocks that is computed. On the - subsequent times, it only traverses the new blocks that were added - and updates the already recorded prefix of blocks with the newly - computed blocks. - - To avoid redundant traversals, the algorithm also detects when there - is a "gap" in the computed prefix. For example, if we have blocks = - [1,2,3,4,5], and we have detected [1,2,3] as the computed prefix, then - we won't try to add more computed blocks to [1,2,3] in this sequence - iteration, and will add more computed blocks only after the sequence is - freed and reused again. - - Note that currently, for a given sequence, we also skip the last - block id for caching purposes, to avoid caching of a full sequence """ + Tracks the computed blocks for each sequence. - def __init__(self, allocator): - self._allocator = allocator - self._cached_computed_seq_blocks: Dict[int, Tuple[List[int], - bool]] = {} + Internally, it maintains a map from sequence id to the list of block hashes + for the sequence. We cache the hashes of the full blocks for each sequence, + and make sure the hash is calculated in the same way as the allocator. + When a sequence is being decoded, we also update the sequence's hash + accordingly and incrementally. - def add_seq(self, seq_id: int) -> None: - """Start tracking seq_id - """ - assert seq_id not in self._cached_computed_seq_blocks - self._cached_computed_seq_blocks[seq_id] = ([], False) - - def remove_seq(self, seq_id: int) -> None: - """Stop tracking seq_id - """ - assert seq_id in self._cached_computed_seq_blocks - del self._cached_computed_seq_blocks[seq_id] - - def get_cached_computed_blocks_and_update( - self, seq_id: int, block_ids: List[int]) -> List[int]: - """ Look at the class documentation for details - """ - # Ensure seq_id is already tracked - assert seq_id in self._cached_computed_seq_blocks - - # Get cached data (may be empty on the first time) - prev_computed_block_ids, has_gap = self._cached_computed_seq_blocks[ - seq_id] - - if has_gap: - # When gap is detected, we do not add more computed blocks at this - # sequence iteration - return prev_computed_block_ids - - # We do not consider the last block id for caching purposes. - num_cur_blocks = len(block_ids) - 1 - assert num_cur_blocks >= 0 - - if len(prev_computed_block_ids) >= num_cur_blocks: - # Cache HIT - assert len(prev_computed_block_ids) == num_cur_blocks - return prev_computed_block_ids - - # If here, then we may possibly add more computed blocks. As a result, - # traverse the additional blocks after prev_computed_block_ids to - # detect more computed blocks and add them. - - # Incremental init for seq_id => Look only at the new blocks - computed_block_ids = self._allocator.get_computed_block_ids( # noqa: E501 - prev_computed_block_ids, - block_ids, - skip_last_block_id= - True, # We skip last block id to avoid caching of full seq - ) + From the sequence hash, with prefix caching enabled, we could also calculate + the number of cached tokens for the sequence by looking up the number of + cached block hashes in the allocator. 
+ """ - # Detect if there is a "gap" - has_gap = len(computed_block_ids) < num_cur_blocks + def __init__( + self, + allocator: DeviceAwareBlockAllocator, + block_size: int, + enable_caching: bool, + ): + self._allocator = allocator + self._block_size = block_size + self._enable_caching = enable_caching + + # A map from seq_id to the list of block hashes for the + # sequence. This is so that we don't have to recompute the block hashes + # for the sequence when we need to check if the sequence is cached. + # Note a block that's not full will not have its hash calculated and + # recorded. + self._seq_id_to_blocks_hashes: Dict[int, List[int]] = {} + + # A map from seq_id to the number of tokens that are cached for the + # sequence. + # We need this so that a sequence in continuous prefill doesn't + # accidentally see its cached token count change. See comments in + # `get_num_cached_tokens` for more details. + self._seq_id_to_num_tokens_computed: Dict[int, int] = {} + + def _update_seq_hashes(self, seq: Sequence) -> None: + """Incrementally update the sequence's block hashes and record them.""" + assert self._enable_caching + + block_hashes_recorded = self._seq_id_to_blocks_hashes.get( + seq.seq_id, []) + cur_num_blocks_recorded = len(block_hashes_recorded) + token_ids = seq.get_token_ids() + assert len(token_ids) >= cur_num_blocks_recorded * self._block_size, ( + f"The sequence has {len(token_ids)} tokens, but" + f" already recorded {cur_num_blocks_recorded} blocks. " + "This should not happen since we assume blocks are " + "only appended other than recomputation. When the sequence is " + "recomputed, we should have removed the info of the old blocks.") + # Update the computed block hashes for the sequence. Since only full + # blocks are considered as "computed", we take floor here. + num_computed_blocks = len(token_ids) // self._block_size + + # We need to know the hash of the previous block to compute the hash of + # the current block so that blocks could be uniquely identified across + # sequences of prefixes. + prev_block_hash = (None if cur_num_blocks_recorded == 0 else + block_hashes_recorded[-1]) + # Only update the computed block hashes for the new blocks + for i in range(cur_num_blocks_recorded, num_computed_blocks): + assert len(token_ids) >= (i + 1) * self._block_size + block_token_ids = token_ids[i * self._block_size:(i + 1) * + self._block_size] + # This has to be kept in sync with the allocator's hash + # calculation. + block_hash = PrefixCachingBlock.hash_block_tokens( + is_first_block=prev_block_hash is None, + prev_block_hash=prev_block_hash, + cur_block_token_ids=block_token_ids, + ) + block_hashes_recorded.append(block_hash) + prev_block_hash = block_hash + + self._seq_id_to_blocks_hashes[seq.seq_id] = block_hashes_recorded + + def get_num_cached_tokens(self, seq: Sequence) -> int: + if not self._enable_caching: + return 0 + + # We always try to update the sequence hashes on the fly. + # This is to ensure that we don't miss any cached tokens for the + # sequence during decode. + # This routine should only update hash for any new blocks too. + self._update_seq_hashes(seq) + + num_computed_tokens_prev = self._seq_id_to_num_tokens_computed.get( + seq.seq_id, None) + + # TODO(rickyx): This hack could be removed once we mark blocks as + # computed correctly with chunked prefills. + if num_computed_tokens_prev is not None and seq.is_prefill(): + # For a sequence that is still in prefill, we don't + # recompute the number of cached tokens. 
+ # This also handles correctly chunked prefill since currently + # we mark blocks as computed even if the sequence is still partially + # prefilled. So a continuously prefilled sequence should not + # see its cached token count change while running. + return num_computed_tokens_prev + + block_hashes = self._seq_id_to_blocks_hashes[seq.seq_id] + + # This is O(logN), where N is the number of blocks. + num_cached_blocks = len( + self._allocator.find_cached_blocks_prefix(block_hashes)) + num_cached_tokens = num_cached_blocks * self._block_size + self._seq_id_to_num_tokens_computed[seq.seq_id] = num_cached_tokens + return num_cached_tokens - # Record - self._cached_computed_seq_blocks[seq_id] = (computed_block_ids, - has_gap) + def remove_seq(self, seq_id: int) -> None: + """Stop tracking the sequence.""" + if not self._enable_caching: + return + assert seq_id in self._seq_id_to_blocks_hashes + del self._seq_id_to_blocks_hashes[seq_id] - return computed_block_ids + assert seq_id in self._seq_id_to_num_tokens_computed + del self._seq_id_to_num_tokens_computed[seq_id] class LastAccessBlocksTracker: diff --git a/vllm/core/block_manager.py b/vllm/core/block_manager.py index 21f4c63b6572d..209487c6b4f9e 100644 --- a/vllm/core/block_manager.py +++ b/vllm/core/block_manager.py @@ -101,7 +101,7 @@ def __init__( self.cross_block_tables: Dict[EncoderSeqId, BlockTable] = {} self._computed_blocks_tracker = ComputedBlocksTracker( - self.block_allocator) + self.block_allocator, self.block_size, self.enable_caching) self._last_access_blocks_tracker = LastAccessBlocksTracker( self.block_allocator) @@ -170,7 +170,6 @@ def allocate(self, seq_group: SequenceGroup) -> None: self.block_tables[seq.seq_id] = block_table # Track seq - self._computed_blocks_tracker.add_seq(seq.seq_id) self._last_access_blocks_tracker.add_seq(seq.seq_id) # Assign the block table for each sequence. @@ -178,7 +177,6 @@ def allocate(self, seq_group: SequenceGroup) -> None: self.block_tables[seq.seq_id] = block_table.fork() # Track seq - self._computed_blocks_tracker.add_seq(seq.seq_id) self._last_access_blocks_tracker.add_seq(seq.seq_id) # Allocate cross-attention block table for encoder sequence @@ -314,11 +312,13 @@ def get_common_computed_block_ids( """ computed_seq_block_ids = [] for seq in seqs: - computed_seq_block_ids.append( - self._computed_blocks_tracker. - get_cached_computed_blocks_and_update( - seq.seq_id, - self.block_tables[seq.seq_id].physical_block_ids)) + all_blocks = self.block_tables[seq.seq_id].physical_block_ids + num_cached_tokens = ( + self._computed_blocks_tracker.get_num_cached_tokens(seq)) + assert num_cached_tokens % self.block_size == 0 + num_cached_blocks = num_cached_tokens // self.block_size + computed_block_ids = all_blocks[:num_cached_blocks] + computed_seq_block_ids.append(computed_block_ids) # NOTE(sang): This assumes seq_block_ids doesn't contain any None. 
return self.block_allocator.get_common_computed_block_ids( @@ -332,7 +332,6 @@ def fork(self, parent_seq: Sequence, child_seq: Sequence) -> None: self.block_tables[child_seq.seq_id] = src_block_table.fork() # Track child seq - self._computed_blocks_tracker.add_seq(child_seq.seq_id) self._last_access_blocks_tracker.add_seq(child_seq.seq_id) def can_swap_in(self, seq_group: SequenceGroup, @@ -503,3 +502,9 @@ def _can_swap(self, return AllocStatus.OK else: return AllocStatus.LATER + + def get_num_cached_tokens(self, seq: Sequence) -> int: + """Get the number of tokens in blocks that are already computed and + cached in the block manager for the sequence. + """ + return self._computed_blocks_tracker.get_num_cached_tokens(seq) diff --git a/vllm/core/interfaces.py b/vllm/core/interfaces.py index 9501a516bf020..b10b8d3f4a5bf 100644 --- a/vllm/core/interfaces.py +++ b/vllm/core/interfaces.py @@ -121,3 +121,7 @@ def mark_blocks_as_computed(self, seq_group: SequenceGroup, def get_prefix_cache_hit_rate(self, device: Device) -> float: """Prefix cache hit rate. -1 means not supported or disabled.""" pass + + @abstractmethod + def get_num_cached_tokens(self, seq: Sequence) -> int: + pass diff --git a/vllm/core/placeholder_block_space_manager.py b/vllm/core/placeholder_block_space_manager.py index a337392bbed53..26d42b7f1790e 100644 --- a/vllm/core/placeholder_block_space_manager.py +++ b/vllm/core/placeholder_block_space_manager.py @@ -89,3 +89,6 @@ def mark_blocks_as_computed(self, seq_group: SequenceGroup, def get_prefix_cache_hit_rate(self, device: Device) -> float: return -1 + + def get_num_cached_tokens(self, seq: Sequence) -> int: + return 0 diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index af4671ec29be9..841e65c488fc6 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -56,11 +56,16 @@ class SchedulingBudget: max_num_seqs: int _request_ids_num_batched_tokens: Set[str] = field(default_factory=set) _request_ids_num_curr_seqs: Set[str] = field(default_factory=set) + # Number of cached tokens in the batch. + _num_cached_tokens: int = 0 + # Number of actual non-cached tokens in the batch. _num_batched_tokens: int = 0 _num_curr_seqs: int = 0 def can_schedule(self, *, num_new_tokens: int, num_new_seqs: int): - assert num_new_tokens != 0 + # We allow num_new_tokens to be 0 when the entire sequence has + # been cached. 
+ assert num_new_tokens >= 0 assert num_new_seqs != 0 return (self.num_batched_tokens + num_new_tokens <= self.token_budget and self.num_curr_seqs + num_new_seqs <= self.max_num_seqs) @@ -68,12 +73,18 @@ def can_schedule(self, *, num_new_tokens: int, num_new_seqs: int): def remaining_token_budget(self): return self.token_budget - self.num_batched_tokens - def add_num_batched_tokens(self, req_id: str, num_batched_tokens: int): + def add_num_batched_tokens(self, + req_id: str, + num_batched_tokens: int, + num_cached_tokens: int = 0): if req_id in self._request_ids_num_batched_tokens: return + assert num_cached_tokens >= 0 + assert num_batched_tokens >= 0 self._request_ids_num_batched_tokens.add(req_id) self._num_batched_tokens += num_batched_tokens + self._num_cached_tokens += num_cached_tokens def subtract_num_batched_tokens(self, req_id: str, num_batched_tokens: int): @@ -101,6 +112,10 @@ def num_batched_tokens(self): def num_curr_seqs(self): return self._num_curr_seqs + @property + def num_cached_tokens(self): + return self._num_cached_tokens + @dataclass class ScheduledSequenceGroup: @@ -541,9 +556,19 @@ def _schedule_running( assert len(self._async_stopped) == 0 while running_queue: seq_group = running_queue[0] - num_running_tokens = self._get_num_new_tokens( - seq_group, SequenceStatus.RUNNING, enable_chunking, budget) - + # We discard the cached tokens info here because we don't need it + # for running sequence: + # 1. If a sequence is running with chunked prefill, the cached + # tokens info was already used for the first prefill. + # 2. If a sequence is running with non-chunked prefill, then + # there it's a decoding sequence, and the cached tokens info is + # irrelevant. + num_uncached_new_tokens, _ = ( + self._get_num_new_uncached_and_cached_tokens( + seq_group, SequenceStatus.RUNNING, enable_chunking, + budget)) + + num_running_tokens = num_uncached_new_tokens if num_running_tokens == 0: # No budget => Stop break @@ -715,13 +740,15 @@ def _schedule_swapped( # The total number of sequences in the RUNNING state should not # exceed the maximum number of sequences. 
num_new_seqs = seq_group.get_max_num_running_seqs() - num_new_tokens = self._get_num_new_tokens(seq_group, - SequenceStatus.SWAPPED, - enable_chunking, budget) - - if (num_new_tokens == 0 - or not budget.can_schedule(num_new_tokens=num_new_tokens, - num_new_seqs=num_new_seqs)): + num_new_tokens_uncached, num_new_tokens_cached = ( + self._get_num_new_uncached_and_cached_tokens( + seq_group, SequenceStatus.SWAPPED, enable_chunking, + budget)) + + if num_new_tokens_uncached == 0 or not budget.can_schedule( + num_new_tokens=num_new_tokens_uncached, + num_new_seqs=num_new_seqs, + ): break if lora_int_id > 0 and curr_loras is not None: @@ -732,12 +759,19 @@ def _schedule_swapped( is_prefill = seq_group.is_prefill() if is_prefill: prefill_seq_groups.append( - ScheduledSequenceGroup(seq_group, - token_chunk_size=num_new_tokens)) + ScheduledSequenceGroup( + seq_group, + token_chunk_size=num_new_tokens_uncached + + num_new_tokens_cached, + )) else: decode_seq_groups.append( ScheduledSequenceGroup(seq_group, token_chunk_size=1)) - budget.add_num_batched_tokens(seq_group.request_id, num_new_tokens) + budget.add_num_batched_tokens( + seq_group.request_id, + num_batched_tokens=num_new_tokens_uncached, + num_cached_tokens=num_new_tokens_cached, + ) budget.add_num_seqs(seq_group.request_id, num_new_seqs) swapped_queue.extendleft(leftover_swapped) @@ -803,26 +837,30 @@ def _schedule_priority_preemption( if waiting_queue: seq_group = waiting_queue.popleft() num_new_seqs = seq_group.get_max_num_running_seqs() - num_new_tokens = self._get_num_new_tokens(seq_group, - SequenceStatus.WAITING, - False, budget) + num_new_tokens_uncached, _ = ( + self._get_num_new_uncached_and_cached_tokens( + seq_group, SequenceStatus.WAITING, False, budget)) #Only preempt if priority inversion exists while running_queue and self._get_priority( running_queue[-1]) > self._get_priority(seq_group): #Only preempt if waiting sequence cannot be allocated can_allocate = self.block_manager.can_allocate(seq_group) - if (num_new_tokens and can_allocate == AllocStatus.OK - and budget.can_schedule(num_new_tokens=num_new_tokens, - num_new_seqs=num_new_seqs)): + if (num_new_tokens_uncached > 0 + and can_allocate == AllocStatus.OK + and budget.can_schedule( + num_new_tokens=num_new_tokens_uncached, + num_new_seqs=num_new_seqs, + )): break #Adjust budget to remove the victim sequence group vseq_group = running_queue.pop() - num_running_tokens = self._get_num_new_tokens( - vseq_group, SequenceStatus.RUNNING, False, budget) - budget.subtract_num_batched_tokens(vseq_group.request_id, - num_running_tokens) + num_running_tokens_uncached, _ = ( + self._get_num_new_uncached_and_cached_tokens( + vseq_group, SequenceStatus.RUNNING, False, budget)) + budget.subtract_num_batched_tokens( + vseq_group.request_id, num_running_tokens_uncached) num_running_seqs = vseq_group.get_max_num_running_seqs() budget.subtract_num_seqs(vseq_group.request_id, num_running_seqs) @@ -882,9 +920,12 @@ def _schedule_prefills( assert len(waiting_seqs) == 1, ( "Waiting sequence group should have only one prompt " "sequence.") - num_new_tokens = self._get_num_new_tokens(seq_group, - SequenceStatus.WAITING, - enable_chunking, budget) + num_new_tokens_uncached, num_new_tokens_cached = ( + self._get_num_new_uncached_and_cached_tokens( + seq_group, SequenceStatus.WAITING, enable_chunking, + budget)) + num_new_tokens = num_new_tokens_uncached + num_new_tokens_cached + if not enable_chunking: num_prompt_tokens = waiting_seqs[0].get_len() assert num_new_tokens == num_prompt_tokens @@ 
-935,10 +976,18 @@ def _schedule_prefills( waiting_queue.popleft() continue + if (budget.num_batched_tokens >= + self.scheduler_config.max_num_batched_tokens): + # We've reached the budget limit - since there might be + # continuous prefills in the running queue, we should break + # to avoid scheduling any new prefills. + break + num_new_seqs = seq_group.get_max_num_running_seqs() - if (num_new_tokens == 0 - or not budget.can_schedule(num_new_tokens=num_new_tokens, - num_new_seqs=num_new_seqs)): + if num_new_tokens_uncached == 0 or not budget.can_schedule( + num_new_tokens=num_new_tokens_uncached, + num_new_seqs=num_new_seqs, + ): break # Can schedule this request. @@ -967,7 +1016,11 @@ def _schedule_prefills( seq_groups.append( ScheduledSequenceGroup(seq_group=seq_group, token_chunk_size=num_new_tokens)) - budget.add_num_batched_tokens(seq_group.request_id, num_new_tokens) + budget.add_num_batched_tokens( + seq_group.request_id, + num_batched_tokens=num_new_tokens_uncached, + num_cached_tokens=num_new_tokens_cached, + ) budget.add_num_seqs(seq_group.request_id, num_new_seqs) # Queue requests that couldn't be scheduled. @@ -1075,7 +1128,8 @@ def _schedule_default(self) -> SchedulerOutputs: return SchedulerOutputs( scheduled_seq_groups=scheduled_seq_groups, num_prefill_groups=num_prefill_groups, - num_batched_tokens=budget.num_batched_tokens, + num_batched_tokens=budget.num_batched_tokens + + budget.num_cached_tokens, blocks_to_swap_in=swapped_in.blocks_to_swap_in, blocks_to_swap_out=running_scheduled.blocks_to_swap_out, blocks_to_copy=blocks_to_copy, @@ -1119,7 +1173,6 @@ def _schedule_chunked_prefill(self) -> SchedulerOutputs: running_scheduled.swapped_out) == 0: swapped_in = self._schedule_swapped(budget, curr_loras) - # Schedule new prefills. prefills = self._schedule_prefills(budget, curr_loras, enable_chunking=True) @@ -1157,7 +1210,8 @@ def _schedule_chunked_prefill(self) -> SchedulerOutputs: num_prefill_groups=(len(prefills.seq_groups) + len(swapped_in.prefill_seq_groups) + len(running_scheduled.prefill_seq_groups)), - num_batched_tokens=budget.num_batched_tokens, + num_batched_tokens=budget.num_batched_tokens + + budget.num_cached_tokens, blocks_to_swap_in=swapped_in.blocks_to_swap_in, blocks_to_swap_out=running_scheduled.blocks_to_swap_out, blocks_to_copy=running_scheduled.blocks_to_copy + @@ -1584,64 +1638,178 @@ def _get_num_lookahead_slots(self, is_prefill: bool, return self.scheduler_config.num_lookahead_slots - def _get_num_new_tokens(self, seq_group: SequenceGroup, - status: SequenceStatus, enable_chunking: bool, - budget: SchedulingBudget) -> int: - """Get the next new tokens to compute for a given sequence group - that's in a given `status`. + def _get_num_new_uncached_and_cached_tokens( + self, + seq_group: SequenceGroup, + status: SequenceStatus, + enable_chunking: bool, + budget: SchedulingBudget, + ) -> Tuple[int, int]: + """ + Returns the number of new uncached and cached tokens to schedule for a + given sequence group that's in a given `status`. The API could chunk the number of tokens to compute based on `budget` if `enable_chunking` is True. If a sequence group has multiple sequences (e.g., running beam search), it means it is in decoding phase, so chunking doesn't happen. - Returns 0 if the new token cannot be computed due to token budget. + Returns (0, 0) if the new token cannot be computed due to token budget. + + The cached tokens's blocks are already computed, and the attention + backend will reuse the cached blocks rather than recomputing them. 
So + the scheduler could schedule these cached tokens "for free". + + Args: + seq_group: The sequence group to get the number of new tokens to + schedule. + status: The status of the sequences to get the number of new tokens + to schedule. + enable_chunking: Whether to chunk the number of tokens to compute. + budget: The budget to chunk the number of tokens to compute. + + + Returns: + A tuple of two ints. The first int is the number of new uncached + tokens to schedule. The second int is the number of cached tokens. + If no more new tokens can be scheduled, returns (0, 0). """ - num_new_tokens = 0 + num_cached_new_tokens = 0 + num_uncached_new_tokens = 0 + seqs = seq_group.get_seqs(status=status) + # Compute the number of new uncached and cached tokens for + # each sequence. for seq in seqs: - num_new_tokens += seq.get_num_new_tokens() - assert num_new_tokens > 0 - # Chunk if a running request cannot fit in the given budget. - # If number of seq > 1, it means it is doing beam search - # in a decode phase. Do not chunk. + if not seq.is_prefill(): + # Decode sequences should always just have 1 uncached token + # TODO(rickyx): Actually is this still correct for multi-step? + num_uncached_new_tokens += 1 + continue + + num_computed_tokens_seq = seq.get_num_computed_tokens() + all_num_new_tokens_seq = seq.get_len() - num_computed_tokens_seq + if not self.cache_config.enable_prefix_caching: + # If prefix caching is not enabled, all new tokens are uncached. + num_uncached_new_tokens += all_num_new_tokens_seq + continue + + # NOTE: the cache token might be currently in a block that's in an + # evictor meaning that it's not yet allocated. However, we don't + # exclude such tokens in the cache count because it will be + # guaranteed to be allocated later if the sequence can be allocated. + num_cached_tokens_seq = self.block_manager.get_num_cached_tokens( + seq) + + # Sanity check. + if num_cached_tokens_seq < num_computed_tokens_seq: + # This should only happen with chunked prefill, and + # the seq is still in prefill. The `num_cached_tokens_seq` + # is the value we calculated on scheduling the first prefill. + # For subsequent continuous prefill steps, we cached the + # number of cache tokens for the sequence so the cached token + # count could be less than the number of computed tokens. + # See comments on `ComputedBlocksTracker` for more details. + assert ( + seq.is_prefill() and seq.status == SequenceStatus.RUNNING + and self.scheduler_config.chunked_prefill_enabled + ), ("Number of cached tokens should not be less than the " + "number of computed tokens for a sequence that's still " + f"in prefill. But there are {num_cached_tokens_seq} cached " + f"tokens and {num_computed_tokens_seq} computed tokens " + f"for sequence {seq.seq_id}.") + + num_cached_new_tokens_seq = max( + 0, num_cached_tokens_seq - num_computed_tokens_seq) + num_uncached_new_tokens_seq = (all_num_new_tokens_seq - + num_cached_new_tokens_seq) + + num_uncached_new_tokens += num_uncached_new_tokens_seq + num_cached_new_tokens += num_cached_new_tokens_seq + + if num_uncached_new_tokens == 0 and num_cached_new_tokens > 0: + # For a fully cached hit sequence, we actually need to recompute the + # last token. So we need at least 1 uncached token to schedule. + # See ModelRunner._compute_for_prefix_cache_hit for more details. 
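# For example, a prompt whose 512 tokens are all cache hits would be scheduled
# as 511 cached tokens plus 1 uncached token, so the final token is recomputed.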
+ num_uncached_new_tokens = 1 + num_cached_new_tokens -= 1 + if enable_chunking and len(seqs) == 1: - remaining_token_budget = budget.remaining_token_budget() - if self.scheduler_config.is_multi_step: - # The current multi-step + chunked prefill capability does - # not actually support chunking prompts. - # - # Therefore, `num_new_tokens` is computed in the same fashion - # for both multi-step+chunked-prefill & - # multi-step+chunked-prefill+APC - # - # Prompts with more tokens than the current remaining budget - # are postponed to future scheduler steps - if num_new_tokens > self._get_prompt_limit(seq_group): - # If the seq_group is in prompt-stage, pass the - # num_new_tokens as-is so the caller can ignore - # the sequence. - pass - else: - num_new_tokens = 0 \ - if num_new_tokens > remaining_token_budget \ - else num_new_tokens - elif self.cache_config.enable_prefix_caching: - # When prefix caching is enabled, we always allocate - # the number of new tokens that is dividable by the block - # size to avoid partial block matching. - block_size = self.cache_config.block_size - remainder = budget.token_budget % block_size - if remainder != 0: - raise ValueError("When enabling chunked prefill and " - "prefix caching, max_num_batched_tokens " - "(chunk size) must be dividable by " - "block size, but got chunk_size " - f"({budget.token_budget}) % block_size " - f"({block_size}) = {remainder}") - if remaining_token_budget < num_new_tokens: - num_new_tokens = (remaining_token_budget // - block_size) * block_size - else: - num_new_tokens = min(num_new_tokens, remaining_token_budget) + # Chunk if a running request cannot fit in the given budget. + # If number of seq > 1, it means it is doing beam search + # in a decode phase. Do not chunk. + num_uncached_new_tokens = self._chunk_new_tokens_to_schedule( + self.scheduler_config, + self.cache_config, + budget, + self._get_prompt_limit(seq_group), + num_uncached_new_tokens, + ) + + return num_uncached_new_tokens, num_cached_new_tokens + + @staticmethod + def _chunk_new_tokens_to_schedule( + scheduler_config: SchedulerConfig, + cache_config: CacheConfig, + budget: SchedulingBudget, + prompt_limit: int, + num_new_tokens: int, + ) -> int: + """ + Chunks the number of new tokens to schedule based on the budget when + chunked prefill is enabled. + + Args: + scheduler_config: The scheduler config. + cache_config: The cache config. + budget: The budget to chunk the number of tokens to compute. + prompt_limit: The maximum number of tokens allowed in a prompt. + num_new_tokens: The number of new tokens to schedule. + + Returns: + The number of new tokens to schedule after chunking. + """ + remaining_token_budget = budget.remaining_token_budget() + if scheduler_config.is_multi_step: + # The current multi-step + chunked prefill capability does + # not actually support chunking prompts. + # + # Therefore, `num_new_tokens` is computed in the same fashion + # for both multi-step+chunked-prefill & + # multi-step+chunked-prefill+APC + # + # Prompts with more tokens than the current remaining budget + # are postponed to future scheduler steps + if num_new_tokens > prompt_limit: + # If the seq_group is in prompt-stage, pass the + # num_new_tokens as-is so the caller can ignore + # the sequence. + return num_new_tokens + + return (0 if num_new_tokens > remaining_token_budget else + num_new_tokens) + + if cache_config.enable_prefix_caching: + # Adjust the remaining token budget to be divisible by the block + # size when prefix caching is enabled. 
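# For example, with block_size = 16, a remaining budget of 100 tokens is
# rounded down to 96 before num_new_tokens is capped to it, so a partially
# matched block is never scheduled.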
+ + # When prefix caching is enabled, we always allocate + # the number of new tokens that is dividable by the block + # size to avoid partial block matching. + block_size = cache_config.block_size + remainder = budget.token_budget % block_size + if remainder != 0: + raise ValueError("When enabling chunked prefill and " + "prefix caching, max_num_batched_tokens " + "(chunk size) must be dividable by " + "block size, but got chunk_size " + f"({budget.token_budget}) % block_size " + f"({block_size}) = {remainder}") + # Round down to block size. + remaining_token_budget = (remaining_token_budget // block_size * + block_size) + + num_new_tokens = min(num_new_tokens, remaining_token_budget) + return num_new_tokens diff --git a/vllm/sequence.py b/vllm/sequence.py index 3b41d25a2fe42..a1cc8fc3b09de 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -579,6 +579,9 @@ def get_num_new_tokens(self) -> int: return 1 return self.data.get_num_uncomputed_tokens() + def get_num_computed_tokens(self) -> int: + return self.data.get_num_computed_tokens() + def is_prefill(self) -> bool: return self.data.stage == SequenceStage.PREFILL From c8acd80548c77bd5d6302353708dd16ea705f031 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sat, 23 Nov 2024 13:25:09 +0800 Subject: [PATCH 116/397] [2/N] handling placeholders in merged multi-modal processor (#10485) Signed-off-by: DarkLight1337 --- tests/multimodal/test_processing.py | 370 ++++++++++++++ tests/multimodal/test_utils.py | 3 +- vllm/multimodal/inputs.py | 9 +- vllm/multimodal/processing.py | 720 ++++++++++++++++++++++------ vllm/utils.py | 20 +- 5 files changed, 975 insertions(+), 147 deletions(-) create mode 100644 tests/multimodal/test_processing.py diff --git a/tests/multimodal/test_processing.py b/tests/multimodal/test_processing.py new file mode 100644 index 0000000000000..b2367060c6c1b --- /dev/null +++ b/tests/multimodal/test_processing.py @@ -0,0 +1,370 @@ +from typing import cast + +import pytest +from transformers import BatchFeature + +from vllm.multimodal.processing import (PromptReplacement, find_text_matches, + find_token_matches, iter_token_matches, + iter_token_runs, replace_text_matches) +from vllm.transformers_utils.tokenizer import AnyTokenizer +from vllm.utils import full_groupby + + +# yapf: disable +@pytest.mark.parametrize( + ("token_ids", "expected"), + [ + ([], []), + ( + [32000, 32000, 32000], + [{ "token_id": 32000, "start_idx": 0, "length": 3 }], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [ + { "token_id": 9833, "start_idx": 0, "length": 1 }, + { "token_id": 28747, "start_idx": 1, "length": 1 }, + { "token_id": 32000, "start_idx": 2, "length": 3 }, + { "token_id": 9833, "start_idx": 5, "length": 1 }, + { "token_id": 28747, "start_idx": 6, "length": 1 }, + { "token_id": 32000, "start_idx": 7, "length": 2 }, + { "token_id": 918, "start_idx": 9, "length": 1 }, + ], + ), + ], +) +# yapf: enable +def test_iter_token_runs(token_ids, expected): + result = list(iter_token_runs(token_ids)) + + # Only displayed on error + print("result:", result) + + # Manually constructed results + assert [item._asdict() for item in result] == expected + + # Invariants + assert sum(run_info.length for run_info in result) == len(token_ids) + + +# yapf: disable +@pytest.mark.parametrize( + ("token_ids", "match_ids", "expected"), + [ + ([], [], [{ "start_idx": 0, "end_idx": 0 }]), + ([], [32000], []), + ( + [32000, 32000, 32000], + [32000], + [ + { "start_idx": 0, "end_idx": 1 }, + { "start_idx": 1, "end_idx": 2 }, + { 
"start_idx": 2, "end_idx": 3 }, + ], + ), + ( + [32000, 32000, 32000], + [32000, 32000], + [{ "start_idx": 0, "end_idx": 2 }], + ), + ( + [32000, 32000, 32000], + [32000, 32000, 32000], + [{ "start_idx": 0, "end_idx": 3 }], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [28747, 32000], + [ + { "start_idx": 1, "end_idx": 3 }, + { "start_idx": 6, "end_idx": 8 }, + ], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [28747, 32000, 32000, 32000], + [ + { "start_idx": 1, "end_idx": 5 }, + ], + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + [28747, 0, 32000], + [], + ), + ], +) +# yapf: enable +def test_iter_token_matches(token_ids, match_ids, expected): + result = list(iter_token_matches(token_ids, match_ids)) + + # Manually constructed results + assert [item._asdict() for item in result] == expected + + # Invariants + match_lens = [end - start for start, end in result] + print("match_lens:", match_lens) # Only displayed on error + assert all(match_len == len(match_ids) for match_len in match_lens) + + +# yapf: disable +@pytest.mark.parametrize( + ("prompt", "target_by_key", "expected_by_key"), + [ + ( + [], + { + "pattern_1": [], + "pattern_2": [32000], + }, + { + "pattern_1": [{ "start_idx": 0, "end_idx": 0 }], + "pattern_2": [], + } + ), + ( + [32000, 32000, 32000, 32000], + { + "pattern_1": [32000], + "pattern_2": [32000, 32000], + "pattern_3": [32000, 32000, 32000], + }, + { + "pattern_1": [ + { "start_idx": 0, "end_idx": 1 }, + { "start_idx": 1, "end_idx": 2 }, + { "start_idx": 2, "end_idx": 3 }, + { "start_idx": 3, "end_idx": 4 }, + ], + "pattern_2": [ + { "start_idx": 0, "end_idx": 2 }, + { "start_idx": 2, "end_idx": 4 }, + ], + "pattern_3": [ + { "start_idx": 0, "end_idx": 3 }, + ], + }, + ), + ( + [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], + { + "pattern_1": [28747, 32000], + "pattern_2": [28747, 32000, 32000, 32000], + "pattern_3": [28747, 0, 32000], + }, + { + "pattern_1": [ + { "start_idx": 1, "end_idx": 3 }, + { "start_idx": 6, "end_idx": 8 }, + ], + "pattern_2": [ + { "start_idx": 1, "end_idx": 5 }, + ], + "pattern_3": [], + }, + ), + ], +) +# yapf: enable +def test_find_token_matches(prompt, target_by_key, expected_by_key): + # Should not be used since there is nothing to convert to token IDs + mock_tokenizer = cast(AnyTokenizer, object()) + + result = find_token_matches( + prompt, + [ + PromptReplacement(target, [], 0).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], + ) + + # Only displayed on error + print("result:", result) + + # Manually constructed results + result_groups = dict(full_groupby(result, key=lambda x: x.modality)) + assert { + key: [ + dict(start_idx=item.start_idx, end_idx=item.end_idx) + for item in result_groups.get(key, []) + ] + for key in expected_by_key + } == expected_by_key + + +# yapf: disable +@pytest.mark.parametrize( + ("prompt", "target_by_key", "expected_by_key"), + [ + # Detokenized test cases of `test_find_token_matches` + # using the vocab of llava-hf/llava-v1.6-mistral-7b-hf + ( + "", + { + "pattern_1": "", + "pattern_2": "", + }, + { + "pattern_1": [{ "start_idx": 0, "end_idx": 0 }], + "pattern_2": [], + } + ), + ( + "", + { + "pattern_1": "", + "pattern_2": "", + "pattern_3": "", + }, + { + "pattern_1": [ + { "start_idx": 0, "end_idx": 7 }, + { "start_idx": 7, "end_idx": 14 }, + { "start_idx": 14, "end_idx": 21 }, + { "start_idx": 21, "end_idx": 28 }, + ], + "pattern_2": [ + { "start_idx": 0, 
"end_idx": 14 }, + { "start_idx": 14, "end_idx": 28 }, + ], + "pattern_3": [ + { "start_idx": 0, "end_idx": 21 }, + ], + }, + ), + ( + "Image:Image:!", + { + "pattern_1": "Image:", + "pattern_2": "Image:", + "pattern_3": "Image:", + }, + { + "pattern_1": [ + { "start_idx": 0, "end_idx": 13 }, + { "start_idx": 27, "end_idx": 40 }, + ], + "pattern_2": [ + { "start_idx": 0, "end_idx": 27 }, + ], + "pattern_3": [], + }, + ), + # Test regex escape + ( + "<|image|><|image|>", + { + "pattern_1": "<|image|>", + "pattern_2": "<|image|>", + "pattern_3": "<|image|><|image|>", + }, + { + "pattern_1": [ + { "start_idx": 0, "end_idx": 9 }, + { "start_idx": 16, "end_idx": 25 }, + ], + "pattern_2": [ + { "start_idx": 0, "end_idx": 16 }, + { "start_idx": 16, "end_idx": 32 }, + ], + "pattern_3": [ + { "start_idx": 0, "end_idx": 25 }, + ], + }, + ), + ], +) +# yapf: enable +def test_find_text_matches(prompt, target_by_key, expected_by_key): + # Should not be used since there is nothing to convert to text + mock_tokenizer = cast(AnyTokenizer, object()) + + result = find_text_matches( + prompt, + [ + PromptReplacement(target, [], 0).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], + ) + + # Only displayed on error + print("result:", result) + + # Manually constructed results + result_groups = dict(full_groupby(result, key=lambda x: x.modality)) + assert { + key: [ + dict(start_idx=item.start_idx, end_idx=item.end_idx) + for item in result_groups.get(key, []) + ] + for key in expected_by_key + } == expected_by_key + + +# yapf: disable +@pytest.mark.parametrize( + ("prompt", "target_by_key", "repl_by_key", "expected_by_mm_count"), + [ + ( + "Image:Image:!", + { + # We use `` before `Image:` to test matches that + # occur out of order + "pattern_1": "", + "pattern_2": "Image:", + "pattern_3": "!", + }, + { + # Test whether target is confused with repl_unit + "pattern_1": ("", 1), + # Test empty repl_unit + "pattern_2": ("", 1), + # Test multiple repl_count + "pattern_3": ("?", 2), + }, + { + # Test no replacement + 0: "Image:Image:!", + # Test single replacement + 1: "Image:??", + # Test repeated replacement + 2: "??", + }, + ), + ] +) +# yapf: enable +def test_find_replace_text( + prompt, + target_by_key, + repl_by_key, + expected_by_mm_count, +): + # Should not be used since there is nothing to convert to text + mock_tokenizer = cast(AnyTokenizer, object()) + + matches = find_text_matches( + prompt, + [ + PromptReplacement(target, *repl_by_key[key]) \ + .bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ], + ) + result_by_mm_count = { + mm_count: replace_text_matches( + prompt, + matches, + {key: list(range(mm_count)) + for key in repl_by_key}, + BatchFeature(), + ) + for mm_count in expected_by_mm_count + } + + # Only displayed on error + print("matches:", matches) + print("result_by_mm_count:", result_by_mm_count) + + # Manually constructed results + assert result_by_mm_count == expected_by_mm_count diff --git a/tests/multimodal/test_utils.py b/tests/multimodal/test_utils.py index 9869c8123f001..fd82fb0c55fd7 100644 --- a/tests/multimodal/test_utils.py +++ b/tests/multimodal/test_utils.py @@ -139,7 +139,8 @@ def test_repeat_and_pad_placeholder_tokens(model): 2, "", [32000, 32000, 32000], - [{ "offset": 0, "length": 2 }]), + [{ "offset": 0, "length": 2 }], + ), ( "", [3, 2], diff --git a/vllm/multimodal/inputs.py b/vllm/multimodal/inputs.py index 64a4c58d5509c..8e67a552afe12 100644 --- a/vllm/multimodal/inputs.py +++ b/vllm/multimodal/inputs.py @@ -203,14 +203,7 @@ 
class MultiModalInputsV2(TypedDict): """The type of inputs.""" prompt: str - """ - The original, unprocessed prompt text. - - Note: - Since prompt text is not required by vLLM internals, we leave this - unprocessed to save CPU computation. You can still call - :code:`tokenizer.decode(prompt_token_ids)` to get the processed text. - """ + """The processed prompt text.""" prompt_token_ids: List[int] """The processed token IDs which includes placeholder tokens.""" diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 88a924da174a6..28c8dda581982 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -1,34 +1,91 @@ +import re +from abc import ABC, abstractmethod +from collections.abc import Callable, ItemsView, Iterable, Mapping, Sequence from dataclasses import dataclass -from functools import lru_cache, partial -from typing import (Any, Callable, Collection, Generic, List, Mapping, - Optional, TypedDict, TypeVar, final) +from functools import lru_cache +from itertools import groupby +from typing import Any, Generic, NamedTuple, Optional, Protocol, TypeVar, Union +import numpy as np from transformers import BatchFeature -from typing_extensions import TypeAlias +from typing_extensions import TypeAlias, TypedDict from vllm.inputs import InputProcessingContext from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import is_list_of +from vllm.utils import flatten_2d_lists, full_groupby, is_list_of from .inputs import (AudioItem, ImageItem, MultiModalDataDict, MultiModalInputsV2, MultiModalKwargs, PlaceholderRange, VideoItem) + +def bind_prompt_sequence( + seq: Union[str, list[int]], + tokenizer: AnyTokenizer, +) -> "_BoundPromptSequence": + """ + Bind a text or token sequence to a tokenizer so that it can be + lazily converted into the other format on demand. + """ + return _BoundPromptSequence( + tokenizer=tokenizer, + _text=seq if isinstance(seq, str) else None, + _token_ids=seq if isinstance(seq, list) else None, + ) + + _T = TypeVar("_T") +_S = TypeVar("_S", str, list[int]) -ReplacementFunc: TypeAlias = Callable[[_T, BatchFeature, int], List[int]] -""" -Given the original data item, HF-processed data, and index of the processed -item, output the replacement token IDs to be allocated in vLLM. -""" + +@dataclass +class PromptReplacement(Generic[_S, _T]): + target: _S + """The text or token sequence to find and replace.""" + + repl_unit: _S + """ + The unit making up the replacement text or token sequence. + + See :code:`repl_count` for more details. + """ + + repl_count: Union[Callable[[list[_T], BatchFeature, int], int], int] + """ + Given the original multi-modal items for this modality, HF-processed data, + and index of the processed item, output the number of repetitions of + :code:`repl_unit` to build up the replacement text or token sequence. + + For convenience, you can pass in an integer if the number of repetitions is + a constant. 
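For example, a hypothetical model whose processor expands every "<image>"
placeholder into a fixed number of feature tokens could declare (the 576 here
is an assumed value for illustration, not taken from any real model):

PromptReplacement(
    target="<image>",
    repl_unit="<image>",
    repl_count=576,
)

When the count depends on the processed inputs, a callable of the form
(mm_items, hf_inputs, item_idx) -> int can be passed instead.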
+ """ + + def __repr__(self) -> str: + return (f"{type(self).__name__}(target={self.target!r}, " + f"repl_unit={self.repl_unit!r})") + + def bind( + self, + modality: str, + tokenizer: AnyTokenizer, + ) -> "_BoundPromptReplacement[_T]": + return _BoundPromptReplacement( + modality=modality, + target=bind_prompt_sequence(self.target, tokenizer), + repl_unit=bind_prompt_sequence(self.repl_unit, tokenizer), + repl_count=self.repl_count, + ) @dataclass class ModalityProcessingMetadata(Generic[_T]): - placeholder_replacements: Mapping[str, ReplacementFunc] + prompt_repls: Sequence[Union[PromptReplacement[str, _T], + PromptReplacement[list[int], _T]]] """ - A dictionary where each item represents the original placeholder in the - prompt text and the corresponding replacement. + Defines each text or token sequence to replace in the HF-processed prompt. + + This is skipped if the HF-processed prompt is found to already contain + the replacement prompts. """ @@ -52,46 +109,138 @@ class MultiModalProcessingMetadataBuiltins(TypedDict, total=False): Read more on that :ref:`here `. """ -MultiModalMultiData: TypeAlias = List[_T] -""" -A list of data items, where the number of data items allowed -per modality is restricted by :code:`--limit-mm-per-prompt`. -""" +def _encode( + tokenizer: AnyTokenizer, + text: str, + *, + add_special_tokens: bool = False, +) -> list[int]: + """ + Backend-agnostic equivalent of HF's + :code:`tokenizer.encode(text, add_special_tokens=...)`. + """ + if isinstance(tokenizer, MistralTokenizer): + return tokenizer.tokenizer.encode(text, + bos=add_special_tokens, + eos=add_special_tokens) -@final -class MultiModalMultiDataBuiltins(TypedDict, total=False): - """Type annotations for modality types predefined by vLLM.""" + return tokenizer.encode(text, add_special_tokens=add_special_tokens) - image: MultiModalMultiData[ImageItem] - """The input images.""" - video: MultiModalMultiData[VideoItem] - """The input videos.""" +@lru_cache(maxsize=2048) +def _cached_encode( + tokenizer: AnyTokenizer, + text: str, + *, + add_special_tokens: bool = False, +) -> list[int]: + return _encode(tokenizer, text, add_special_tokens=add_special_tokens) - audio: MultiModalMultiData[AudioItem] - """The input audios.""" +def _decode( + tokenizer: AnyTokenizer, + token_ids: list[int], + *, + skip_special_tokens: bool = False, +) -> str: + """ + Backend-agnostic equivalent of HF's + :code:`tokenizer.decode(token_ids, skip_special_tokens=...)`. + """ + return tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens) -MultiModalMultiDataDict: TypeAlias = Mapping[str, MultiModalMultiData[Any]] -""" -A dictionary containing an entry for each modality type to input. -Note: - This dictionary also accepts modality keys defined outside - :class:`MultiModalMultiDataBuiltins` as long as a customized plugin - is registered through the :class:`~vllm.multimodal.MULTIMODAL_REGISTRY`. - Read more on that :ref:`here `. -""" +@lru_cache(maxsize=2048) +def _cached_decode( + tokenizer: AnyTokenizer, + token_ids: tuple[int, ...], + *, + skip_special_tokens: bool = False, +) -> str: + return _decode(tokenizer, + list(token_ids), + skip_special_tokens=skip_special_tokens) + + +class _HasModalityAttr(Protocol): + modality: str + +class _HasModalityProp(Protocol): -def to_multi_format(data: MultiModalDataDict) -> MultiModalMultiDataDict: + @property + def modality(self) -> str: + ... 
+ + +_M = TypeVar("_M", bound=Union[_HasModalityAttr, _HasModalityProp]) + + +def full_groupby_modality(values: Iterable[_M]) -> ItemsView[str, list[_M]]: + """Convenience function to apply :func:`full_groupby` based on modality.""" + return full_groupby(values, key=lambda x: x.modality) + + +@dataclass +class _BoundPromptSequence: + tokenizer: AnyTokenizer + _text: Optional[str] + _token_ids: Optional[list[int]] + + def __post_init__(self) -> None: + if self._text is None and self._token_ids is None: + raise ValueError("At least one of 'text' and 'token_ids' must be " + "specified") + + @property + def text(self) -> str: + if self._text is None: + assert self._token_ids is not None + self._text = _cached_decode(self.tokenizer, tuple(self._token_ids)) + + return self._text + + @property + def token_ids(self) -> list[int]: + if self._token_ids is None: + assert self._text is not None + self._token_ids = _cached_encode(self.tokenizer, self._text) + + return self._token_ids + + def __repr__(self) -> str: + return (f"{type(self).__name__}(_text={self._text!r}, " + f"_token_ids={self._token_ids!r})") + + +@dataclass +class _BoundPromptReplacement(Generic[_T]): + modality: str + target: _BoundPromptSequence + repl_unit: _BoundPromptSequence + repl_count: Union[Callable[[list[_T], BatchFeature, int], int], int] + + def get_count( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> int: + repl_count = self.repl_count + if isinstance(repl_count, int): + return repl_count + + return repl_count(mm_items, hf_inputs, item_idx) + + +def to_multi_format(data: MultiModalDataDict) -> dict[str, list[Any]]: """ Convert a :class:`MultiModalDataDict` containing single data items to a :class:`MultiModalMultiDataDict` containing multiple data items per entry. """ - multi_data: Mapping[str, MultiModalMultiData[Any]] = {} + multi_data = dict[str, list[Any]]() for k, v in data.items(): # yapf: disable @@ -107,86 +256,279 @@ def to_multi_format(data: MultiModalDataDict) -> MultiModalMultiDataDict: return multi_data -def encode_no_special_tokens( - tokenizer: AnyTokenizer, - text: str, -) -> List[int]: +class _TokenRun(NamedTuple): + token_id: int + + start_idx: int + length: int + + +def iter_token_runs(token_ids: list[int]) -> Iterable[_TokenRun]: """ - Backend-agnostic equivalent of HF's - :code:`tokenizer.encode(text, add_special_tokens=False)`. + Yield the starting index and length of each run of tokens that are the same. 
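A quick illustration, mirroring the unit tests above:

list(iter_token_runs([9833, 28747, 32000, 32000, 32000]))
# -> [_TokenRun(token_id=9833, start_idx=0, length=1),
#     _TokenRun(token_id=28747, start_idx=1, length=1),
#     _TokenRun(token_id=32000, start_idx=2, length=3)]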
""" - if isinstance(tokenizer, MistralTokenizer): - return tokenizer.tokenizer.encode(text, bos=False, eos=False) + start_idx = 0 + + for token_id, it in groupby(token_ids): + length = sum(1 for _ in it) + yield _TokenRun(token_id=token_id, start_idx=start_idx, length=length) + + start_idx += length + + +class _PlaceholderInfo(NamedTuple): + modality: str + offset: int + length: int + + def to_range(self) -> PlaceholderRange: + return PlaceholderRange(offset=self.offset, length=self.length) + + +def iter_placeholders( + prompt_repls: Sequence[_BoundPromptReplacement[Any]], + token_ids: list[int], + *, + min_placeholder_count: int, +) -> Iterable[_PlaceholderInfo]: + """Yield each set of placeholder tokens found in :code:`token_ids`.""" + placeholder_ids_by_modality = { + modality: { + token_id + for prompt_repl in repls + for token_id in prompt_repl.repl_unit.token_ids + } + for modality, repls in full_groupby_modality(prompt_repls) + } - return tokenizer.encode(text, add_special_tokens=False) + for run_info in iter_token_runs(token_ids): + if run_info.length > min_placeholder_count: + for (modality, + placeholder_ids) in placeholder_ids_by_modality.items(): + if run_info.token_id in placeholder_ids: + yield _PlaceholderInfo( + modality=modality, + offset=run_info.start_idx, + length=run_info.length, + ) -@lru_cache -def candidate_placeholders( - tokenizer: AnyTokenizer, - placeholder_text: str, -) -> Collection[List[int]]: - """Generate token ID sequences that may represent a placeholder text.""" - # When the placeholder text is not mapped to a special token ID, - # it may be tokenized differently based on whether it is at the start/end - # of the string. So, we go through each combination of whether the text - # is at the start and end boundaries of the string - - # Matches the placeholder when it is in the middle of the string - start_id, = encode_no_special_tokens(tokenizer, "a") - end_id, = encode_no_special_tokens(tokenizer, "b") - - candidate_basic = encode_no_special_tokens(tokenizer, placeholder_text) - - start_id_, *candidate_a = encode_no_special_tokens( - tokenizer, - f"a{placeholder_text}", - ) - assert start_id == start_id_ +class _TokenMatch(NamedTuple): + start_idx: int + end_idx: int - start_id_, *candidate_ab, end_id_ = encode_no_special_tokens( - tokenizer, - f"a{placeholder_text}b", - ) - assert start_id == start_id_ and end_id == end_id_ - *candidate_b, end_id_ = encode_no_special_tokens( - tokenizer, - f"{placeholder_text}b", - ) - assert end_id == end_id_ +def iter_token_matches( + token_ids: list[int], + match_ids: list[int], +) -> Iterable[_TokenMatch]: + """Yield each occurrence of :code:`match_ids` in :code:`token_ids`.""" + match_len = len(match_ids) - # Remove duplicates (need to convert to tuple to be hashable) - unique_candidates = { - tuple(c) - for c in [candidate_basic, candidate_a, candidate_ab, candidate_b] - } + last_end_idx = 0 + for start_idx in range(len(token_ids) - match_len + 1): + if start_idx < last_end_idx: + continue # Exclude overlapping matches - # Convert back to list - return [list(c) for c in unique_candidates] + end_idx = start_idx + match_len + if token_ids[start_idx:end_idx] == match_ids: + yield _TokenMatch(start_idx=start_idx, end_idx=end_idx) + last_end_idx = end_idx -def apply_placeholders( - token_ids: List[int], - placeholder_ids: List[int], - get_replacement_ids: Callable[[], List[int]], -) -> Optional[PlaceholderRange]: - """ - Find the first occurrence of :code:`placeholder_ids`, - and replace it with the output of 
:code:`get_replacement_ids`. +class _PromptReplacementMatch(ABC, Generic[_T, _S]): + prompt_repl: _BoundPromptReplacement[_T] + + @property + def modality(self) -> str: + return self.prompt_repl.modality + + @property + @abstractmethod + def start_idx(self) -> int: + raise NotImplementedError + + @property + @abstractmethod + def end_idx(self) -> int: + raise NotImplementedError + + @abstractmethod + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> _S: + raise NotImplementedError + + def __repr__(self) -> str: + return (f"{type(self).__name__}(modality={self.modality!r}, " + f"start_idx={self.start_idx!r}, end_idx={self.end_idx!r})") + + +@dataclass(repr=False) +class _PromptReplacementTokenMatch(_PromptReplacementMatch[_T, list[int]]): + prompt_repl: _BoundPromptReplacement[_T] + match: _TokenMatch + + @property + def start_idx(self) -> int: + return self.match.start_idx + + @property + def end_idx(self) -> int: + return self.match.end_idx + + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> list[int]: + prompt_repl = self.prompt_repl + count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) + return prompt_repl.repl_unit.token_ids * count - This function updates :code:`token_ids` in place. + +@dataclass(repr=False) +class _PromptReplacementTextMatch(_PromptReplacementMatch[_T, str]): + prompt_repl: _BoundPromptReplacement[_T] + match: re.Match[str] + + @property + def start_idx(self) -> int: + return self.match.start() + + @property + def end_idx(self) -> int: + return self.match.end() + + def get_repl( + self, + mm_items: list[_T], + hf_inputs: BatchFeature, + item_idx: int, + ) -> str: + prompt_repl = self.prompt_repl + count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) + return prompt_repl.repl_unit.text * count + + +def find_token_matches( + prompt: list[int], + prompt_repls: Sequence[_BoundPromptReplacement[_T]], +) -> list[_PromptReplacementTokenMatch[_T]]: + """Return each target of :code:`prompt_repls` found in :code:`prompt`.""" + return [ + _PromptReplacementTokenMatch(prompt_repl, match) + for prompt_repl in prompt_repls + for match in iter_token_matches(prompt, prompt_repl.target.token_ids) + ] + + +def find_text_matches( + prompt: str, + prompt_repls: Sequence[_BoundPromptReplacement[_T]], +) -> list[_PromptReplacementTextMatch[_T]]: + """Return each target of :code:`prompt_repls` found in :code:`prompt`.""" + return [ + _PromptReplacementTextMatch(prompt_repl, match) + for prompt_repl in prompt_repls + for match in re.finditer(re.escape(prompt_repl.target.text), prompt) + ] + + +def _resolve_matches( + prompt: _S, + matches: Sequence[_PromptReplacementMatch[_T, _S]], +) -> list[_PromptReplacementMatch[_T, _S]]: + """ + Resolve :code:`matches` to ensure that there are no overlapping matches, + and sort them such that earlier matches take priority over later ones. 
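For instance, two matches covering token indices [0, 2) and [1, 3) both claim
index 1, so rather than guessing which replacement should win, an error is raised:

# prompt of length 3, matches at [0, 2) and [1, 3)
# num_matches_by_idx == [1, 2, 1]  ->  ValueError("Unable to find a unique replacement ...")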
""" - placeholder_length = len(placeholder_ids) + num_matches_by_idx = np.zeros(len(prompt), dtype=int) + for match in matches: + num_matches_by_idx[match.start_idx:match.end_idx] += 1 + + duplicate_matches_idxs, = np.nonzero(num_matches_by_idx > 1) + if len(duplicate_matches_idxs) > 0: + raise ValueError("Unable to find a unique replacement " + f"at indices={duplicate_matches_idxs} " + f"of prompt={prompt}") + + return sorted(matches, key=lambda x: x.start_idx) + + +def _replace_matches( + prompt: _S, + matches: Sequence[_PromptReplacementMatch[_T, _S]], + mm_items_by_modality: Mapping[str, list[_T]], + hf_inputs: BatchFeature, +) -> list[_S]: + out_seqs = list[_S]() + prev_end_idx = 0 + next_idx_by_modality = {modality: 0 for modality in mm_items_by_modality} + + for match in _resolve_matches(prompt, matches): + modality = match.modality + mm_items = mm_items_by_modality[modality] + + item_idx = next_idx_by_modality[modality] + if item_idx >= len(mm_items): + continue + + start_idx = match.start_idx + end_idx = match.end_idx + repl_ids = match.get_repl(mm_items, hf_inputs, item_idx) + + out_seqs.append(prompt[prev_end_idx:start_idx] + repl_ids) + prev_end_idx = end_idx + next_idx_by_modality[modality] += 1 + + out_seqs.append(prompt[prev_end_idx:]) + + return out_seqs + + +def replace_token_matches( + prompt: list[int], + matches: Sequence[_PromptReplacementMatch[_T, list[int]]], + mm_items_by_modality: Mapping[str, list[_T]], + hf_inputs: BatchFeature, +) -> list[int]: + """Apply :code:`prompt_repls` to :code:`prompt`.""" + if not matches: + return prompt + + token_id_seqs = _replace_matches( + prompt, + matches, + mm_items_by_modality, + hf_inputs, + ) + + return flatten_2d_lists(token_id_seqs) - for start_idx in range(len(token_ids) - placeholder_length + 1): - if token_ids[start_idx:placeholder_length] == placeholder_ids: - token_ids[start_idx:placeholder_length] = get_replacement_ids() - return PlaceholderRange(offset=start_idx, - length=placeholder_length) +def replace_text_matches( + prompt: str, + matches: Sequence[_PromptReplacementMatch[_T, str]], + mm_items_by_modality: Mapping[str, list[_T]], + hf_inputs: BatchFeature, +) -> str: + """Apply :code:`prompt_repls` to :code:`prompt`.""" + if not matches: + return prompt - return None + texts = _replace_matches( + prompt, + matches, + mm_items_by_modality, + hf_inputs, + ) + + return "".join(texts) class MultiModalProcessor: @@ -212,62 +554,166 @@ def __call__( ) -> MultiModalInputsV2: return self.apply(prompt, mm_data, mm_processor_kwargs) - def apply( + def _find_placeholders( + self, + all_prompt_repls: Sequence[_BoundPromptReplacement[Any]], + new_token_ids: list[int], + *, + # To avoid false positives from multi-input when detecting + # whether placeholder tokens have been inserted, in case + # the target sequence is a subset of the replacement tokens + min_placeholder_count: int = 16, + ) -> list[_PlaceholderInfo]: + return list( + iter_placeholders( + all_prompt_repls, + new_token_ids, + min_placeholder_count=min_placeholder_count, + )) + + def _apply_hf_processor( self, prompt: str, mm_data: MultiModalDataDict, mm_processor_kwargs: Mapping[str, object], - ) -> MultiModalInputsV2: - tokenizer = self.ctx.tokenizer + ) -> BatchFeature: hf_processor = self.ctx.get_hf_processor() - processed_inputs = hf_processor( + return hf_processor( text=prompt, # type: ignore **mm_data, **mm_processor_kwargs, ) - new_token_ids, = processed_inputs.pop("input_ids").tolist() - mm_kwargs = MultiModalKwargs(processed_inputs) - mm_placeholders: 
Mapping[str, List[PlaceholderRange]] = {} + def _bind_prompt_replacements( + self, + mm_data: MultiModalDataDict, + ) -> list[_BoundPromptReplacement[Any]]: + tokenizer = self.ctx.tokenizer - for modality, orig_inputs in to_multi_format(mm_data).items(): - assert isinstance(orig_inputs, list) + return [ + prompt_repl.bind(modality, tokenizer) + for modality, metadata in self.metadata.items() + if modality in mm_data for prompt_repl in metadata.prompt_repls + ] - metadata = self.metadata[modality] - placeholder_replacements = metadata.placeholder_replacements + def _apply_prompt_replacements( + self, + mm_data: MultiModalDataDict, + hf_inputs: BatchFeature, + token_ids: list[int], + prompt_repls: Sequence[_BoundPromptReplacement[Any]], + ) -> tuple[list[int], str, list[_PlaceholderInfo]]: + tokenizer = self.ctx.tokenizer - modality_placeholders: List[PlaceholderRange] = [] + mm_items = to_multi_format(mm_data) + token_matches = find_token_matches(token_ids, prompt_repls) + + # If the search text does not represent a special token, + # it may have different token IDs in the prompt, because + # the tokens may go across the boundaries of the search text. + # ---- + # e.g. when searching for "foo" in "food", if "food" itself makes + # up a token, then the token ID of "foo" will not appear at all + # ---- + # Since it is inefficient to search for all possible tokenizations + # of the search text in the prompt, we instead perform string + # replacement on the decoded token IDs, then encode them back. + if all( + len(matches) >= len(mm_data[modality]) + for modality, matches in full_groupby_modality(token_matches) + ): # yapf: disable + token_ids = replace_token_matches( + token_ids, + token_matches, + mm_items, + hf_inputs, + ) + + text = _decode(tokenizer, token_ids) + matched_repls = [match.prompt_repl for match in token_matches] + else: + text = _decode(tokenizer, token_ids) + + text_matches = find_text_matches(text, prompt_repls) + text = replace_text_matches( + text, + text_matches, + mm_items, + hf_inputs, + ) + + token_ids = _encode(tokenizer, text) + matched_repls = [match.prompt_repl for match in text_matches] + + placeholders = self._find_placeholders(matched_repls, token_ids) + + # Sanity check + assert len(placeholders) == len(matched_repls), dict( + # Log this information for easier debugging + text=text, + token_ids=token_ids, + placeholders=placeholders, + matched_repls=matched_repls, + ) - for item_idx, orig_item in enumerate(orig_inputs): - for match_text, replace_fn in placeholder_replacements.items(): - candidates = candidate_placeholders(tokenizer, match_text) - get_replacement_ids = partial( - replace_fn, - orig_item, - processed_inputs, - item_idx, - ) + return token_ids, text, placeholders - for match_ids in candidates: - # TODO(youkaichao): Don't update new_token_ids - placeholders = apply_placeholders( - new_token_ids, - match_ids, - get_replacement_ids, - ) + def apply( + self, + prompt_text: str, + mm_data: MultiModalDataDict, + mm_processor_kwargs: Mapping[str, object], + ) -> MultiModalInputsV2: + """ + Process multi-modal inputs to be used in vLLM. + + The main steps are: + + 1. Apply HF Processor on prompt text and multi-modal data together, + outputting token IDs and processed tensors. + 2. Find and replace sequences in the token IDs with placeholder tokens. + The number of placeholder tokens equals the feature size of the + multi-modal data outputted by the multi-modal encoder. + 3. Extract information about the placeholder tokens from the + processed token IDs. 
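Concretely, for a prompt containing a single image, the end result might look
like this (the token IDs and the 576-token feature size are made-up values):

# prompt_token_ids:  [9833, 28747] + [32000] * 576 + [918]
# mm_placeholders:   {"image": [PlaceholderRange(offset=2, length=576)]}
# mm_kwargs:         the remaining HF-processed tensors (e.g. pixel values)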
+ """ + tokenizer = self.ctx.tokenizer + + hf_inputs = self._apply_hf_processor(prompt_text, mm_data, + mm_processor_kwargs) + prompt_ids, = hf_inputs.pop("input_ids").tolist() + mm_kwargs = MultiModalKwargs(hf_inputs) - if placeholders is not None: - modality_placeholders.append(placeholders) + all_prompt_repls = self._bind_prompt_replacements(mm_data) - # yapf: disable - mm_placeholders[modality] = modality_placeholders # type: ignore[index] - # yapf: enable + # If HF processor already inserts placeholder tokens, + # there is no need for us to insert them + all_placeholders = self._find_placeholders(all_prompt_repls, + prompt_ids) + if all_placeholders: + prompt_text = _decode(tokenizer, prompt_ids) + else: + ( + prompt_ids, + prompt_text, + all_placeholders, + ) = self._apply_prompt_replacements( + mm_data, + hf_inputs, + prompt_ids, + all_prompt_repls, + ) + + mm_placeholders = { + modality: [item.to_range() for item in items] + for modality, items in full_groupby_modality(all_placeholders) + } return MultiModalInputsV2( type="multimodal", - prompt=prompt, - prompt_token_ids=new_token_ids, + prompt=prompt_text, + prompt_token_ids=prompt_ids, mm_kwargs=mm_kwargs, mm_placeholders=mm_placeholders, ) diff --git a/vllm/utils.py b/vllm/utils.py index 30c371b0e3591..dd4283e3ac381 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -19,7 +19,8 @@ import warnings import weakref from asyncio import FIRST_COMPLETED, AbstractEventLoop, Future, Task -from collections.abc import Mapping +from collections import defaultdict +from collections.abc import Iterable, Mapping from functools import lru_cache, partial, wraps from platform import uname from typing import (Any, AsyncGenerator, Awaitable, Callable, Dict, Generic, @@ -905,6 +906,23 @@ def flatten_2d_lists(lists: List[List[T]]) -> List[T]: return [item for sublist in lists for item in sublist] +_K = TypeVar("_K", bound=Hashable) +_V = TypeVar("_V") + + +def full_groupby(values: Iterable[_V], *, key: Callable[[_V], _K]): + """ + Unlike :class:`itertools.groupby`, groups are not broken by + non-contiguous data. 
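For example:

pairs = [("image", 1), ("audio", 2), ("image", 3)]
dict(full_groupby(pairs, key=lambda x: x[0]))
# -> {"image": [("image", 1), ("image", 3)], "audio": [("audio", 2)]}
# itertools.groupby would have split "image" into two separate groups here.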
+ """ + groups = defaultdict[_K, list[_V]](list) + + for value in values: + groups[key(value)].append(value) + + return groups.items() + + # TODO: This function can be removed if transformer_modules classes are # serialized by value when communicating between processes def init_cached_hf_modules() -> None: From 4cfe5d2bcafe1f47d1df046e6788ebbe038eaf3f Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Sat, 23 Nov 2024 13:25:46 +0800 Subject: [PATCH 117/397] [Bugfix] `multi_modal_kwargs` broadcast for CPU tensor parallel (#10541) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/worker/cpu_enc_dec_model_runner.py | 1 + vllm/worker/cpu_model_runner.py | 1 + 2 files changed, 2 insertions(+) diff --git a/vllm/worker/cpu_enc_dec_model_runner.py b/vllm/worker/cpu_enc_dec_model_runner.py index 1f8e2d2d88a23..cc24cfe04d2ba 100644 --- a/vllm/worker/cpu_enc_dec_model_runner.py +++ b/vllm/worker/cpu_enc_dec_model_runner.py @@ -35,6 +35,7 @@ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: "input_positions": self.input_positions, "encoder_input_tokens": self.encoder_input_tokens, "encoder_input_positions": self.encoder_input_positions, + "multi_modal_kwargs": self.multi_modal_kwargs, } _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) _add_sampling_metadata_broadcastable_dict(tensor_dict, diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index 2cf573625401a..7cab476d7fca4 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -83,6 +83,7 @@ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: tensor_dict = { "input_tokens": self.input_tokens, "input_positions": self.input_positions, + "multi_modal_kwargs": self.multi_modal_kwargs, } _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) _add_sampling_metadata_broadcastable_dict(tensor_dict, From 86a44fb8967f757b0701aaa33aeaa8a431714a27 Mon Sep 17 00:00:00 2001 From: JiHuazhong Date: Sat, 23 Nov 2024 14:23:12 +0800 Subject: [PATCH 118/397] [Platforms] Refactor openvino code (#10573) Signed-off-by: statelesshz --- vllm/executor/openvino_executor.py | 81 ++---------------------------- vllm/platforms/openvino.py | 69 +++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 78 deletions(-) diff --git a/vllm/executor/openvino_executor.py b/vllm/executor/openvino_executor.py index dcd4b7621381d..db0070ce510ee 100644 --- a/vllm/executor/openvino_executor.py +++ b/vllm/executor/openvino_executor.py @@ -1,19 +1,16 @@ from typing import List, Set, Tuple import openvino as ov -import openvino.properties.hint as hints -import torch import vllm.envs as envs -from vllm.config import CacheConfig, ModelConfig from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.model_executor.layers.sampler import SamplerOutput from vllm.platforms import current_platform from vllm.sequence import ExecuteModelRequest -from vllm.utils import (GiB_bytes, get_distributed_init_method, get_ip, - get_open_port, make_async) +from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, + make_async) from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -30,11 +27,6 @@ def _init_executor(self) -> None: current_platform.is_openvino_gpu(), \ "OpenVINO backend supports only CPU and GPU devices" - self.ov_core = ov.Core() - self.model_config = _verify_and_get_model_config(self.model_config) - self.cache_config = 
_verify_and_get_cache_config( - self.ov_core, self.cache_config) - # Instantiate the worker and load the model to CPU. self._init_worker() @@ -45,7 +37,7 @@ def _init_worker(self): distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) self.driver_worker = wrapper.init_worker( - ov_core=self.ov_core, + ov_core=ov.Core(), vllm_config=self.vllm_config, local_rank=0, rank=0, @@ -130,70 +122,3 @@ async def check_health_async(self) -> None: # OpenVINOExecutor will always be healthy as long as # it's running. return - - -def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig: - if config.dtype != torch.float32: - logger.warning( - f"Only float32 dtype is supported on OpenVINO, casting from {config.dtype}." # noqa: G004, E501 - ) - config.dtype = torch.float32 - if not config.enforce_eager: - logger.warning( - "CUDA graph is not supported on OpenVINO backend, fallback to the " - "eager mode.") - config.enforce_eager = True - return config - - -def _verify_and_get_cache_config(ov_core: ov.Core, - config: CacheConfig) -> CacheConfig: - if envs.VLLM_OPENVINO_CPU_KV_CACHE_PRECISION == "u8": - if not current_platform.is_openvino_cpu(): - logger.info("VLLM_OPENVINO_CPU_KV_CACHE_PRECISION is" - "ignored for GPU, f16 data type will be used.") - config.cache_dtype = ov.Type.f16 - else: - logger.info("KV cache type is overridden to u8 via " - "VLLM_OPENVINO_CPU_KV_CACHE_PRECISION env var.") - config.cache_dtype = ov.Type.u8 - else: - if current_platform.is_openvino_cpu(): - ov_device = envs.VLLM_OPENVINO_DEVICE - inference_precision = ov_core.get_property( - ov_device, hints.inference_precision) - if inference_precision == ov.Type.bf16: - config.cache_dtype = ov.Type.bf16 - else: - config.cache_dtype = ov.Type.f16 - else: - config.cache_dtype = ov.Type.f16 - - if current_platform.is_openvino_cpu(): - if config.block_size != 32: - logger.info( - f"OpenVINO CPU optimal block size is 32, overriding currently set {config.block_size}" # noqa: G004, E501 - ) - config.block_size = 32 - else: - if config.block_size != 16: - logger.info( - f"OpenVINO GPU optimal block size is 16, overriding currently set {config.block_size}" # noqa: G004, E501 - ) - config.block_size = 16 - - kv_cache_space = envs.VLLM_OPENVINO_KVCACHE_SPACE - if kv_cache_space >= 0: - if kv_cache_space == 0 and current_platform.is_openvino_cpu(): - config.openvino_kvcache_space_bytes = 4 * GiB_bytes # type: ignore - logger.warning( - "Environment variable VLLM_OPENVINO_KVCACHE_SPACE (GB) " - "for OpenVINO backend is not set, using 4 by default.") - else: - config.openvino_kvcache_space_bytes = kv_cache_space * GiB_bytes # type: ignore - else: - raise RuntimeError( - "Invalid environment variable VLLM_OPENVINO_KVCACHE_SPACE" - f" {kv_cache_space}, expect a positive integer value.") - - return config diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 694de836e1517..91e615481ff8e 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,5 +1,7 @@ from typing import TYPE_CHECKING +import openvino as ov +import openvino.properties.hint as hints import torch import vllm.envs as envs @@ -49,6 +51,8 @@ def is_pin_memory_available(self) -> bool: @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + from vllm.utils import GiB_bytes + parallel_config = vllm_config.parallel_config assert ( parallel_config.world_size == 1 @@ -57,3 +61,68 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: if parallel_config.worker_cls == "auto": 
parallel_config.worker_cls = \ "vllm.worker.openvino_worker.OpenVINOWorker" + + # check and update model config + model_config = vllm_config.model_config + if model_config.dtype != torch.float32: + logger.warning( + f"Only float32 dtype is supported on OpenVINO, casting from {model_config.dtype}." # noqa: G004, E501 + ) + model_config.dtype = torch.float32 + if not model_config.enforce_eager: + logger.warning( + "CUDA graph is not supported on OpenVINO backend, fallback to " + "the eager mode.") + model_config.enforce_eager = True + + # check and update cache config + ov_core = ov.Core() + cache_config = vllm_config.cache_config + if envs.VLLM_OPENVINO_CPU_KV_CACHE_PRECISION == "u8": + if not OpenVinoPlatform.is_openvino_cpu(): + logger.info("VLLM_OPENVINO_CPU_KV_CACHE_PRECISION is" + "ignored for GPU, f16 data type will be used.") + cache_config.cache_dtype = ov.Type.f16 + else: + logger.info("KV cache type is overridden to u8 via " + "VLLM_OPENVINO_CPU_KV_CACHE_PRECISION env var.") + cache_config.cache_dtype = ov.Type.u8 + else: + if OpenVinoPlatform.is_openvino_cpu(): + ov_device = envs.VLLM_OPENVINO_DEVICE + inference_precision = ov_core.get_property( + ov_device, hints.inference_precision) + if inference_precision == ov.Type.bf16: + cache_config.cache_dtype = ov.Type.bf16 + else: + cache_config.cache_dtype = ov.Type.f16 + else: + cache_config.cache_dtype = ov.Type.f16 + + if OpenVinoPlatform.is_openvino_cpu(): + if cache_config.block_size != 32: + logger.info( + f"OpenVINO CPU optimal block size is 32, overriding currently set {cache_config.block_size}" # noqa: G004, E501 + ) + cache_config.block_size = 32 + else: + if cache_config.block_size != 16: + logger.info( + f"OpenVINO GPU optimal block size is 16, overriding currently set {cache_config.block_size}" # noqa: G004, E501 + ) + cache_config.block_size = 16 + + kv_cache_space = envs.VLLM_OPENVINO_KVCACHE_SPACE + if kv_cache_space >= 0: + if kv_cache_space == 0 and OpenVinoPlatform.is_openvino_cpu(): + cache_config.openvino_kvcache_space_bytes = 4 * GiB_bytes # type: ignore + logger.warning( + "Environment variable VLLM_OPENVINO_KVCACHE_SPACE (GB) " + "for OpenVINO backend is not set, using 4 by default.") + else: + cache_config.openvino_kvcache_space_bytes = ( # type: ignore + kv_cache_space * GiB_bytes) + else: + raise RuntimeError( + "Invalid environment variable VLLM_OPENVINO_KVCACHE_SPACE" + f" {kv_cache_space}, expect a positive integer value.") From 651f6c31ac86f29aa72fa682ef6c34349bcc75db Mon Sep 17 00:00:00 2001 From: Nishidha Date: Sat, 23 Nov 2024 15:03:53 +0530 Subject: [PATCH 119/397] For ppc64le, disabled tests for now and addressed space issues (#10538) --- .buildkite/run-cpu-test-ppc64le.sh | 44 ++---------------------------- 1 file changed, 3 insertions(+), 41 deletions(-) diff --git a/.buildkite/run-cpu-test-ppc64le.sh b/.buildkite/run-cpu-test-ppc64le.sh index 5d7a0bff90963..bc06838d804ff 100755 --- a/.buildkite/run-cpu-test-ppc64le.sh +++ b/.buildkite/run-cpu-test-ppc64le.sh @@ -4,49 +4,11 @@ # It serves a sanity check for compilation and basic model usage. set -ex -# Try building the docker image -docker build -t cpu-test -f Dockerfile.ppc64le . - # Setup cleanup -remove_docker_container() { docker rm -f cpu-test || true; } +remove_docker_container() { docker rm -f cpu-test || true; docker system prune -f; } trap remove_docker_container EXIT remove_docker_container -# Run the image, setting --shm-size=4g for tensor parallel. 
-source /etc/environment -#docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true --network host -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=4 --shm-size=4g --name cpu-test cpu-test -docker run -itd --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true --network host -e HF_TOKEN="$HF_TOKEN" --name cpu-test cpu-test - -function cpu_tests() { - set -e - - # Run basic model test - docker exec cpu-test bash -c " - set -e - pip install pytest pytest-asyncio \ - decord einops librosa peft Pillow sentence-transformers soundfile \ - transformers_stream_generator matplotlib datamodel_code_generator - pip install torchvision --index-url https://download.pytorch.org/whl/cpu - pytest -v -s tests/models/decoder_only/language -m cpu_model - pytest -v -s tests/models/embedding/language -m cpu_model - pytest -v -s tests/models/encoder_decoder/language -m cpu_model - pytest -v -s tests/models/decoder_only/audio_language -m cpu_model - pytest -v -s tests/models/decoder_only/vision_language -m cpu_model" - - # online inference - docker exec cpu-test bash -c " - set -e - python3 -m vllm.entrypoints.openai.api_server --model facebook/opt-125m & - timeout 600 bash -c 'until curl localhost:8000/v1/models; do sleep 1; done' || exit 1 - python3 benchmarks/benchmark_serving.py \ - --backend vllm \ - --dataset-name random \ - --model facebook/opt-125m \ - --num-prompts 20 \ - --endpoint /v1/completions \ - --tokenizer facebook/opt-125m" -} +# Try building the docker image +docker build -t cpu-test -f Dockerfile.ppc64le . -# All of CPU tests are expected to be finished less than 25 mins. -export -f cpu_tests -timeout 25m bash -c "cpu_tests" From 04668ebe7a35b69f1d2f8b04ef255bb16c8d2a01 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Sun, 24 Nov 2024 02:12:20 +0800 Subject: [PATCH 120/397] [Bugfix] Avoid import AttentionMetadata explicitly in Mllama (#10593) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/attention/backends/blocksparse_attn.py | 5 +++++ vllm/attention/layer.py | 3 ++- vllm/model_executor/models/mllama.py | 14 +++++++------- vllm/platforms/openvino.py | 8 ++++++-- vllm/v1/attention/backends/flash_attn.py | 2 +- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/vllm/attention/backends/blocksparse_attn.py b/vllm/attention/backends/blocksparse_attn.py index 94002e36db2bb..9e54c3b40c54e 100644 --- a/vllm/attention/backends/blocksparse_attn.py +++ b/vllm/attention/backends/blocksparse_attn.py @@ -87,6 +87,11 @@ def __post_init__(self): class BlocksparseFlashAttentionBackend(AttentionBackend): + @staticmethod + def get_name() -> str: + # For attention layer compatibility + return "FLASH_ATTN" + @staticmethod def get_impl_cls() -> Type["BlocksparseFlashAttentionImpl"]: return BlocksparseFlashAttentionImpl diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index cb4dedf481c77..1bb335909484b 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -6,7 +6,7 @@ import vllm.envs as envs from vllm.attention import AttentionMetadata, AttentionType -from vllm.attention.selector import get_attn_backend +from vllm.attention.selector import backend_name_to_enum, get_attn_backend from vllm.config import CacheConfig from vllm.forward_context import ForwardContext, get_forward_context from vllm.model_executor.layers.quantization.base_config import ( @@ -98,6 +98,7 @@ def __init__( self.impl = impl_cls(num_heads, head_size, scale, num_kv_heads, alibi_slopes, sliding_window, kv_cache_dtype, 
blocksparse_params, logits_soft_cap) + self.backend = backend_name_to_enum(attn_backend.get_name()) # For cuda-alike (CUDA and ROCM) and cpu platforms, we control how # torch.compile works by registering the attention as one giant diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py index 41f62b37f3bd9..9e6634a9a7579 100644 --- a/vllm/model_executor/models/mllama.py +++ b/vllm/model_executor/models/mllama.py @@ -32,9 +32,8 @@ import vllm.distributed.parallel_state as ps from vllm.attention import Attention, AttentionMetadata, AttentionType -from vllm.attention.backends.flash_attn import FlashAttentionMetadata -from vllm.attention.backends.xformers import XFormersMetadata from vllm.attention.ops.paged_attn import PagedAttention +from vllm.attention.selector import _Backend from vllm.config import VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size from vllm.inputs import (INPUT_REGISTRY, DummyData, EncoderDecoderInputs, @@ -828,7 +827,8 @@ def _attention_with_mask( ) -> torch.Tensor: # Skip writing kv-cache for the initial profiling run. if len(kv_cache.shape) > 1: - if isinstance(attn_metadata, FlashAttentionMetadata): + if self.attn.backend in (_Backend.FLASH_ATTN, + _Backend.FLASH_ATTN_VLLM_V1): cached_k = torch.cat([k[s:e] for s, e in kv_range_for_decode]) cached_v = torch.cat([v[s:e] for s, e in kv_range_for_decode]) torch.ops._C_cache_ops.reshape_and_cache_flash( @@ -842,7 +842,7 @@ def _attention_with_mask( 1.0, 1.0, ) - elif isinstance(attn_metadata, XFormersMetadata): + elif self.attn.backend in (_Backend.XFORMERS, _Backend.TORCH_SDPA): key_cache, value_cache = PagedAttention.split_kv_cache( kv_cache, self.num_local_key_value_heads, self.head_dim) cached_k = torch.cat([k[s:e] for s, e in kv_range_for_decode]) @@ -852,9 +852,9 @@ def _attention_with_mask( attn_metadata.cross_slot_mapping, "auto", 1.0, 1.0) else: raise ValueError( - f"Unsupported AttentionMetadata {type(attn_metadata)} " - f"class found. Expected the AttentionMetadata to " - f"be either XFormersMetadata or FlashAttentionMetadata.") + f"Unsupported Attention backend {self.attn.backend} " + "enum found. Expected the Attention backend to be " + "FLASH_ATTN, FLASH_ATTN_VLLM_V1, XFORMERS or TORCH_SDPA.") # We have to call torch.sdpa for prefill when using a # custom cross-attention mask. 
Because the mask is not a diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 91e615481ff8e..ea5ec7b40b95c 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,7 +1,5 @@ from typing import TYPE_CHECKING -import openvino as ov -import openvino.properties.hint as hints import torch import vllm.envs as envs @@ -16,6 +14,12 @@ logger = init_logger(__name__) +try: + import openvino as ov + import openvino.properties.hint as hints +except ImportError as e: + logger.warning("Failed to import OpenVINO with %r", e) + class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index d98bb5a716e97..5f8535eaa303f 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -19,7 +19,7 @@ def get_supported_head_sizes() -> List[int]: @staticmethod def get_name() -> str: - return "flash-attn-vllm-v1" + return "FLASH_ATTN_VLLM_V1" @staticmethod def get_impl_cls() -> Type["FlashAttentionImpl"]: From 17d8fc1806c61e3f859a45b69be9f8dccf9a5fcc Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Sun, 24 Nov 2024 09:22:33 +0800 Subject: [PATCH 121/397] [bugfix] Fix example/tensorize_vllm_model tests (#10595) Signed-off-by: Jee Jee Li --- vllm/model_executor/model_loader/tensorizer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/model_loader/tensorizer.py b/vllm/model_executor/model_loader/tensorizer.py index c48b287ed181a..3fd668765a1b1 100644 --- a/vllm/model_executor/model_loader/tensorizer.py +++ b/vllm/model_executor/model_loader/tensorizer.py @@ -19,6 +19,7 @@ from vllm.logger import init_logger from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) +from vllm.plugins import set_current_vllm_config from vllm.utils import FlexibleArgumentParser tensorizer_error_msg = None @@ -284,7 +285,8 @@ def _init_model(self): model_args = self.tensorizer_config.hf_config model_args.torch_dtype = self.tensorizer_config.dtype assert self.tensorizer_config.model_class is not None - with no_init_or_tensor(): + # TODO: Do we need to consider old-style model class? 
+ with no_init_or_tensor(), set_current_vllm_config(self.vllm_config): return self.tensorizer_config.model_class( vllm_config=self.vllm_config, ) From 1700c543a556e669e559c369a36c0a0d36a8de19 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Sun, 24 Nov 2024 09:23:17 +0800 Subject: [PATCH 122/397] [Bugfix] Fix LoRA weight sharding (#10450) Signed-off-by: Jee Jee Li Co-authored-by: Cyrus Leung --- .buildkite/test-pipeline.yaml | 13 +- .../{test_chatglm3.py => test_chatglm3_tp.py} | 63 +++++-- tests/lora/test_llama.py | 146 ---------------- tests/lora/test_llama_tp.py | 161 ++++++++++++++++++ vllm/lora/fully_sharded_layers.py | 5 + vllm/lora/layers.py | 34 +++- vllm/model_executor/models/chatglm.py | 4 +- 7 files changed, 258 insertions(+), 168 deletions(-) rename tests/lora/{test_chatglm3.py => test_chatglm3_tp.py} (56%) delete mode 100644 tests/lora/test_llama.py create mode 100644 tests/lora/test_llama_tp.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index c436d2b48d20f..bff33d35b423e 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -230,7 +230,7 @@ steps: source_file_dependencies: - vllm/lora - tests/lora - command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py + command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore lora/test_long_context.py lora/test_chatglm3_tp.py lora/test_llama_tp.py parallelism: 4 - label: "PyTorch Fullgraph Smoke Test" # 9min @@ -475,18 +475,23 @@ steps: - pytest -v -s distributed/test_pp_cudagraph.py - pytest -v -s distributed/test_pipeline_parallel.py -- label: LoRA Long Context (Distributed) # 11min - # This test runs llama 13B, so it is required to run on 4 GPUs. +- label: LoRA TP Test (Distributed) num_gpus: 4 soft_fail: true source_file_dependencies: - vllm/lora - - tests/lora/test_long_context + - tests/lora commands: # FIXIT: find out which code initialize cuda before running the test # before the fix, we need to use spawn to test it - export VLLM_WORKER_MULTIPROC_METHOD=spawn + # This test runs llama 13B, so it is required to run on 4 GPUs. - pytest -v -s -x lora/test_long_context.py + # There is some Tensor Parallelism related processing logic in LoRA that + # requires multi-GPU testing for validation. + - pytest -v -s -x lora/test_chatglm3_tp.py + - pytest -v -s -x lora/test_llama_tp.py + - label: Weight Loading Multiple GPU Test # 33min working_dir: "/vllm-workspace/tests" diff --git a/tests/lora/test_chatglm3.py b/tests/lora/test_chatglm3_tp.py similarity index 56% rename from tests/lora/test_chatglm3.py rename to tests/lora/test_chatglm3_tp.py index de4cbea80924e..f17464573459f 100644 --- a/tests/lora/test_chatglm3.py +++ b/tests/lora/test_chatglm3_tp.py @@ -1,12 +1,21 @@ from typing import List import vllm +from tests.utils import fork_new_process_for_each_test from vllm.lora.request import LoRARequest +from ..utils import multi_gpu_test + MODEL_PATH = "THUDM/chatglm3-6b" PROMPT_TEMPLATE = """I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.\n"\n##Instruction:\nconcert_singer contains tables such as stadium, singer, concert, singer_in_concert. Table stadium has columns such as Stadium_ID, Location, Name, Capacity, Highest, Lowest, Average. 
Stadium_ID is the primary key.\nTable singer has columns such as Singer_ID, Name, Country, Song_Name, Song_release_year, Age, Is_male. Singer_ID is the primary key.\nTable concert has columns such as concert_ID, concert_Name, Theme, Stadium_ID, Year. concert_ID is the primary key.\nTable singer_in_concert has columns such as concert_ID, Singer_ID. concert_ID is the primary key.\nThe Stadium_ID of concert is the foreign key of Stadium_ID of stadium.\nThe Singer_ID of singer_in_concert is the foreign key of Singer_ID of singer.\nThe concert_ID of singer_in_concert is the foreign key of concert_ID of concert.\n\n###Input:\n{query}\n\n###Response:""" # noqa: E501 +EXPECTED_LORA_OUTPUT = [ + "SELECT count(*) FROM singer", + "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 + "SELECT name , country , age FROM singer ORDER BY age", +] + def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: prompts = [ @@ -20,7 +29,6 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: "Show name, country, age for all singers ordered by age from the oldest to the youngest." # noqa: E501 ), ] - print(prompts) sampling_params = vllm.SamplingParams(temperature=0, max_tokens=32) outputs = llm.generate( prompts, @@ -37,23 +45,58 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: return generated_texts +@fork_new_process_for_each_test def test_chatglm3_lora(chatglm3_lora_files): llm = vllm.LLM(MODEL_PATH, max_model_len=1024, enable_lora=True, max_loras=4, max_lora_rank=64, + tensor_parallel_size=1, trust_remote_code=True) - expected_lora_output = [ - "SELECT count(*) FROM singer", - "SELECT avg(age) , min(age) , max(age) FROM singer WHERE country = 'France'", # noqa: E501 - "SELECT name , country , age FROM singer ORDER BY age", - ] + output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] + output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] + +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_chatglm3_lora_tp4(chatglm3_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=4, + trust_remote_code=True, + fully_sharded_loras=False) + + output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] + output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] + + +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_chatglm3_lora_tp4_fully_sharded_loras(chatglm3_lora_files): + llm = vllm.LLM(MODEL_PATH, + max_model_len=1024, + enable_lora=True, + max_loras=4, + max_lora_rank=64, + tensor_parallel_size=4, + trust_remote_code=True, + fully_sharded_loras=True) output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) - for i in range(len(expected_lora_output)): - assert output1[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output1[i] == EXPECTED_LORA_OUTPUT[i] output2 = do_sample(llm, chatglm3_lora_files, lora_id=2) - for i in range(len(expected_lora_output)): - assert output2[i] == expected_lora_output[i] + for i in range(len(EXPECTED_LORA_OUTPUT)): + assert output2[i] == EXPECTED_LORA_OUTPUT[i] 
diff --git a/tests/lora/test_llama.py b/tests/lora/test_llama.py deleted file mode 100644 index e2a4f1ed0496a..0000000000000 --- a/tests/lora/test_llama.py +++ /dev/null @@ -1,146 +0,0 @@ -from typing import List - -import pytest -import ray - -import vllm -from vllm.distributed import cleanup_dist_env_and_memory -from vllm.lora.request import LoRARequest - -MODEL_PATH = "meta-llama/Llama-2-7b-hf" - - -def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: - prompts = [ - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_95 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a low tone mora with a gloss of /˩okiru/ [òkìɽɯ́]? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? [/user] [assistant]", # noqa: E501 - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]" # noqa: E501 - ] - sampling_params = vllm.SamplingParams(temperature=0, - max_tokens=256, - stop=["[/assistant]"]) - outputs = llm.generate( - prompts, - sampling_params, - lora_request=LoRARequest(str(lora_id), lora_id, lora_path) - if lora_id else None) - # Print the outputs. 
- generated_texts: List[str] = [] - for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - generated_texts.append(generated_text) - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") - return generated_texts - - -@pytest.mark.parametrize("tp_size", [1, 2, 4]) -def test_llama_lora(sql_lora_files, tp_size, num_gpus_available): - if num_gpus_available < tp_size: - pytest.skip(f"Not enough GPUs for tensor parallelism {tp_size}") - - llm = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=tp_size) - - expected_no_lora_output = [ - "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? ", # noqa: E501 - "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 - " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
", # noqa: E501 - "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 - ] - expected_lora_output = [ - " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 - " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 - " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 - " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 - " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 - " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 - ] - - print("lora adapter created") - assert do_sample(llm, sql_lora_files, lora_id=0) == expected_no_lora_output - - print("lora 1") - assert do_sample(llm, sql_lora_files, lora_id=1) == expected_lora_output - - print("no lora") - assert do_sample(llm, sql_lora_files, lora_id=0) == expected_no_lora_output - - print("lora 2") - assert do_sample(llm, sql_lora_files, lora_id=2) == expected_lora_output - - print("removing lora") - - -def test_llama_tensor_parallel_equality(sql_lora_files, num_gpus_available): - if num_gpus_available < 4: - pytest.skip("Not enough GPUs for tensor parallelism 4") - - llm_tp1 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=1) - output_tp1 = do_sample(llm_tp1, sql_lora_files, lora_id=1) - - del llm_tp1 - cleanup_dist_env_and_memory() - - llm_tp2 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=2) - output_tp2 = do_sample(llm_tp2, sql_lora_files, lora_id=1) - - del llm_tp2 - cleanup_dist_env_and_memory() - - assert output_tp1 == output_tp2 - - llm_tp4 = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=4) - output_tp4 = do_sample(llm_tp4, sql_lora_files, lora_id=1) - - del llm_tp4 - cleanup_dist_env_and_memory() - - assert output_tp1 == output_tp4 - - -def test_llama_lora_warmup(sql_lora_files): - """Test that the LLM initialization works with a warmup LORA path and - is more conservative""" - - @ray.remote(num_gpus=1) - def get_num_gpu_blocks_lora(): - llm = vllm.LLM(MODEL_PATH, enable_lora=True, max_num_seqs=16) - num_gpu_blocks_lora_warmup = llm.llm_engine.cache_config.num_gpu_blocks - return num_gpu_blocks_lora_warmup - - @ray.remote(num_gpus=1) - def get_num_gpu_blocks_no_lora(): - llm = vllm.LLM(MODEL_PATH, max_num_seqs=16) - num_gpu_blocks_no_lora_warmup = ( - 
llm.llm_engine.cache_config.num_gpu_blocks) - return num_gpu_blocks_no_lora_warmup - - num_gpu_blocks_lora_warmup = ray.get(get_num_gpu_blocks_lora.remote()) - num_gpu_blocks_no_lora_warmup = ray.get( - get_num_gpu_blocks_no_lora.remote()) - assert num_gpu_blocks_lora_warmup < num_gpu_blocks_no_lora_warmup, ( - "The warmup with lora should be more " - "conservative than without lora, therefore the number of " - "memory blocks for the KV cache should be " - "less when using lora than when not using lora") diff --git a/tests/lora/test_llama_tp.py b/tests/lora/test_llama_tp.py new file mode 100644 index 0000000000000..aae6310a2a213 --- /dev/null +++ b/tests/lora/test_llama_tp.py @@ -0,0 +1,161 @@ +from typing import List + +import ray + +import vllm +from tests.utils import fork_new_process_for_each_test +from vllm.lora.request import LoRARequest + +from ..utils import multi_gpu_test + +MODEL_PATH = "meta-llama/Llama-2-7b-hf" + +EXPECTED_NO_LORA_OUTPUT = [ + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_75 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_76 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_77 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_78 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user]", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? ", # noqa: E501 + "\n\n answer: 1\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_96 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_97 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a high tone mora with a gloss of /˧kot/ [kòt]? [/user] [assistant]\n\n answer: 2\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_98 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one m", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. ", # noqa: E501 + " Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
", # noqa: E501 + "\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]\n\n [user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE", # noqa: E501 +] +EXPECTED_LORA_OUTPUT = [ + " SELECT icao FROM table_name_74 WHERE airport = 'lilongwe international airport' ", # noqa: E501 + " SELECT nationality FROM table_name_11 WHERE elector = 'anchero pantaleone' ", # noqa: E501 + " SELECT one_mora FROM table_name_95 WHERE gloss = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] AND accented_mora = 'low tone mora with a gloss of /˩okiru/' [òkìɽɯ́] ", # noqa: E501 + " SELECT sex FROM people WHERE people_id IN (SELECT people_id FROM candidate GROUP BY sex ORDER BY COUNT(people_id) DESC LIMIT 1) ", # noqa: E501 + " SELECT pick FROM table_name_60 WHERE former_wnba_team = 'Minnesota Lynx' ", # noqa: E501 + " SELECT womens_doubles FROM table_28138035_4 WHERE mens_singles = 'Werner Schlager' " # noqa: E501 +] + + +def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: + prompts = [ + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_95 (one_mora VARCHAR, gloss VARCHAR, accented_mora VARCHAR)\n\n question: What is the one mora for a low tone mora with a gloss of /˩okiru/ [òkìɽɯ́]? [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE candidate (people_id VARCHAR, unsure_rate INTEGER); CREATE TABLE people (sex VARCHAR, people_id VARCHAR)\n\n question: which gender got the highest average uncertain ratio. [/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_60 (pick INTEGER, former_wnba_team VARCHAR)\n\n question: What pick was a player that previously played for the Minnesota Lynx? 
[/user] [assistant]", # noqa: E501 + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_28138035_4 (womens_doubles VARCHAR, mens_singles VARCHAR)\n\n question: Name the women's doubles for werner schlager [/user] [assistant]" # noqa: E501 + ] + sampling_params = vllm.SamplingParams(temperature=0, + max_tokens=256, + stop=["[/assistant]"]) + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest(str(lora_id), lora_id, lora_path) + if lora_id else None) + # Print the outputs. + generated_texts: List[str] = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + generated_texts.append(generated_text) + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + return generated_texts + + +@fork_new_process_for_each_test +def test_llama_lora(sql_lora_files): + + llm = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=1) + + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 2") + assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT + + print("removing lora") + + +@fork_new_process_for_each_test +def test_llama_lora_warmup(sql_lora_files): + """Test that the LLM initialization works with a warmup LORA path and + is more conservative""" + + @ray.remote(num_gpus=1) + def get_num_gpu_blocks_lora(): + llm = vllm.LLM(MODEL_PATH, enable_lora=True, max_num_seqs=16) + num_gpu_blocks_lora_warmup = llm.llm_engine.cache_config.num_gpu_blocks + return num_gpu_blocks_lora_warmup + + @ray.remote(num_gpus=1) + def get_num_gpu_blocks_no_lora(): + llm = vllm.LLM(MODEL_PATH, max_num_seqs=16) + num_gpu_blocks_no_lora_warmup = ( + llm.llm_engine.cache_config.num_gpu_blocks) + return num_gpu_blocks_no_lora_warmup + + num_gpu_blocks_lora_warmup = ray.get(get_num_gpu_blocks_lora.remote()) + num_gpu_blocks_no_lora_warmup = ray.get( + get_num_gpu_blocks_no_lora.remote()) + assert num_gpu_blocks_lora_warmup < num_gpu_blocks_no_lora_warmup, ( + "The warmup with lora should be more " + "conservative than without lora, therefore the number of " + "memory blocks for the KV cache should be " + "less when using lora than when not using lora") + + +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_llama_lora_tp4(sql_lora_files): + + llm = vllm.LLM( + MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4, + ) + + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 2") + assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT + + print("removing lora") + + +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files): + + llm = vllm.LLM( + MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4, + fully_sharded_loras=True, + ) + print("lora adapter created") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + 
print("lora 1") + assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + + print("no lora") + assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT + + print("lora 2") + assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT + + print("removing lora") diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index 3443c3feb4d2a..f5c2eced9d2bb 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -44,6 +44,11 @@ class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA): Based on S-LoRA, slicing happens along the rank dim. """ + # For all LoRA layers where the `base_layer` is `ColumnParallelLinear`, + # their `lora_a` and `lora_b` have different sharding patterns. After + # completing the `lora_a` GEMM , a gather operation is performed. + # Therefore, the sharding of `lora_a` only needs to correspond with the + # gather operation. def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: tp_rank = get_tensor_model_parallel_rank() shard_size = self.lora_a_stacked.shape[2] diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 6afe80219fe07..3701988ff692f 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -451,6 +451,12 @@ class ColumnParallelLinearWithLoRA(BaseLayerWithLoRA): def __init__(self, base_layer: ColumnParallelLinear) -> None: super().__init__() + # The base_layer type is ColumnParallelLinear or + # MergedColumnParallelLinear, their weight sharding logic is + # inconsistent when TP is greater than 1. + self.is_merged_col_linear = type( + base_layer) is MergedColumnParallelLinear + self.base_layer = base_layer self.tp_size = get_tensor_model_parallel_world_size() self.input_size = self.base_layer.input_size @@ -508,14 +514,30 @@ def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: return lora_a def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: - tensor_model_parallel_rank = get_tensor_model_parallel_rank() - shard_size = self.output_dim - start_idx = tensor_model_parallel_rank * shard_size - end_idx = (tensor_model_parallel_rank + 1) * shard_size - lora_b = lora_b[:, start_idx:end_idx] + # Applicable to cases where the base_layer is + # MergedColumnParallelLinear. + if self.is_merged_col_linear: + tp_rank = get_tensor_model_parallel_rank() + shard_size = self.output_size // 2 + offset = lora_b.shape[-1] // 2 + + left_weight = lora_b[:, tp_rank * shard_size:(tp_rank + 1) * + shard_size] + right_weight = lora_b[:, offset + tp_rank * shard_size:offset + + (tp_rank + 1) * shard_size] + lora_b = torch.cat([left_weight, right_weight], dim=1) + # Applicable to cases where the base_layer is + # ColumnParallelLinear. + else: + tensor_model_parallel_rank = get_tensor_model_parallel_rank() + shard_size = self.output_dim + start_idx = tensor_model_parallel_rank * shard_size + end_idx = (tensor_model_parallel_rank + 1) * shard_size + lora_b = lora_b[:, start_idx:end_idx] return lora_b def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: + # TODO: Fix the slicing logic of bias. if bias is None: return bias tensor_model_parallel_rank = get_tensor_model_parallel_rank() @@ -779,7 +801,7 @@ def can_replace_layer( class QKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): """ ColumnParallelLinear layer that is specifically designed for - qkv_proj. Certain models, such as chtglm3 and baichuan-7b, + qkv_proj. Certain models, such as chatglm3 and baichuan-7b, only contains a single LoRA within their qkv_proj layer. 
During inference with Tensor Parallel, the weights of lora_b diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index e3a068908b7f3..5bcbce7180ca4 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -760,7 +760,7 @@ def __new__( config = vllm_config.model_config.hf_config # Initialize VL if hasattr(config, "visual"): - return ChatGLM(vllm_config=vllm_config, prefix=prefix) + return ChatGLMV(vllm_config=vllm_config, prefix=prefix) # Initialize LLM else: - return ChatGLMV(vllm_config=vllm_config, prefix=prefix) + return ChatGLM(vllm_config=vllm_config, prefix=prefix) From 1c445dca51a877ac6a5b7e03ecdb73e0e34d139e Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Sun, 24 Nov 2024 11:57:13 +0800 Subject: [PATCH 123/397] [CI/Build] Print running script to enhance CI log readability (#10594) Signed-off-by: Jee Jee Li --- .buildkite/test-pipeline.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index bff33d35b423e..ed8c84ce9f5c0 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -52,6 +52,7 @@ steps: - tests/worker - tests/test_lazy_torch_compile.py commands: + - echo 'Running test_lazy_torch_compile.py...' # print running script to enhance CI log readability - python3 test_lazy_torch_compile.py - pytest -v -s mq_llm_engine # MQLLMEngine - pytest -v -s async_engine # AsyncLLMEngine @@ -182,15 +183,25 @@ steps: - examples/ commands: - pip install awscli tensorizer # for llava example and tensorizer test + - echo 'Running offline_inference.py...' # print running script to enhance CI log readability - python3 offline_inference.py + - echo 'Running cpu_offload.py...' - python3 cpu_offload.py + - echo 'Running offline_inference_chat.py...' - python3 offline_inference_chat.py + - echo 'Running offline_inference_with_prefix.py...' - python3 offline_inference_with_prefix.py + - echo 'Running llm_engine_example.py...' - python3 llm_engine_example.py + - echo 'Running offline_inference_vision_language.py...' - python3 offline_inference_vision_language.py + - echo 'Running offline_inference_vision_language_multi_image.py...' - python3 offline_inference_vision_language_multi_image.py + - echo 'Running tensorize_vllm_model.py...' - python3 tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors + - echo 'Running offline_inference_encoder_decoder.py...' - python3 offline_inference_encoder_decoder.py + - echo 'Running offline_profile.py...' - python3 offline_profile.py --model facebook/opt-125m - label: Prefix Caching Test # 9min From eda2b3589c8b27a9b8f8aea24afe1673890d19d2 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 23 Nov 2024 21:31:47 -0800 Subject: [PATCH 124/397] Revert "Print running script to enhance CI log readability" (#10601) --- .buildkite/test-pipeline.yaml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index ed8c84ce9f5c0..bff33d35b423e 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -52,7 +52,6 @@ steps: - tests/worker - tests/test_lazy_torch_compile.py commands: - - echo 'Running test_lazy_torch_compile.py...' 
# print running script to enhance CI log readability - python3 test_lazy_torch_compile.py - pytest -v -s mq_llm_engine # MQLLMEngine - pytest -v -s async_engine # AsyncLLMEngine @@ -183,25 +182,15 @@ steps: - examples/ commands: - pip install awscli tensorizer # for llava example and tensorizer test - - echo 'Running offline_inference.py...' # print running script to enhance CI log readability - python3 offline_inference.py - - echo 'Running cpu_offload.py...' - python3 cpu_offload.py - - echo 'Running offline_inference_chat.py...' - python3 offline_inference_chat.py - - echo 'Running offline_inference_with_prefix.py...' - python3 offline_inference_with_prefix.py - - echo 'Running llm_engine_example.py...' - python3 llm_engine_example.py - - echo 'Running offline_inference_vision_language.py...' - python3 offline_inference_vision_language.py - - echo 'Running offline_inference_vision_language_multi_image.py...' - python3 offline_inference_vision_language_multi_image.py - - echo 'Running tensorize_vllm_model.py...' - python3 tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors - - echo 'Running offline_inference_encoder_decoder.py...' - python3 offline_inference_encoder_decoder.py - - echo 'Running offline_profile.py...' - python3 offline_profile.py --model facebook/opt-125m - label: Prefix Caching Test # 9min From c055747867e771dbc791c9aa3c394c4d4489cd82 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 23 Nov 2024 22:22:54 -0800 Subject: [PATCH 125/397] [model][utils] add extract_layer_index utility function (#10599) Signed-off-by: youkaichao --- vllm/model_executor/models/arctic.py | 41 +++++++++++-------------- vllm/model_executor/models/deepseek.py | 19 +++++++----- vllm/model_executor/models/gemma2.py | 15 +++------ vllm/model_executor/models/olmoe.py | 8 ++--- vllm/model_executor/models/qwen2_moe.py | 6 ++-- vllm/model_executor/models/utils.py | 21 +++++++++++++ 6 files changed, 59 insertions(+), 51 deletions(-) diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index ac4c464aa10ac..fd6b5659df5d1 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -33,7 +33,7 @@ from vllm.transformers_utils.configs.arctic import ArcticConfig from .interfaces import SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -44,15 +44,14 @@ class ArcticMLP(nn.Module): def __init__(self, config: ArcticConfig, - layer_id: int, expert_id: int = -1, is_residual_mlp: bool = False, quant_config: Optional[QuantizationConfig] = None, - reduce_results: bool = True): + reduce_results: bool = True, + prefix: str = ""): super().__init__() self.hidden_size = config.hidden_size self.expert_id = expert_id - self.layer_id = layer_id self.ffn_dim = config.intermediate_size if not is_residual_mlp \ else self.hidden_size @@ -85,13 +84,14 @@ class ArcticMoE(nn.Module): def __init__(self, config: ArcticConfig, - layer_id: int, tp_size: Optional[int] = None, params_dtype: Optional[torch.dtype] = None, quant_config: Optional[QuantizationConfig] = None, - reduce_results: bool = True): + reduce_results: bool = True, + prefix: str = ""): super().__init__() + layer_id = extract_layer_index(prefix) self.tp_size = tp_size or 
get_tensor_model_parallel_world_size() self.hidden_size = config.hidden_size self.num_experts = config.num_local_experts @@ -109,15 +109,16 @@ def __init__(self, if not self.is_moe_layer: self.mlp = ArcticMLP(config, - layer_id=layer_id, quant_config=quant_config, - reduce_results=reduce_results) + reduce_results=reduce_results, + prefix=f"{prefix}.mlp") else: self.gate = ReplicatedLinear(self.hidden_size, self.num_experts, bias=False, params_dtype=self.params_dtype, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.gate") if self.is_quant: self.ws = DeepSpeedFPParameter( torch.Size((self.num_experts, 2 * self.intermediate_size, @@ -220,14 +221,12 @@ class ArcticAttention(nn.Module): def __init__( self, config: ArcticConfig, - layer_idx: Optional[int] = None, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", ): super().__init__() self.config = config - self.layer_idx = layer_idx self.hidden_size = config.hidden_size tp_size = get_tensor_model_parallel_world_size() @@ -298,26 +297,25 @@ class ArcticDecoderLayer(nn.Module): def __init__( self, config: ArcticConfig, - layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", ) -> None: super().__init__() - self.layer_idx = layer_idx self.hidden_size = config.hidden_size + layer_idx = extract_layer_index(prefix) is_moe_layer = (layer_idx + 1) % config.moe_layer_frequency == 0 self.use_residual = config.use_residual and is_moe_layer self.self_attn = ArcticAttention(config, - layer_idx, cache_config, quant_config=quant_config, prefix=f"{prefix}.self_attn") self.block_sparse_moe = ArcticMoE( config, - layer_id=layer_idx, quant_config=quant_config, - reduce_results=(not self.use_residual)) + reduce_results=(not self.use_residual), + prefix=f"{prefix}.block_sparse_moe", + ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -328,9 +326,9 @@ def __init__( self.residual_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.residual_mlp = ArcticMLP(config, - layer_id=layer_idx, is_residual_mlp=True, - reduce_results=False) + reduce_results=False, + prefix=f"{prefix}.residual_mlp") def forward( self, @@ -384,11 +382,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): org_num_embeddings=self.vocab_size) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: ArcticDecoderLayer(config, - int(prefix.split(".")[-1]), - cache_config, - quant_config, - prefix=prefix), + lambda prefix: ArcticDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self._attn_implementation = config._attn_implementation self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/deepseek.py b/vllm/model_executor/models/deepseek.py index 32488d931ea1c..74b6bfdf21909 100644 --- a/vllm/model_executor/models/deepseek.py +++ b/vllm/model_executor/models/deepseek.py @@ -49,7 +49,7 @@ from vllm.sequence import IntermediateTensors from .interfaces import SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -63,6 +63,7 @@ def __init__( hidden_act: str, quant_config: Optional[QuantizationConfig] = None, reduce_results: bool = True, + prefix: str = "", ) -> None: super().__init__() self.gate_up_proj 
= MergedColumnParallelLinear( @@ -92,6 +93,7 @@ def __init__( self, config: PretrainedConfig, quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", ): super().__init__() self.config = config @@ -260,12 +262,12 @@ class DeepseekDecoderLayer(nn.Module): def __init__( self, config: PretrainedConfig, - layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", ) -> None: super().__init__() + layer_idx = extract_layer_index(prefix) self.hidden_size = config.hidden_size rope_theta = getattr(config, "rope_theta", 10000) rope_scaling = getattr(config, "rope_scaling", None) @@ -285,13 +287,16 @@ def __init__( if (config.n_routed_experts is not None and layer_idx >= config.first_k_dense_replace and layer_idx % config.moe_layer_freq == 0): - self.mlp = DeepseekMoE(config=config, quant_config=quant_config) + self.mlp = DeepseekMoE(config=config, + quant_config=quant_config, + prefix=f"{prefix}.mlp") else: self.mlp = DeepseekMLP( hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, quant_config=quant_config, + prefix=f"{prefix}.mlp", ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -347,11 +352,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: DeepseekDecoderLayer(config, - int(prefix.split(".")[-1]), - cache_config, - quant_config=quant_config, - prefix=prefix), + lambda prefix: DeepseekDecoderLayer( + config, cache_config, quant_config=quant_config, prefix=prefix + ), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index 9309cced61bb3..fd8223dd9be1b 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -42,7 +42,8 @@ from vllm.sequence import IntermediateTensors, PoolerOutput from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, is_pp_missing_parameter, +from .utils import (AutoWeightsLoader, extract_layer_index, + is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -85,7 +86,6 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: class Gemma2Attention(nn.Module): def __init__(self, - layer_idx: int, config: Gemma2Config, hidden_size: int, num_heads: int, @@ -98,7 +98,6 @@ def __init__(self, attn_logits_soft_cap: Optional[float] = None, prefix: str = "") -> None: super().__init__() - self.layer_idx = layer_idx self.config = config self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() @@ -145,6 +144,7 @@ def __init__(self, # reference: # https://github.com/huggingface/transformers/blob/54be2d7ae87e873482b984cc956e165ca4dc0ba3/src/transformers/models/gemma2/modeling_gemma2.py#L312 # noqa + layer_idx = extract_layer_index(prefix) use_sliding_window = (layer_idx % 2 == 0 and config.interleaved_sliding_window is not None) sliding_window = config.interleaved_sliding_window if \ @@ -178,7 +178,6 @@ class Gemma2DecoderLayer(nn.Module): def __init__( self, - layer_idx: int, config: Gemma2Config, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, @@ -187,7 +186,6 @@ def __init__( super().__init__() self.hidden_size = config.hidden_size self.self_attn = 
Gemma2Attention( - layer_idx=layer_idx, config=config, hidden_size=self.hidden_size, num_heads=config.num_attention_heads, @@ -262,11 +260,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: Gemma2DecoderLayer(int(prefix.split(".")[-1]), - config, - cache_config, - quant_config, - prefix=prefix), + lambda prefix: Gemma2DecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py index 5b5b3ef48b035..5d9091cfb9311 100644 --- a/vllm/model_executor/models/olmoe.py +++ b/vllm/model_executor/models/olmoe.py @@ -181,7 +181,6 @@ class OlmoeDecoderLayer(nn.Module): def __init__( self, config: PretrainedConfig, - layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", @@ -264,11 +263,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: OlmoeDecoderLayer(config, - int(prefix.split(".")[-1]), - cache_config, - quant_config, - prefix=prefix), + lambda prefix: OlmoeDecoderLayer( + config, cache_config, quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm = RMSNorm(config.hidden_size, eps=1e-5) diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py index 1091f88ab2534..ba70243c6533d 100644 --- a/vllm/model_executor/models/qwen2_moe.py +++ b/vllm/model_executor/models/qwen2_moe.py @@ -53,7 +53,7 @@ from vllm.utils import print_warning_once from .interfaces import SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -244,7 +244,6 @@ class Qwen2MoeDecoderLayer(nn.Module): def __init__( self, config: PretrainedConfig, - layer_idx: int, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", @@ -269,6 +268,7 @@ def __init__( # Note: Qwen/Qwen2-57B-A14B-Instruct does not have # `mlp_only_layers` in the config. + layer_idx = extract_layer_index(prefix) mlp_only_layers = ([] if not hasattr(config, "mlp_only_layers") else config.mlp_only_layers) if (layer_idx not in mlp_only_layers) and ( @@ -337,8 +337,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, lambda prefix: Qwen2MoeDecoderLayer(config=config, - layer_idx=int( - prefix.split(".")[-1]), cache_config=cache_config, quant_config=quant_config, prefix=prefix), diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 2ab9b19e22068..dcfd2cb7d2622 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -629,3 +629,24 @@ def maybe_prefix(prefix: str, name: str) -> str: The string "prefix.name" if prefix was non-empty, otherwise just "name". """ return name if not prefix else f"{prefix}.{name}" + + +def extract_layer_index(layer_name: str) -> int: + """ + Extract the layer index from the module name. 
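+    The layer name is typically the ``prefix`` argument that vLLM passes to
+    each submodule at construction time, e.g. "model.layers.3.self_attn".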
+ Examples: + - "encoder.layers.0" -> 0 + - "encoder.layers.1.self_attn" -> 1 + - "2.self_attn" -> 2 + - "model.encoder.layers.0.sub.1" -> ValueError + """ + subnames = layer_name.split(".") + int_vals: List[int] = [] + for subname in subnames: + try: + int_vals.append(int(subname)) + except ValueError: + continue + assert len(int_vals) == 1, (f"layer name {layer_name} should" + " only contain one integer") + return int_vals[0] From e4fbb1441454847fdd871c9959b5cb05b5037aa2 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 24 Nov 2024 11:21:40 -0800 Subject: [PATCH 126/397] [doc] update the code to add models (#10603) Signed-off-by: youkaichao Co-authored-by: Cyrus Leung --- docs/source/models/adding_model.rst | 85 +++++++++++++++++++---------- 1 file changed, 57 insertions(+), 28 deletions(-) diff --git a/docs/source/models/adding_model.rst b/docs/source/models/adding_model.rst index a70ebf99c746f..df06d736ca86b 100644 --- a/docs/source/models/adding_model.rst +++ b/docs/source/models/adding_model.rst @@ -38,41 +38,70 @@ For instance, vLLM's `OPT model Union[Tuple, CausalLMOutputWithPast]: - + positions: torch.Tensor, - + kv_caches: List[torch.Tensor], - + attn_metadata: AttentionMetadata, - + ) -> Optional[SamplerOutput]: - -1. Update the code by considering that :code:`input_ids` and :code:`positions` are now flattened tensors. -2. Replace the attention operation with either :code:`PagedAttention`, :code:`PagedAttentionWithRoPE`, or :code:`PagedAttentionWithALiBi` depending on the model's architecture. +To ensure compatibility with vLLM, your model must meet the following requirements: + +Initialization Code +^^^^^^^^^^^^^^^^^^^ + +All vLLM modules within the model must include a ``prefix`` argument in their constructor. This ``prefix`` is typically the full name of the module in the model's state dictionary and is crucial for: + +* Runtime support: vLLM's attention operators are registered in a model's state by their full names. Each attention operator must have a unique prefix as its layer name to avoid conflicts. +* Non-uniform quantization support: A quantized checkpoint can selectively quantize certain layers while keeping others in full precision. By providing the ``prefix`` during initialization, vLLM can match the current layer's ``prefix`` with the quantization configuration to determine if the layer should be initialized in quantized mode. + +The initialization code should look like this: + +.. code-block:: python + + from torch import nn + from vllm.config import VllmConfig + from vllm.attention import Attention + + class MyAttention(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.attn = Attention(prefix=f"{prefix}.attn") + + class MyDecoderLayer(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.self_attn = MyAttention(prefix=f"{prefix}.self_attn") + + class MyModel(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.layers = nn.ModuleList( + [MyDecoderLayer(vllm_config, prefix=f"{prefix}.layers.{i}") for i in range(vllm_config.model_config.hf_config.num_hidden_layers)] + ) + + class MyModelForCausalLM(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + self.model = MyModel(vllm_config, prefix=f"{prefix}.model") + +Computation Code +^^^^^^^^^^^^^^^^ + +Rewrite the :meth:`~torch.nn.Module.forward` method of your model to remove any unnecessary code, such as training-specific code. 
Modify the input parameters to treat ``input_ids`` and ``positions`` as flattened tensors with a single batch size dimension, without a max-sequence length dimension. + +.. code-block:: python + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + ... .. note:: Currently, vLLM supports the basic multi-head attention mechanism and its variant with rotary positional embeddings. If your model employs a different attention mechanism, you will need to implement a new attention layer in vLLM. +For reference, check out the `LLAMA model `__. vLLM already supports a large number of models. It is recommended to find a model similar to yours and adapt it to your model's architecture. Check out the `vLLM models `__ directory for more examples. 3. (Optional) Implement tensor parallelism and quantization support ------------------------------------------------------------------- From 49628fe13e1021ce036bbae257242ab71e40aa25 Mon Sep 17 00:00:00 2001 From: Zhuohan Li Date: Sun, 24 Nov 2024 16:45:09 -0800 Subject: [PATCH 127/397] [Doc] Update README.md with Ray Summit talk links (#10610) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0ef073210d070..4e1353d98f1dc 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ Easy, fast, and cheap LLM serving for everyone *Latest News* 🔥 - [2024/11] We hosted [the seventh vLLM meetup](https://lu.ma/h0qvrajz) with Snowflake! Please find the meetup slides [here](https://docs.google.com/presentation/d/1e3CxQBV3JsfGp30SwyvS3eM_tW-ghOhJ9PAJGK6KR54/edit?usp=sharing). - [2024/10] We have just created a developer slack ([slack.vllm.ai](https://slack.vllm.ai)) focusing on coordinating contributions and discussing features. Please feel free to join us there! -- [2024/10] Ray Summit 2024 held a special track for vLLM! Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://raysummit.anyscale.com/flow/anyscale/raysummit2024/landing/page/sessioncatalog?tab.day=20241001&search.sessiontracks=1719251906298001uzJ2) from other vLLM contributors and users! +- [2024/10] Ray Summit 2024 held a special track for vLLM! Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://www.youtube.com/playlist?list=PLzTswPQNepXl6AQwifuwUImLPFRVpksjR) from other vLLM contributors and users! - [2024/09] We hosted [the sixth vLLM meetup](https://lu.ma/87q3nvnh) with NVIDIA! Please find the meetup slides [here](https://docs.google.com/presentation/d/1wrLGwytQfaOTd5wCGSPNhoaW3nq0E-9wqyP7ny93xRs/edit?usp=sharing). - [2024/07] We hosted [the fifth vLLM meetup](https://lu.ma/lp0gyjqr) with AWS! Please find the meetup slides [here](https://docs.google.com/presentation/d/1RgUD8aCfcHocghoP3zmXzck9vX3RCI9yfUAB2Bbcl4Y/edit?usp=sharing). - [2024/07] In partnership with Meta, vLLM officially supports Llama 3.1 with FP8 quantization and pipeline parallelism! Please check out our blog post [here](https://blog.vllm.ai/2024/07/23/llama31.html). 
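The layer-index refactor and the adding-model documentation above both lean on the same convention: every submodule receives its dotted name as `prefix`, and the layer index is recovered from that string rather than passed as a separate argument. Below is a minimal, self-contained sketch that restates the `extract_layer_index` helper from the refactor above and applies it to illustrative prefixes (the sample names are hypothetical, not taken from a real checkpoint):

```python
# Minimal sketch of the prefix -> layer-index convention (illustration only).
from typing import List


def extract_layer_index(layer_name: str) -> int:
    """Return the single integer component of a dotted module name."""
    int_vals: List[int] = []
    for subname in layer_name.split("."):
        try:
            int_vals.append(int(subname))
        except ValueError:
            continue
    assert len(int_vals) == 1, (
        f"layer name {layer_name} should only contain one integer")
    return int_vals[0]


# Prefixes shaped like the ones make_layers hands to each decoder layer.
for prefix in ("model.layers.0", "model.layers.7.self_attn"):
    print(prefix, "->", extract_layer_index(prefix))
# model.layers.0 -> 0
# model.layers.7.self_attn -> 7
```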
From 214efc2c3cb568e8eb3f7d234f3bd8f5bbe24795 Mon Sep 17 00:00:00 2001 From: Maximilien de Bayser Date: Sun, 24 Nov 2024 23:56:20 -0300 Subject: [PATCH 128/397] Support Cross encoder models (#10400) Signed-off-by: Max de Bayser Signed-off-by: Max de Bayser Signed-off-by: Flavia Beo Co-authored-by: Flavia Beo --- .../serving/openai_compatible_server.md | 142 ++++++++++++ examples/openai_cross_encoder_score.py | 58 +++++ tests/conftest.py | 20 ++ tests/entrypoints/openai/test_score.py | 93 ++++++++ .../models/embedding/language/test_scoring.py | 95 ++++++++ tests/models/registry.py | 9 + tests/models/test_registry.py | 23 +- vllm/config.py | 5 + vllm/core/scheduler.py | 1 + vllm/entrypoints/llm.py | 124 +++++++++- vllm/entrypoints/openai/api_server.py | 35 ++- vllm/entrypoints/openai/protocol.py | 36 +++ vllm/entrypoints/openai/serving_score.py | 215 ++++++++++++++++++ vllm/inputs/data.py | 18 ++ vllm/inputs/preprocess.py | 2 + vllm/model_executor/layers/pooler.py | 64 ++++++ vllm/model_executor/models/bert.py | 128 ++++++++++- vllm/model_executor/models/interfaces.py | 36 +++ vllm/model_executor/models/registry.py | 23 +- vllm/model_executor/models/roberta.py | 179 ++++++++++++--- vllm/multimodal/inputs.py | 5 +- vllm/outputs.py | 45 +++- vllm/sequence.py | 9 + vllm/transformers_utils/config.py | 15 ++ vllm/worker/cpu_embedding_model_runner.py | 4 + vllm/worker/cpu_model_runner.py | 13 ++ vllm/worker/embedding_model_runner.py | 7 +- vllm/worker/model_runner.py | 28 +++ 28 files changed, 1370 insertions(+), 62 deletions(-) create mode 100644 examples/openai_cross_encoder_score.py create mode 100644 tests/entrypoints/openai/test_score.py create mode 100644 tests/models/embedding/language/test_scoring.py create mode 100644 vllm/entrypoints/openai/serving_score.py diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md index 79d032bf8b211..c39cef85897ed 100644 --- a/docs/source/serving/openai_compatible_server.md +++ b/docs/source/serving/openai_compatible_server.md @@ -44,6 +44,148 @@ We currently support the following OpenAI APIs: - This enables multi-modal inputs to be passed to embedding models, see [Using VLMs](../models/vlm.rst). - *Note: You should run `vllm serve` with `--task embedding` to ensure that the model is being run in embedding mode.* +## Score API for Cross Encoder Models + +vLLM supports *cross encoders models* at the **/v1/score** endpoint, which is not an OpenAI API standard endpoint. You can find the documentation for these kind of models at [sbert.net](https://www.sbert.net/docs/package_reference/cross_encoder/cross_encoder.html). + +A ***Cross Encoder*** takes exactly two sentences / texts as input and either predicts a score or label for this sentence pair. It can for example predict the similarity of the sentence pair on a scale of 0 … 1. + +### Example of usage for a pair of a string and a list of texts + +In this case, the model will compare the first given text to each of the texts containing the list. + +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/v1/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "text_1": "What is the capital of France?", + "text_2": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris." 
+ ] +}' +``` + +Response: + +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693570, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": [ + 0.001094818115234375 + ] + }, + { + "index": 1, + "object": "score", + "score": [ + 1 + ] + } + ], + "usage": {} +} +``` + +### Example of usage for a pair of two lists of texts + +In this case, the model will compare the one by one, making pairs by same index correspondent in each list. + +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/v1/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "encoding_format": "float", + "text_1": [ + "What is the capital of Brazil?", + "What is the capital of France?" + ], + "text_2": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris." + ] +}' +``` + +Response: + +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693447, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": [ + 1 + ] + }, + { + "index": 1, + "object": "score", + "score": [ + 1 + ] + } + ], + "usage": {} +} +``` + +### Example of usage for a pair of two strings + +In this case, the model will compare the strings of texts. + +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/v1/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "encoding_format": "float", + "text_1": "What is the capital of France?", + "text_2": "The capital of France is Paris." +}' +``` + +Response: + +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693447, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": [ + 1 + ] + } + ], + "usage": {} +} +``` + ## Extra Parameters vLLM supports a set of parameters that are not part of the OpenAI API. diff --git a/examples/openai_cross_encoder_score.py b/examples/openai_cross_encoder_score.py new file mode 100644 index 0000000000000..8c32eea5dd252 --- /dev/null +++ b/examples/openai_cross_encoder_score.py @@ -0,0 +1,58 @@ +"""Examples Python client Score for Cross Encoder Models +""" + +import argparse +import json +import pprint + +import requests + + +def post_http_request(prompt: json, api_url: str) -> requests.Response: + headers = {"User-Agent": "Test Client"} + response = requests.post(api_url, headers=headers, json=prompt) + return response + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--host", type=str, default="localhost") + parser.add_argument("--port", type=int, default=8000) + parser.add_argument("--model", type=str, default="BAAI/bge-reranker-v2-m3") + args = parser.parse_args() + api_url = f"http://{args.host}:{args.port}/v1/score" + + model_name = args.model + + text_1 = "What is the capital of France?" + text_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." + ] + prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} + score_response = post_http_request(prompt=prompt, api_url=api_url) + print("Prompt for text_1 is string and text_2 is a list:") + pprint.pprint(prompt) + print("Score Response:") + pprint.pprint(score_response.data) + + text_1 = [ + "What is the capital of Brazil?", "What is the capital of France?" + ] + text_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." 
+ ] + prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} + score_response = post_http_request(prompt=prompt, api_url=api_url) + print("Prompt for text_1 and text_2 are lists:") + pprint.pprint(prompt) + print("Score Response:") + pprint.pprint(score_response.data) + + text_1 = "What is the capital of Brazil?" + text_2 = "The capital of Brazil is Brasilia." + prompt = {"model": model_name, "text_1": text_1, "text_2": text_2} + score_response = post_http_request(prompt=prompt, api_url=api_url) + print("Prompt for text_1 and text_2 are strings:") + pprint.pprint(prompt) + print("Score Response:") + pprint.pprint(score_response.data) \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index 0dc1cc6e83c18..29707f975e2a0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -265,6 +265,7 @@ def __init__( model_kwargs: Optional[Dict[str, Any]] = None, is_embedding_model: bool = False, is_sentence_transformer: bool = False, + is_cross_encoder: bool = False, skip_tokenizer_init: bool = False, auto_cls: Type[_BaseAutoModelClass] = AutoModelForCausalLM, postprocess_inputs: Callable[..., BatchEncoding] = identity, @@ -282,6 +283,14 @@ def __init__( device="cpu", trust_remote_code=True, ).to(dtype=torch_dtype)) + elif is_cross_encoder: + # Lazy init required for AMD CI + from sentence_transformers import CrossEncoder + self.model = CrossEncoder(model_name, + device="cpu", + trust_remote_code=True) + self.model.model = self.wrap_device(self.model.model)\ + .to(dtype=torch_dtype) else: model_kwargs = model_kwargs if model_kwargs is not None else {} self.model = self.wrap_device( @@ -625,6 +634,9 @@ def generate_encoder_decoder_greedy_logprobs_limit( def encode(self, prompts: List[str]) -> List[List[torch.Tensor]]: return self.model.encode(prompts) + def predict(self, prompts: List[List[str]]) -> torch.Tensor: + return self.model.predict(prompts, convert_to_tensor=True) + def __enter__(self): return self @@ -898,6 +910,14 @@ def encode( req_outputs = self.model.encode(inputs) return [req_output.outputs.embedding for req_output in req_outputs] + def score( + self, + text_1: Union[str, List[str]], + text_2: Union[str, List[str]], + ) -> List[List[float]]: + req_outputs = self.model.score(text_1, text_2) + return [req_output.outputs.embedding for req_output in req_outputs] + def __enter__(self): return self diff --git a/tests/entrypoints/openai/test_score.py b/tests/entrypoints/openai/test_score.py new file mode 100644 index 0000000000000..7565ff7192f67 --- /dev/null +++ b/tests/entrypoints/openai/test_score.py @@ -0,0 +1,93 @@ +import pytest +import requests + +from vllm.entrypoints.openai.protocol import ScoreResponse + +from ...utils import RemoteOpenAIServer + +MODEL_NAME = "BAAI/bge-reranker-v2-m3" + + +@pytest.fixture(scope="module") +def server(): + args = [ + "--enforce-eager", + ] + + with RemoteOpenAIServer(MODEL_NAME, args) as remote_server: + yield remote_server + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +async def test_text_1_str_text_2_list(server: RemoteOpenAIServer, + model_name: str): + text_1 = "What is the capital of France?" + text_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." 
+ ] + + score_response = requests.post(server.url_for("v1/score"), + json={ + "model": model_name, + "text_1": text_1, + "text_2": text_2, + }) + score_response.raise_for_status() + score = ScoreResponse.model_validate(score_response.json()) + + assert score.id is not None + assert score.data is not None + assert len(score.data) == 2 + assert score.data[0].score[0] <= 0.01 + assert score.data[1].score[0] >= 0.9 + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +async def test_text_1_list_text_2_list(server: RemoteOpenAIServer, + model_name: str): + text_1 = [ + "What is the capital of the United States?", + "What is the capital of France?" + ] + text_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." + ] + + score_response = requests.post(server.url_for("v1/score"), + json={ + "model": model_name, + "text_1": text_1, + "text_2": text_2, + }) + score_response.raise_for_status() + score = ScoreResponse.model_validate(score_response.json()) + + assert score.id is not None + assert score.data is not None + assert len(score.data) == 2 + assert score.data[0].score[0] <= 0.01 + assert score.data[1].score[0] >= 0.9 + + +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +async def test_text_1_str_text_2_str(server: RemoteOpenAIServer, + model_name: str): + text_1 = "What is the capital of France?" + text_2 = "The capital of France is Paris." + + score_response = requests.post(server.url_for("v1/score"), + json={ + "model": model_name, + "text_1": text_1, + "text_2": text_2, + }) + score_response.raise_for_status() + score = ScoreResponse.model_validate(score_response.json()) + + assert score.id is not None + assert score.data is not None + assert len(score.data) == 1 + assert score.data[0].score[0] >= 0.9 diff --git a/tests/models/embedding/language/test_scoring.py b/tests/models/embedding/language/test_scoring.py new file mode 100644 index 0000000000000..30fa5ea7b36c0 --- /dev/null +++ b/tests/models/embedding/language/test_scoring.py @@ -0,0 +1,95 @@ +"""Compare the embedding outputs of HF and vLLM models. + +Run `pytest tests/models/embedding/language/test_embedding.py`. 
+""" +import math + +import pytest + +MODELS = [ + "cross-encoder/ms-marco-MiniLM-L-6-v2", # Bert + "BAAI/bge-reranker-v2-m3", # Roberta +] + +TEXTS_1 = [ + "What is the capital of France?", + "What is the capital of Germany?", +] + +TEXTS_2 = [ + "The capital of France is Paris.", + "The capital of Germany is Berlin.", +] + + +@pytest.fixture(scope="module", params=MODELS) +def model_name(request): + yield request.param + + +@pytest.mark.parametrize("dtype", ["half"]) +def test_llm_1_to_1(vllm_runner, hf_runner, model_name, dtype: str): + + text_pair = [TEXTS_1[0], TEXTS_2[0]] + + with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: + hf_outputs = hf_model.predict([text_pair]).tolist() + + with vllm_runner(model_name, + task="embedding", + dtype=dtype, + max_model_len=None) as vllm_model: + vllm_outputs = vllm_model.score(text_pair[0], text_pair[1]) + + assert len(vllm_outputs) == 1 + assert len(hf_outputs) == 1 + + assert math.isclose(hf_outputs[0], vllm_outputs[0][0], rel_tol=0.01) + + +@pytest.mark.parametrize("dtype", ["half"]) +def test_llm_1_to_N(vllm_runner, hf_runner, model_name, dtype: str): + + text_pairs = [ + [TEXTS_1[0], TEXTS_2[0]], + [TEXTS_1[0], TEXTS_2[1]], + ] + + with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: + hf_outputs = hf_model.predict(text_pairs).tolist() + + with vllm_runner(model_name, + task="embedding", + dtype=dtype, + max_model_len=None) as vllm_model: + vllm_outputs = vllm_model.score(TEXTS_1[0], TEXTS_2) + + assert len(vllm_outputs) == 2 + assert len(hf_outputs) == 2 + + assert math.isclose(hf_outputs[0], vllm_outputs[0][0], rel_tol=0.01) + assert math.isclose(hf_outputs[1], vllm_outputs[1][0], rel_tol=0.01) + + +@pytest.mark.parametrize("dtype", ["half"]) +def test_llm_N_to_N(vllm_runner, hf_runner, model_name, dtype: str): + + text_pairs = [ + [TEXTS_1[0], TEXTS_2[0]], + [TEXTS_1[1], TEXTS_2[1]], + ] + + with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: + hf_outputs = hf_model.predict(text_pairs).tolist() + + with vllm_runner(model_name, + task="embedding", + dtype=dtype, + max_model_len=None) as vllm_model: + vllm_outputs = vllm_model.score(TEXTS_1, TEXTS_2) + + assert len(vllm_outputs) == 2 + assert len(hf_outputs) == 2 + + assert math.isclose(hf_outputs[0], vllm_outputs[0][0], rel_tol=0.01) + assert math.isclose(hf_outputs[1], vllm_outputs[1][0], rel_tol=0.01) diff --git a/tests/models/registry.py b/tests/models/registry.py index 3848367b6126c..fa0818c4f0bd1 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -135,6 +135,7 @@ class _HfExamplesInfo: "Qwen2ForRewardModel": _HfExamplesInfo("Qwen/Qwen2.5-Math-RM-72B"), "Qwen2ForSequenceClassification": _HfExamplesInfo("jason9693/Qwen2.5-1.5B-apeach"), # noqa: E501 "RobertaModel": _HfExamplesInfo("sentence-transformers/stsb-roberta-base-v2"), # noqa: E501 + "RobertaForMaskedLM": _HfExamplesInfo("sentence-transformers/all-roberta-large-v1"), # noqa: E501 "XLMRobertaModel": _HfExamplesInfo("intfloat/multilingual-e5-large"), # [Multimodal] "LlavaNextForConditionalGeneration": _HfExamplesInfo("royokong/e5-v"), @@ -143,6 +144,13 @@ class _HfExamplesInfo: "Qwen2VLForConditionalGeneration": _HfExamplesInfo("MrLight/dse-qwen2-2b-mrl-v1"), # noqa: E501 } +_CROSS_ENCODER_EXAMPLE_MODELS = { + # [Text-only] + "BertForSequenceClassification": _HfExamplesInfo("cross-encoder/ms-marco-MiniLM-L-6-v2"), # noqa: E501 + "RobertaForSequenceClassification": _HfExamplesInfo("cross-encoder/quora-roberta-base"), # noqa: E501 + 
"XLMRobertaForSequenceClassification": _HfExamplesInfo("BAAI/bge-reranker-v2-m3"), # noqa: E501 +} + _MULTIMODAL_EXAMPLE_MODELS = { # [Decoder-only] "Blip2ForConditionalGeneration": _HfExamplesInfo("Salesforce/blip2-opt-2.7b"), # noqa: E501 @@ -195,6 +203,7 @@ class _HfExamplesInfo: _EXAMPLE_MODELS = { **_TEXT_GENERATION_EXAMPLE_MODELS, **_EMBEDDING_EXAMPLE_MODELS, + **_CROSS_ENCODER_EXAMPLE_MODELS, **_MULTIMODAL_EXAMPLE_MODELS, **_SPECULATIVE_DECODING_EXAMPLE_MODELS, } diff --git a/tests/models/test_registry.py b/tests/models/test_registry.py index e462dae3dc688..289ea66b5ebc5 100644 --- a/tests/models/test_registry.py +++ b/tests/models/test_registry.py @@ -6,7 +6,10 @@ from vllm.model_executor.models import (is_embedding_model, is_text_generation_model, supports_multimodal) -from vllm.model_executor.models.registry import (_EMBEDDING_MODELS, +# yapf conflicts with isort for this block +# yapf: disable +from vllm.model_executor.models.registry import (_CROSS_ENCODER_MODELS, + _EMBEDDING_MODELS, _MULTIMODAL_MODELS, _SPECULATIVE_DECODING_MODELS, _TEXT_GENERATION_MODELS, @@ -29,22 +32,28 @@ def test_registry_imports(model_arch): model_arch in _TEXT_GENERATION_MODELS or model_arch in _MULTIMODAL_MODELS) + embedding_models = {**_EMBEDDING_MODELS, **_CROSS_ENCODER_MODELS} assert is_embedding_model(model_cls) is (model_arch - in _EMBEDDING_MODELS) + in embedding_models) assert supports_multimodal(model_cls) is (model_arch in _MULTIMODAL_MODELS) @fork_new_process_for_each_test -@pytest.mark.parametrize("model_arch,is_mm,init_cuda", [ - ("LlamaForCausalLM", False, False), - ("MllamaForConditionalGeneration", True, False), - ("LlavaForConditionalGeneration", True, True), +@pytest.mark.parametrize("model_arch,is_mm,init_cuda,is_ce", [ + ("LlamaForCausalLM", False, False, False), + ("MllamaForConditionalGeneration", True, False, False), + ("LlavaForConditionalGeneration", True, True, False), + ("BertForSequenceClassification", False, False, True), + ("RobertaForSequenceClassification", False, False, True), + ("XLMRobertaForSequenceClassification", False, False, True), ]) -def test_registry_is_multimodal(model_arch, is_mm, init_cuda): +def test_registry_model_property(model_arch, is_mm, init_cuda, is_ce): assert ModelRegistry.is_multimodal_model(model_arch) is is_mm + assert ModelRegistry.is_cross_encoder_model(model_arch) is is_ce + if init_cuda and current_platform.is_cuda_alike(): assert not torch.cuda.is_initialized() diff --git a/vllm/config.py b/vllm/config.py index f163665e2c063..4ea56a14cabba 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -712,6 +712,11 @@ def uses_mrope(self) -> bool: def is_multimodal_model(self) -> bool: return self.multimodal_config is not None + @property + def is_cross_encoder(self) -> bool: + architectures = getattr(self.hf_config, "architectures", []) + return ModelRegistry.is_cross_encoder_model(architectures) + class CacheConfig: """Configuration for the KV cache. diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index 841e65c488fc6..530cbdc3a9190 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -1357,6 +1357,7 @@ def schedule( encoder_seq_data=encoder_seq_data, cross_block_table=cross_block_table, state=seq_group.state, + token_type_ids=seq_group.token_type_ids, # `multi_modal_data` will only be present for the 1st comm # between engine and worker. 
# the subsequent comms can still use delta, but diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index c211ec5aee080..e07f4c04abd84 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -20,7 +20,7 @@ apply_mistral_chat_template, parse_chat_messages, resolve_chat_template_content_format) -from vllm.inputs import PromptType, TextPrompt, TokensPrompt +from vllm.inputs import PromptType, SingletonPrompt, TextPrompt, TokensPrompt from vllm.inputs.parse import parse_and_batch_prompt from vllm.logger import init_logger from vllm.lora.request import LoRARequest @@ -817,6 +817,128 @@ def encode( return self.engine_class.validate_outputs(outputs, EmbeddingRequestOutput) + def score( + self, + text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]], + text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]], + /, + truncate_prompt_tokens: Optional[int] = None, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[EmbeddingRequestOutput]: + """Generates similarity scores for all pairs . + + The inputs can be 1 -> 1, 1 -> N or N -> N. In the 1 - N case + the text_1 sentence will be replicated N times to pair with the text_2 + sentences. The input pairs are used to build a list of prompts for the + cross encoder model. This class automatically batches the prompts, + considering the memory constraint. For the best performance, put all + of your texts into a single list and pass it to this method. + + Args: + text_1: can be a single prompt or a list of prompts, in which + case it has to have the same length as the text_2 list + text_2: The texts to pair with the query to form the input + to the LLM. See :class:`~vllm.inputs.PromptType` for + more details about the format of each prompts. + use_tqdm: Whether to use tqdm to display the progress bar. + lora_request: LoRA request to use for generation, if any. + prompt_adapter_request: Prompt Adapter request to use for + generation, if any. + + Returns: + A list of ``EmbeddingRequestOutput`` objects containing the + generated scores in the same order as the input prompts. + """ + task = self.llm_engine.model_config.task + if task != "embedding": + messages = ["LLM.score() is only supported for embedding models."] + + supported_tasks = self.llm_engine.model_config.supported_tasks + if "embedding" in supported_tasks: + messages.append( + "Your model supports the 'embedding' task, but is " + f"currently initialized for the '{task}' task. 
Please " + "initialize the model using `--task embedding`.") + + raise ValueError(" ".join(messages)) + + if not self.llm_engine.model_config.is_cross_encoder: + raise ValueError("Your model does not support the cross encoding") + + tokenizer = self.llm_engine.get_tokenizer() + + if isinstance(tokenizer, MistralTokenizer): + raise ValueError( + "MistralTokenizer not supported for cross-encoding") + + # the tokenizer for models such as + # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing + # lists of tokens to the `text` and `text_pair` kwargs + def ensure_str(prompt: SingletonPrompt): + if isinstance(prompt, dict): + if "multi_modal_data" in prompt: + raise ValueError("Multi-modal prompt is not " + "supported for cross encoding") + elif "prompt_token_ids" in prompt: + prompt = tokenizer.decode( + cast(TokensPrompt, prompt)["prompt_token_ids"]) + elif "prompt" in prompt: + prompt = cast(TextPrompt, prompt)["prompt"] + assert type(prompt) is str + return prompt + + if isinstance(text_1, (str, dict)): + # Convert a single prompt to a list. + text_1 = [text_1] + text_1 = [ensure_str(t) for t in text_1] + + if isinstance(text_2, (str, dict)): + # Convert a single prompt to a list. + text_2 = [text_2] + text_2 = [ensure_str(t) for t in text_2] + + if len(text_1) > 1 and len(text_1) != len(text_2): + raise ValueError("Input lengths must be either 1:1, 1:N or N:N") + if len(text_1) == 0: + raise ValueError("At least one text element must be given") + if len(text_2) == 0: + raise ValueError("At least one text_pair element must be given") + + if len(text_1) == 1: + text_1 = text_1 * len(text_2) + + input_pairs = [(t1, t2) for t1, t2 in zip(text_1, text_2)] + pooling_params = PoolingParams() + + tokenization_kwargs: Dict[str, Any] = {} + if truncate_prompt_tokens is not None: + tokenization_kwargs["truncation"] = True + tokenization_kwargs["max_length"] = truncate_prompt_tokens + + parsed_prompts = [] + + for q, t in input_pairs: + prompt_inputs = tokenizer(text=q, + text_pair=t, + **tokenization_kwargs) + engine_prompt = TokensPrompt( + prompt_token_ids=prompt_inputs["input_ids"], + token_type_ids=prompt_inputs.get("token_type_ids")) + parsed_prompts.append(engine_prompt) + + self._validate_and_add_requests( + prompts=parsed_prompts, + params=pooling_params, + lora_request=lora_request, + prompt_adapter_request=prompt_adapter_request, + ) + + outputs = self._run_engine(use_tqdm=use_tqdm) + return self.engine_class.validate_outputs(outputs, + EmbeddingRequestOutput) + def start_profile(self) -> None: self.llm_engine.start_profile() diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index b0fe061f5db4a..2b1f14b89b1f2 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -45,6 +45,7 @@ EmbeddingRequest, EmbeddingResponse, ErrorResponse, LoadLoraAdapterRequest, + ScoreRequest, ScoreResponse, TokenizeRequest, TokenizeResponse, UnloadLoraAdapterRequest) @@ -53,6 +54,7 @@ from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing +from vllm.entrypoints.openai.serving_score import OpenAIServingScores from vllm.entrypoints.openai.serving_tokenization import ( OpenAIServingTokenization) from vllm.entrypoints.openai.tool_parsers import ToolParserManager @@ -280,6 +282,10 @@ def embedding(request: Request) -> 
Optional[OpenAIServingEmbedding]: return request.app.state.openai_serving_embedding +def score(request: Request) -> Optional[OpenAIServingScores]: + return request.app.state.openai_serving_scores + + def tokenization(request: Request) -> OpenAIServingTokenization: return request.app.state.openai_serving_tokenization @@ -391,6 +397,23 @@ async def create_embedding(request: EmbeddingRequest, raw_request: Request): assert_never(generator) +@router.post("/v1/score") +async def create_score(request: ScoreRequest, raw_request: Request): + handler = score(raw_request) + if handler is None: + return base(raw_request).create_error_response( + message="The model does not support Score API") + + generator = await handler.create_score(request, raw_request) + if isinstance(generator, ErrorResponse): + return JSONResponse(content=generator.model_dump(), + status_code=generator.code) + elif isinstance(generator, ScoreResponse): + return JSONResponse(content=generator.model_dump()) + + assert_never(generator) + + if envs.VLLM_TORCH_PROFILER_DIR: logger.warning( "Torch Profiler is enabled in the API server. This should ONLY be " @@ -466,8 +489,9 @@ def build_app(args: Namespace) -> FastAPI: @app.exception_handler(RequestValidationError) async def validation_exception_handler(_, exc): - chat = app.state.openai_serving_chat - err = chat.create_error_response(message=str(exc)) + err = ErrorResponse(message=str(exc), + type="BadRequestError", + code=HTTPStatus.BAD_REQUEST) return JSONResponse(err.model_dump(), status_code=HTTPStatus.BAD_REQUEST) @@ -565,6 +589,13 @@ def init_app_state( chat_template=resolved_chat_template, chat_template_content_format=args.chat_template_content_format, ) if model_config.task == "embedding" else None + state.openai_serving_scores = OpenAIServingScores( + engine_client, + model_config, + base_model_paths, + request_logger=request_logger + ) if (model_config.task == "embedding" \ + and model_config.is_cross_encoder) else None state.openai_serving_tokenization = OpenAIServingTokenization( engine_client, model_config, diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index f343732174014..ee94a9413f098 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -806,6 +806,27 @@ def to_pooling_params(self): EmbeddingRequest = Union[EmbeddingCompletionRequest, EmbeddingChatRequest] +class ScoreRequest(OpenAIBaseModel): + model: str + text_1: Union[List[str], str] + text_2: Union[List[str], str] + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None + + # doc: begin-chat-embedding-pooling-params + additional_data: Optional[Any] = None + # doc: end-chat-embedding-pooling-params + + priority: int = Field( + default=0, + description=( + "The priority of the request (lower means earlier handling; " + "default: 0). 
Any priority other than 0 will raise an error " + "if the served model does not use priority scheduling.")) + + def to_pooling_params(self): + return PoolingParams(additional_data=self.additional_data) + + class CompletionLogProbs(OpenAIBaseModel): text_offset: List[int] = Field(default_factory=list) token_logprobs: List[Optional[float]] = Field(default_factory=list) @@ -876,6 +897,21 @@ class EmbeddingResponse(OpenAIBaseModel): usage: UsageInfo +class ScoreResponseData(OpenAIBaseModel): + index: int + object: str = "score" + score: Union[List[float], str] + + +class ScoreResponse(OpenAIBaseModel): + id: str = Field(default_factory=lambda: f"embd-{random_uuid()}") + object: str = "list" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + data: List[ScoreResponseData] + usage: UsageInfo + + class FunctionCall(OpenAIBaseModel): name: str arguments: str diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py new file mode 100644 index 0000000000000..156fea6f47982 --- /dev/null +++ b/vllm/entrypoints/openai/serving_score.py @@ -0,0 +1,215 @@ +import asyncio +import time +from typing import Any, AsyncGenerator, Dict, List, Optional, Union, cast + +from fastapi import Request + +from vllm.config import ModelConfig +from vllm.engine.protocol import EngineClient +from vllm.entrypoints.logger import RequestLogger +from vllm.entrypoints.openai.protocol import (ErrorResponse, ScoreRequest, + ScoreResponse, ScoreResponseData, + UsageInfo) +from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing +from vllm.inputs.data import TokensPrompt +from vllm.logger import init_logger +from vllm.outputs import EmbeddingRequestOutput +from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer +from vllm.utils import merge_async_iterators, random_uuid + +logger = init_logger(__name__) + + +def request_output_to_score_response( + final_res_batch: List[EmbeddingRequestOutput], request_id: str, + created_time: int, model_name: str) -> ScoreResponse: + data: List[ScoreResponseData] = [] + score = None + num_prompt_tokens = 0 + for idx, final_res in enumerate(final_res_batch): + if final_res is not None: + score = final_res.outputs.embedding + score_data = ScoreResponseData(index=idx, score=score) + data.append(score_data) + + usage = UsageInfo( + prompt_tokens=num_prompt_tokens, + total_tokens=num_prompt_tokens, + ) + + return ScoreResponse( + id=request_id, + created=created_time, + model=model_name, + data=data, + usage=usage, + ) + + +def make_pairs(text_1: Union[List[str], str], text_2: Union[List[str], + str]) -> List: + if isinstance(text_1, (str, dict)): + # Convert a single prompt to a list. + text_1 = [text_1] + text_1 = [t for t in text_1] + + if isinstance(text_2, (str, dict)): + # Convert a single prompt to a list. 
+ text_2 = [text_2] + text_2 = [t for t in text_2] + if len(text_1) > 1 and len(text_1) != len(text_2): + raise ValueError("Input lengths must be either 1:1, 1:N or N:N") + if len(text_1) == 0: + raise ValueError("At least one text element must be given") + if len(text_2) == 0: + raise ValueError("At least one text_pair element must be given") + + if len(text_1) == 1: + text_1 = text_1 * len(text_2) + + return [(t1, t2) for t1, t2 in zip(text_1, text_2)] + + +class OpenAIServingScores(OpenAIServing): + + def __init__( + self, + engine_client: EngineClient, + model_config: ModelConfig, + base_model_paths: List[BaseModelPath], + *, + request_logger: Optional[RequestLogger], + ) -> None: + super().__init__(engine_client=engine_client, + model_config=model_config, + base_model_paths=base_model_paths, + lora_modules=None, + prompt_adapters=None, + request_logger=request_logger) + + async def create_score( + self, + request: ScoreRequest, + raw_request: Optional[Request] = None, + ) -> Union[ScoreResponse, ErrorResponse]: + """ + Score API similar to Sentence Transformers cross encoder + + See https://sbert.net/docs/package_reference/cross_encoder + """ + error_check_ret = await self._check_model(request) + if error_check_ret is not None: + return error_check_ret + + model_name = request.model + request_id = f"score-{random_uuid()}" + created_time = int(time.monotonic()) + truncate_prompt_tokens = request.truncate_prompt_tokens + + request_prompts = [] + engine_prompts = [] + + try: + ( + lora_request, + prompt_adapter_request, + ) = self._maybe_get_adapters(request) + + tokenizer = await self.engine_client.get_tokenizer(lora_request) + + if prompt_adapter_request is not None: + raise NotImplementedError("Prompt adapter is not supported " + "for embedding models") + + if isinstance(tokenizer, MistralTokenizer): + raise ValueError( + "MistralTokenizer not supported for cross-encoding") + + if not self.model_config.is_cross_encoder: + raise ValueError("Model is not cross encoder.") + + except ValueError as e: + logger.exception("Error in preprocessing prompt inputs") + return self.create_error_response(str(e)) + + # Schedule the request and get the result generator. 
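+        # One embedding-style request is submitted per (text_1, text_2) pair;
+        # the per-pair results are merged back in request order further below.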
+ generators: List[AsyncGenerator[EmbeddingRequestOutput, None]] = [] + + input_pairs = make_pairs(request.text_1, request.text_2) + + for q, t in input_pairs: + request_prompt = f"{q}{tokenizer.sep_token}{t}" + + tokenization_kwargs: Dict[str, Any] = {} + if truncate_prompt_tokens is not None: + tokenization_kwargs["truncation"] = True + tokenization_kwargs["max_length"] = truncate_prompt_tokens + + prompt_inputs = tokenizer(text=q, + text_pair=t, + **tokenization_kwargs) + engine_prompt = TokensPrompt( + prompt_token_ids=prompt_inputs["input_ids"], + token_type_ids=prompt_inputs.get("token_type_ids")) + + request_prompts.append(request_prompt) + engine_prompts.append(engine_prompt) + + try: + pooling_params = request.to_pooling_params() + + for i, engine_prompt in enumerate(engine_prompts): + request_id_item = f"{request_id}-{i}" + + self._log_inputs(request_id_item, + request_prompts[i], + params=pooling_params, + lora_request=lora_request, + prompt_adapter_request=prompt_adapter_request) + + trace_headers = (None if raw_request is None else await + self._get_trace_headers(raw_request.headers)) + + generator = self.engine_client.encode( + engine_prompt, + pooling_params, + request_id_item, + lora_request=lora_request, + trace_headers=trace_headers, + priority=request.priority, + ) + + generators.append(generator) + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + result_generator = merge_async_iterators( + *generators, + is_cancelled=raw_request.is_disconnected if raw_request else None, + ) + + num_prompts = len(engine_prompts) + + # Non-streaming response + final_res_batch: List[Optional[EmbeddingRequestOutput]] + final_res_batch = [None] * num_prompts + + try: + async for i, res in result_generator: + final_res_batch[i] = res + + assert all(final_res is not None for final_res in final_res_batch) + + final_res_batch_checked = cast(List[EmbeddingRequestOutput], + final_res_batch) + + response = request_output_to_score_response( + final_res_batch_checked, request_id, created_time, model_name) + except asyncio.CancelledError: + return self.create_error_response("Client disconnected") + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + return response diff --git a/vllm/inputs/data.py b/vllm/inputs/data.py index 07ff9faa50f13..fb7dbbebd7b90 100644 --- a/vllm/inputs/data.py +++ b/vllm/inputs/data.py @@ -38,6 +38,9 @@ class TokensPrompt(TypedDict): prompt_token_ids: List[int] """A list of token IDs to pass to the model.""" + token_type_ids: NotRequired[List[int]] + """A list of token type IDs to pass to the cross encoder model.""" + multi_modal_data: NotRequired["MultiModalDataDict"] """ DEPRECATED: Optional multi-modal data to pass to the model, @@ -133,6 +136,9 @@ class TokenInputs(TypedDict): prompt_token_ids: List[int] """The token IDs of the prompt.""" + token_type_ids: NotRequired[List[int]] + """The token type IDs of the prompt.""" + prompt: NotRequired[str] """ The original prompt text corresponding to the token IDs, if available. 
@@ -160,6 +166,7 @@ class TokenInputs(TypedDict): def token_inputs( prompt_token_ids: List[int], + token_type_ids: Optional[List[int]] = None, prompt: Optional[str] = None, multi_modal_data: Optional["MultiModalDataDict"] = None, multi_modal_placeholders: Optional["MultiModalPlaceholderDict"] = None, @@ -170,6 +177,8 @@ def token_inputs( if prompt is not None: inputs["prompt"] = prompt + if token_type_ids is not None: + inputs["token_type_ids"] = token_type_ids if multi_modal_data is not None: inputs["multi_modal_data"] = multi_modal_data if multi_modal_placeholders is not None: @@ -234,6 +243,15 @@ def prompt_token_ids(self) -> List[int]: assert_never(inputs) + @cached_property + def token_type_ids(self) -> List[int]: + inputs = self.inputs + + if inputs["type"] == "token" or inputs["type"] == "multimodal": + return inputs.get("token_type_ids", []) + + assert_never(inputs) + @cached_property def prompt_embeds(self) -> Optional[torch.Tensor]: inputs = self.inputs diff --git a/vllm/inputs/preprocess.py b/vllm/inputs/preprocess.py index 853257c5ad71f..3d606817e90aa 100644 --- a/vllm/inputs/preprocess.py +++ b/vllm/inputs/preprocess.py @@ -305,6 +305,7 @@ def _prompt_to_llm_inputs( tokens_content = parsed["content"] prompt_token_ids = tokens_content["prompt_token_ids"] + token_type_ids = tokens_content.get("token_type_ids") multi_modal_data = tokens_content.get("multi_modal_data") mm_processor_kwargs = tokens_content.get("mm_processor_kwargs") @@ -318,6 +319,7 @@ def _prompt_to_llm_inputs( return token_inputs( prompt_token_ids=prompt_token_ids, + token_type_ids=token_type_ids, multi_modal_data=multi_modal_data, mm_processor_kwargs=mm_processor_kwargs, ) diff --git a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index df1978241340b..f9437b4112ceb 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -3,11 +3,14 @@ import torch import torch.nn as nn +from transformers import PretrainedConfig from vllm.config import PoolerConfig from vllm.model_executor.pooling_metadata import (PoolingMetadata, PoolingTensors) from vllm.sequence import EmbeddingSequenceGroupOutput, PoolerOutput +from vllm.transformers_utils.config import ( + get_cross_encoder_activation_function) class PoolingType(IntEnum): @@ -152,3 +155,64 @@ def forward( ] return PoolerOutput(outputs=pooled_outputs) + + +class CrossEncodingPooler(nn.Module): + """A layer that pools specific information from hidden states. + + This layer does the following: + 1. Extracts specific tokens or aggregates data based on pooling method. + 2. Normalizes output if specified. + 3. Returns structured results as `PoolerOutput`. + + Attributes: + pooling_type: The type of pooling to use. + normalize: Whether to normalize the pooled data. 
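+        classifier: The classification head that maps the pooled hidden
+            states to sentence-pair scores.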
+ """ + + def __init__( + self, + config: PretrainedConfig, + classifier: nn.Module, + pooler: Optional[nn.Module] = None, + ): + super().__init__() + self.classifier = classifier + self.pooler = pooler + self.default_activation_function = \ + get_cross_encoder_activation_function(config) + + def forward( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> PoolerOutput: + """Pools sentence pair scores from the hidden_states.""" + + prompt_lens = PoolingTensors.from_pooling_metadata( + pooling_metadata, hidden_states.device).prompt_lens + + offset = 0 + pooled_data_lst = [] + for prompt_len in prompt_lens: + pooled_data_i = hidden_states[offset:offset + prompt_len] + + if self.pooler is not None: + final_shape_tensor = self.pooler(pooled_data_i) + else: + final_shape_tensor = self.classifier(pooled_data_i) + + pooled_data_lst.append(final_shape_tensor) + offset += prompt_len + + pooled_output = torch.stack(pooled_data_lst) + + if self.pooler is not None: + # apply classifier once on the full batch if possible + pooled_output = self.classifier(pooled_output) + logits = self.default_activation_function(pooled_output) + + pooled_outputs = [ + EmbeddingSequenceGroupOutput(data.tolist()) for data in logits + ] + return PoolerOutput(outputs=pooled_outputs) diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index d8301a36acb01..1fc87bc650d92 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -11,14 +11,18 @@ from vllm.model_executor.layers.linear import (ColumnParallelLinear, QKVParallelLinear, RowParallelLinear) -from vllm.model_executor.layers.pooler import Pooler, PoolingType +from vllm.model_executor.layers.pooler import (CrossEncodingPooler, Pooler, + PoolingType) from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.models.interfaces import SupportsCrossEncoding from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.transformers_utils.config import ( + get_cross_encoder_activation_function) from .utils import maybe_prefix @@ -48,7 +52,9 @@ def __init__(self, config: BertConfig): def forward( self, input_ids: torch.Tensor, - position_ids: Optional[torch.Tensor] = None, + seq_lens: torch.Tensor, + position_ids: torch.Tensor, + token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: input_shape = input_ids.size() @@ -58,17 +64,34 @@ def forward( # Position embeddings. position_embeddings = self.position_embeddings(position_ids) - # Token type embeddings. (TODO: move off hotpath?) 
- token_type_embeddings = self.token_type_embeddings( - torch.zeros(input_shape, - dtype=torch.long, - device=inputs_embeds.device)) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, + dtype=torch.long, + device=inputs_embeds.device) + + token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings + position_embeddings embeddings = self.LayerNorm(embeddings) return embeddings +class BertPooler(nn.Module): + + def __init__(self, config: BertConfig): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[0, :] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + class BertEncoder(nn.Module): def __init__(self, @@ -309,7 +332,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "", - embedding_class: type = BertEmbedding): + embedding_class: type = BertEmbedding, + add_pooling_layer: bool = False): super().__init__() config = vllm_config.model_config.hf_config cache_config = vllm_config.cache_config @@ -319,6 +343,7 @@ def __init__(self, cache_config, quant_config, prefix=f"{prefix}.encoder") + self.pooler = BertPooler(config) if add_pooling_layer else None def forward( self, @@ -328,13 +353,17 @@ def forward( attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: if inputs_embeds is not None: hidden_states = inputs_embeds else: - hidden_states = self.embeddings(input_ids=input_ids, - position_ids=position_ids) - + assert hasattr(attn_metadata, "seq_lens_tensor") + hidden_states = self.embeddings( + input_ids=input_ids, + seq_lens=attn_metadata.seq_lens_tensor, + position_ids=position_ids, + token_type_ids=token_type_ids) return self.encoder(hidden_states, kv_caches, attn_metadata) def load_weights(self, weights: Iterable[Tuple[str, @@ -349,7 +378,7 @@ def load_weights(self, weights: Iterable[Tuple[str, params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() for name, loaded_weight in weights: - if "pooler" in name: + if self.pooler is None and "pooler" in name: continue for (param_name, weight_name, shard_id) in stacked_params_mapping: if weight_name not in name: @@ -430,3 +459,78 @@ def _build_pooler(self, pooler_config: PoolerConfig) -> Pooler: pooling_type=PoolingType.CLS, normalize=True, softmax=False) + + +class BertForSequenceClassification(nn.Module, SupportsCrossEncoding): + """A model that uses Bert to provide embedding functionalities. + + This class encapsulates the BertModel and provides an interface for + embedding operations and customized pooling functions. + + Attributes: + model: An instance of BertModel used for forward operations. + _pooler: An instance of Pooler used for pooling operations. 
+ """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + + self.default_activation_function = \ + get_cross_encoder_activation_function(config) + + self.num_labels = config.num_labels + self.bert = BertModel(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "bert"), + embedding_class=BertEmbedding, + add_pooling_layer=True) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + self._pooler = CrossEncodingPooler(config, self.classifier, + self.bert.pooler) + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + + self_weights = [] + + def weight_filter(): + for name, weight in weights: + if name.startswith("bert."): + yield (name[len("bert."):], weight) + else: + self_weights.append((name, weight)) + + self.bert.load_weights(weight_filter()) + + params_dict = dict(self.named_parameters()) + + for name, loaded_weight in self_weights: + if name.startswith("classifier"): + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Optional[PoolerOutput]: + return self._pooler(hidden_states, pooling_metadata) + + def forward( + self, + input_ids: Optional[torch.Tensor], + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + return self.bert(input_ids=input_ids, + position_ids=positions, + kv_caches=kv_caches, + inputs_embeds=inputs_embeds, + intermediate_tensors=intermediate_tensors, + attn_metadata=attn_metadata, + token_type_ids=token_type_ids) diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index dcead65115132..4f0c75b2c6a57 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -7,6 +7,8 @@ from vllm.logger import init_logger from vllm.utils import supports_kw +from .interfaces_base import is_embedding_model + if TYPE_CHECKING: from vllm.config import LoRAConfig, MultiModalConfig, SchedulerConfig from vllm.sequence import IntermediateTensors @@ -350,3 +352,37 @@ def is_attention_free( return isinstance(model, _IsAttentionFreeType) return isinstance(model, IsAttentionFree) + + +@runtime_checkable +class SupportsCrossEncoding(Protocol): + """The interface required for all models that support cross encoding.""" + + supports_cross_encoding: ClassVar[Literal[True]] = True + + +@overload +def supports_cross_encoding( + model: Type[object]) -> TypeIs[Type[SupportsCrossEncoding]]: + ... + + +@overload +def supports_cross_encoding(model: object) -> TypeIs[SupportsCrossEncoding]: + ... 
+ + +def _supports_cross_encoding( + model: Union[Type[object], object], +) -> Union[TypeIs[Type[SupportsCrossEncoding]], TypeIs[SupportsCrossEncoding]]: + + if isinstance(model, type): + return isinstance(model, SupportsCrossEncoding) + + return isinstance(model, SupportsCrossEncoding) + + +def supports_cross_encoding( + model: Union[Type[object], object], +) -> Union[TypeIs[Type[SupportsCrossEncoding]], TypeIs[SupportsCrossEncoding]]: + return is_embedding_model(model) and _supports_cross_encoding(model) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 22c2e328bfb65..789ffb4d3bde0 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -21,7 +21,8 @@ from vllm.platforms import current_platform from .interfaces import (has_inner_state, is_attention_free, - supports_multimodal, supports_pp) + supports_cross_encoding, supports_multimodal, + supports_pp) from .interfaces_base import is_embedding_model, is_text_generation_model logger = init_logger(__name__) @@ -100,6 +101,7 @@ # [Text-only] "BertModel": ("bert", "BertEmbeddingModel"), "RobertaModel": ("roberta", "RobertaEmbeddingModel"), + "RobertaForMaskedLM": ("roberta", "RobertaEmbeddingModel"), "XLMRobertaModel": ("roberta", "RobertaEmbeddingModel"), "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), "Gemma2Model": ("gemma2", "Gemma2EmbeddingModel"), @@ -121,6 +123,14 @@ "Qwen2VLForConditionalGeneration": ("qwen2_vl", "Qwen2VLForConditionalGeneration") # noqa: E501, } +_CROSS_ENCODER_MODELS = { + "BertForSequenceClassification": ("bert", "BertForSequenceClassification"), + "RobertaForSequenceClassification": ("roberta", + "RobertaForSequenceClassification"), + "XLMRobertaForSequenceClassification": ("roberta", + "RobertaForSequenceClassification"), +} + _MULTIMODAL_MODELS = { # [Decoder-only] "Blip2ForConditionalGeneration": ("blip2", "Blip2ForConditionalGeneration"), @@ -159,6 +169,7 @@ _VLLM_MODELS = { **_TEXT_GENERATION_MODELS, **_EMBEDDING_MODELS, + **_CROSS_ENCODER_MODELS, **_MULTIMODAL_MODELS, **_SPECULATIVE_DECODING_MODELS, } @@ -193,6 +204,7 @@ class _ModelInfo: is_text_generation_model: bool is_embedding_model: bool + supports_cross_encoding: bool supports_multimodal: bool supports_pp: bool has_inner_state: bool @@ -203,6 +215,7 @@ def from_model_cls(model: Type[nn.Module]) -> "_ModelInfo": return _ModelInfo( is_text_generation_model=is_text_generation_model(model), is_embedding_model=is_embedding_model(model), + supports_cross_encoding=supports_cross_encoding(model), supports_multimodal=supports_multimodal(model), supports_pp=supports_pp(model), has_inner_state=has_inner_state(model), @@ -415,6 +428,12 @@ def is_embedding_model( ) -> bool: return self.inspect_model_cls(architectures).is_embedding_model + def is_cross_encoder_model( + self, + architectures: Union[str, List[str]], + ) -> bool: + return self.inspect_model_cls(architectures).supports_cross_encoding + def is_multimodal_model( self, architectures: Union[str, List[str]], @@ -489,4 +508,4 @@ def _run() -> None: if __name__ == "__main__": - _run() \ No newline at end of file + _run() diff --git a/vllm/model_executor/models/roberta.py b/vllm/model_executor/models/roberta.py index c1dcdd36ec3de..5a296e311f079 100644 --- a/vllm/model_executor/models/roberta.py +++ b/vllm/model_executor/models/roberta.py @@ -1,4 +1,4 @@ -from typing import List, Optional +from typing import Iterable, List, Optional, Tuple import torch from torch import nn @@ -6,10 +6,17 @@ from vllm.attention 
import AttentionMetadata from vllm.config import VllmConfig +from vllm.model_executor.layers.pooler import CrossEncodingPooler from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.bert import BertEmbeddingModel, BertModel -from vllm.sequence import IntermediateTensors +from vllm.model_executor.models.interfaces import SupportsCrossEncoding +from vllm.model_executor.models.utils import maybe_prefix +from vllm.model_executor.pooling_metadata import PoolingMetadata +from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.transformers_utils.config import ( + get_cross_encoder_activation_function) class RobertaEmbedding(nn.Module): @@ -39,34 +46,93 @@ def __init__(self, config: RobertaConfig): def forward( self, input_ids: torch.Tensor, - position_ids: Optional[torch.Tensor] = None, + seq_lens: torch.Tensor, + position_ids: torch.Tensor, + token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: input_shape = input_ids.size() - - # Input embeddings. inputs_embeds = self.word_embeddings(input_ids) - # TODO: figure out if there is a better way - # to make to make position ids start at padding_idx + 1 + # Replace position ids because in RoBERTa models + # they have to start at padding_idx + 1 and ignore + # existing padding tokens # References: # - https://github.com/huggingface/transformers/blob/a3d69a8994d673899608a7c17fbf4f953f50474e/src/transformers/models/roberta/modeling_roberta.py#L133 # - https://github.com/huggingface/transformers/blob/a3d69a8994d673899608a7c17fbf4f953f50474e/src/transformers/models/roberta/modeling_roberta.py#L1669 - position_ids += self.padding_idx + 1 + pos_list = [] + token_list = [] + offset = 0 + for seq_len in seq_lens: + pos_list.append(position_ids[offset:offset + seq_len]) + token_list.append(input_ids[offset:offset + seq_len]) + offset += seq_len + + new_pos_list = [] + for positions, tokens in zip(pos_list, token_list): + # Verify assumption that incoming position are + # always a sequence from 0 to N. + expected_pos = torch.arange(positions.size()[0], + dtype=torch.long, + device=inputs_embeds.device) + assert torch.equal(positions, expected_pos) + new_pos_list.append( + create_position_ids_from_input_ids(tokens, self.padding_idx)) + position_ids = torch.cat(new_pos_list) # Position embeddings. position_embeddings = self.position_embeddings(position_ids) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, + dtype=torch.long, + device=inputs_embeds.device) - # Token type embeddings. (TODO: move off hotpath?) - token_type_embeddings = self.token_type_embeddings( - torch.zeros(input_shape, - dtype=torch.long, - device=inputs_embeds.device)) - + token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings + position_embeddings embeddings = self.LayerNorm(embeddings) return embeddings +# Adapted from transformers +def create_position_ids_from_input_ids(input_ids, + padding_idx, + past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. + Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully + # balanced to both work with ONNX export and XLA. 
+ mask = input_ids.ne(padding_idx).int() + + incremental_indices = (torch.cumsum(mask, dim=0).type_as(mask) + + past_key_values_length) * mask + + return incremental_indices.long() + padding_idx + + +# Adapted from transformers +class RobertaClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config: RobertaConfig): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + def forward(self, features, **kwargs): + x = features[0, :] # take token (equiv. to [CLS]) + x = self.dense(x) + x = torch.tanh(x) + x = self.out_proj(x) + return x + + class RobertaEmbeddingModel(BertEmbeddingModel): """A model that uses Roberta to provide embedding functionalities. @@ -85,6 +151,62 @@ def _build_model(self, prefix=prefix, embedding_class=RobertaEmbedding) + +class RobertaForSequenceClassification(nn.Module, SupportsCrossEncoding): + """A model that uses Roberta to provide embedding functionalities. + + This class encapsulates the BertModel and provides an interface for + embedding operations and customized pooling functions. + + Attributes: + roberta: An instance of BertModel used for forward operations. + _pooler: An instance of Pooler used for pooling operations. + """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + + self.default_activation_function = \ + get_cross_encoder_activation_function(config) + + self.num_labels = config.num_labels + self.roberta = BertModel(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "bert"), + embedding_class=RobertaEmbedding, + add_pooling_layer=False) + self.classifier = RobertaClassificationHead(config) + self._pooler = CrossEncodingPooler(config, self.classifier) + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + + self_weights = [] + + def weight_filter(): + for name, weight in weights: + if name.startswith("roberta."): + yield (name[len("roberta."):], weight) + else: + self_weights.append((name, weight)) + + self.roberta.load_weights(weight_filter()) + + params_dict = dict(self.named_parameters()) + + for name, loaded_weight in self_weights: + if name.startswith("classifier"): + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Optional[PoolerOutput]: + return self._pooler(hidden_states, pooling_metadata) + def forward( self, input_ids: Optional[torch.Tensor], @@ -93,25 +215,12 @@ def forward( attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, ) -> torch.Tensor: - - # Verify assumption that position are always a sequence from - # 0 to N. (Actually here we just check 0 and N to simplify). - # This is important to fix the position which are assumed to - # start from padding_idx + 1 instead of 0 in the Roberta models. 
- assert hasattr(attn_metadata, "seq_lens_tensor") - cumulative = attn_metadata.seq_lens_tensor.cumsum(dim=0) - start_pos = torch.cat( - (torch.tensor([0], device=attn_metadata.seq_lens_tensor.device), - cumulative[:-1])) - assert len(torch.nonzero(positions[start_pos])) == 0 - end_pos = cumulative - 1 - last_tokens = attn_metadata.seq_lens_tensor - 1 - assert len(torch.nonzero(positions[end_pos] - last_tokens)) == 0 - - return super().forward(input_ids=input_ids, - positions=positions, - kv_caches=kv_caches, - attn_metadata=attn_metadata, - intermediate_tensors=intermediate_tensors, - inputs_embeds=inputs_embeds) + return self.roberta(input_ids=input_ids, + position_ids=positions, + kv_caches=kv_caches, + inputs_embeds=inputs_embeds, + intermediate_tensors=intermediate_tensors, + attn_metadata=attn_metadata, + token_type_ids=token_type_ids) diff --git a/vllm/multimodal/inputs.py b/vllm/multimodal/inputs.py index 8e67a552afe12..640c7c04b8817 100644 --- a/vllm/multimodal/inputs.py +++ b/vllm/multimodal/inputs.py @@ -6,7 +6,7 @@ import torch import torch.types from PIL.Image import Image -from typing_extensions import TypeAlias +from typing_extensions import NotRequired, TypeAlias from vllm.utils import JSONTree, is_list_of, json_map_leaves @@ -208,6 +208,9 @@ class MultiModalInputsV2(TypedDict): prompt_token_ids: List[int] """The processed token IDs which includes placeholder tokens.""" + token_type_ids: NotRequired[List[int]] + """The token type IDs of the prompt.""" + mm_kwargs: MultiModalKwargs """Keyword arguments to be directly passed to the model after batching.""" diff --git a/vllm/outputs.py b/vllm/outputs.py index 4ae9b377ae693..2d256803edfe8 100644 --- a/vllm/outputs.py +++ b/vllm/outputs.py @@ -60,7 +60,6 @@ class EmbeddingOutput: embedding: The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide. """ - embedding: List[float] def __repr__(self) -> str: @@ -363,6 +362,50 @@ def __repr__(self): f"finished={self.finished})") +@dataclass +class ScoreOutput: + """The output data of one completion output of a request. + + Args: + score: The score, which is a list of floats. + index: The correspondent text index of the score. + """ + index: int + score: List[float] + + def __repr__(self) -> str: + return (f"ScoreOutput(" + f"score={self.score}), " + f"index={self.index})") + + +class ScoreRequestOutput: + """ + The output data of an score request to the LLM. + + Args: + request_id (str): A unique identifier for the score request. + outputs (score): The embedding results for the given input. + """ + + def __init__(self, request_id: str, outputs: "ScoreOutput"): + self.request_id = request_id + self.outputs = outputs + + def __repr__(self): + """ + Returns a string representation of an ScoreRequestOutput instance. + + The representation includes the request_id and the number of outputs, + providing a quick overview of the embedding request's results. + + Returns: + str: A string representation of the ScoreRequestOutput instance. 
+ """ + return (f"ScoreRequestOutput(request_id='{self.request_id}', " + f"outputs={repr(self.outputs)}") + + class RequestOutputFactory: @staticmethod diff --git a/vllm/sequence.py b/vllm/sequence.py index a1cc8fc3b09de..669124319c4f4 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -449,6 +449,10 @@ def prompt_token_ids(self) -> List[int]: def prompt_embeds(self) -> Optional[torch.Tensor]: return self.inputs.prompt_embeds + @property + def token_type_ids(self) -> List[int]: + return self.inputs.token_type_ids + @property def multi_modal_data(self) -> "MultiModalDataDict": return self.inputs.multi_modal_data @@ -687,6 +691,10 @@ def encoder_prompt_token_ids(self) -> Optional[List[int]]: return (self.encoder_seq.prompt_token_ids if self.encoder_seq is not None else None) + @property + def token_type_ids(self) -> Optional[List[int]]: + return self.first_seq.token_type_ids + @property def multi_modal_data(self) -> MultiModalDataDict: return self.first_seq.multi_modal_data @@ -909,6 +917,7 @@ class SequenceGroupMetadata( default_factory=lambda: SequenceGroupState()) # "MultiModalDataDict" types. We have to use Any due to msgspec # doesn't allow to have union of 2 different dicts. + token_type_ids: Optional[List[int]] = None multi_modal_data: Optional[Any] = None multi_modal_placeholders: Optional[MultiModalPlaceholderDict] = None mm_processor_kwargs: Optional[Dict[str, Any]] = None diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 59096753c395d..70d18d40b7aa7 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -9,6 +9,7 @@ from huggingface_hub.utils import (EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError) +from torch import nn from transformers import GenerationConfig, PretrainedConfig from transformers.models.auto.image_processing_auto import ( get_image_processor_config) @@ -31,6 +32,7 @@ UltravoxConfig) # yapf: enable from vllm.transformers_utils.utils import check_gguf_file +from vllm.utils import resolve_obj_by_qualname if VLLM_USE_MODELSCOPE: from modelscope import AutoConfig @@ -577,3 +579,16 @@ def try_get_generation_config( return GenerationConfig.from_model_config(config) except OSError: # Not found return None + + +def get_cross_encoder_activation_function(config: PretrainedConfig): + if (hasattr(config, "sbert_ce_default_activation_function") + and config.sbert_ce_default_activation_function is not None): + + function_name = config.sbert_ce_default_activation_function + assert function_name.startswith("torch.nn.modules."), \ + "Loading of activation functions is restricted to " \ + "torch.nn.modules for security reasons" + return resolve_obj_by_qualname(function_name)() + else: + return nn.Sigmoid() if config.num_labels == 1 else nn.Identity() diff --git a/vllm/worker/cpu_embedding_model_runner.py b/vllm/worker/cpu_embedding_model_runner.py index 978de73df6b70..3954e4c4c8a5b 100644 --- a/vllm/worker/cpu_embedding_model_runner.py +++ b/vllm/worker/cpu_embedding_model_runner.py @@ -50,6 +50,9 @@ def execute_model( ] model_executable = self.model + cross_enc_kwargs = {} + if model_input.token_type_ids is not None: + cross_enc_kwargs["token_type_ids"] = model_input.token_type_ids execute_model_kwargs = { "input_ids": model_input.input_tokens, @@ -61,6 +64,7 @@ def execute_model( model_input.attn_metadata, **MultiModalKwargs.as_kwargs(model_input.multi_modal_kwargs or {}, device=self.device), + **cross_enc_kwargs, "intermediate_tensors": intermediate_tensors, 
} diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index 7cab476d7fca4..b08171d79f002 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -43,6 +43,7 @@ class ModelInputForCPU(ModelRunnerInputBase): """ input_tokens: Optional[torch.Tensor] = None input_positions: Optional[torch.Tensor] = None + token_type_ids: Optional[torch.Tensor] = None attn_metadata: Optional["AttentionMetadata"] = None multi_modal_kwargs: Optional[BatchedTensorInputs] = None virtual_engine: Optional[int] = None @@ -54,6 +55,7 @@ def as_broadcastable_tensor_dict( tensor_dict = { "input_tokens": self.input_tokens, "input_positions": self.input_positions, + "token_type_ids": self.token_type_ids, "multi_modal_kwargs": self.multi_modal_kwargs, } _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) @@ -83,6 +85,7 @@ def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: tensor_dict = { "input_tokens": self.input_tokens, "input_positions": self.input_positions, + "token_type_ids": self.token_type_ids, "multi_modal_kwargs": self.multi_modal_kwargs, } _add_attn_metadata_broadcastable_dict(tensor_dict, self.attn_metadata) @@ -112,6 +115,7 @@ def __init__(self, use_mrope: bool): self.input_tokens: List[int] = [] self.input_positions: Optional[ List[int]] = [] if not self.use_mrope else None + self.token_type_ids: Optional[List[int]] = [] self.seq_lens: List[int] = [] self.query_lens: List[int] = [] self.prefill_block_tables: List[List[int]] = [] @@ -165,6 +169,10 @@ def build(self) -> ModelInputForCPU: if not input_data.use_mrope else input_data.input_mrope_positions, dtype=torch.long, device="cpu") + token_type_ids = torch.tensor(input_data.token_type_ids, + dtype=torch.long, + device="cpu") \ + if input_data.token_type_ids else None # For multi-modal models multi_modal_kwargs = None @@ -178,6 +186,7 @@ def build(self) -> ModelInputForCPU: return self.model_input_cls( input_tokens=input_tokens, input_positions=input_positions, + token_type_ids=token_type_ids, seq_lens=input_data.seq_lens, query_lens=input_data.query_lens, attn_metadata=attn_metadata, @@ -285,6 +294,7 @@ def _compute_prompt_input_tokens(self, data: ModelInputData, tokens = seq_data.get_token_ids() tokens = tokens[context_len:seq_len] token_positions = range(context_len, seq_len) + token_types = seq_group_metadata.token_type_ids # For encoder-only models, the block_table is None, # and there is no need to initialize the slot_mapping. 
@@ -301,6 +311,9 @@ def _compute_prompt_input_tokens(self, data: ModelInputData, if data.input_positions is not None: data.input_positions.extend(token_positions) + if data.token_type_ids is not None: + data.token_type_ids.extend(token_types if token_types else []) + # Update fields data.input_tokens.extend(tokens) data.num_prefills += 1 diff --git a/vllm/worker/embedding_model_runner.py b/vllm/worker/embedding_model_runner.py index 4a55d91e71484..f56805918fd15 100644 --- a/vllm/worker/embedding_model_runner.py +++ b/vllm/worker/embedding_model_runner.py @@ -97,6 +97,10 @@ def execute_model( model_forward_end = torch.cuda.Event(enable_timing=True) model_forward_start.record() + cross_enc_kwargs = {} + if model_input.token_types is not None: + cross_enc_kwargs["token_type_ids"] = model_input.token_types + with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_or_intermediate_states = model_executable( input_ids=model_input.input_tokens, @@ -105,7 +109,8 @@ def execute_model( attn_metadata=model_input.attn_metadata, intermediate_tensors=intermediate_tensors, **MultiModalKwargs.as_kwargs(multi_modal_kwargs, - device=self.device)) + device=self.device), + **cross_enc_kwargs) if (self.observability_config is not None and self.observability_config.collect_model_forward_time): diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 13301b876217d..1f654a9cce465 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -92,6 +92,7 @@ class ModelInputForGPU(ModelRunnerInputBase): """ input_tokens: Optional[torch.Tensor] = None input_positions: Optional[torch.Tensor] = None + token_types: Optional[torch.Tensor] = None seq_lens: Optional[List[int]] = None query_lens: Optional[List[int]] = None lora_mapping: Optional["LoRAMapping"] = None @@ -200,6 +201,7 @@ class InterDataForSeqGroup: def simple_reinit(self): self.input_tokens[0].clear() # type: ignore self.input_positions[0].clear() # type: ignore + self.token_types[0].clear() # type: ignore self.mrope_input_positions = None # type: ignore self.seq_lens[0] = 0 # type: ignore self.orig_seq_lens[0] = 0 # type: ignore @@ -226,6 +228,7 @@ def __init__( # Input tokens and positions. input_tokens: Optional[List[List[int]]] = None, input_positions: Optional[List[List[int]]] = None, + token_types: Optional[List[List[int]]] = None, mrope_input_positions: Optional[List[List[List[int]]]] = None, # The sequence length (may be capped to the sliding window). @@ -291,6 +294,12 @@ def __init__( for seq_id in range(len(self.seq_ids)): self.input_positions[seq_id].clear() + if token_types: + self.token_types = token_types + else: + for seq_id in range(len(self.seq_ids)): + self.token_types[seq_id].clear() + self.mrope_input_positions = None if seq_lens: @@ -354,6 +363,7 @@ def __init__( else: self.input_tokens = input_tokens or [] self.input_positions = input_positions or [] + self.token_types = token_types or [] self.mrope_input_positions = mrope_input_positions or None self.seq_lens = seq_lens or [] self.orig_seq_lens = orig_seq_lens or [] @@ -386,6 +396,7 @@ def __post_init__(self): self.input_tokens = [[] for _ in range(self.n_seqs)] self.input_positions = [[] for _ in range(self.n_seqs)] + self.token_types = [[] for _ in range(self.n_seqs)] self.mrope_input_positions = None self.seq_lens = [0] * self.n_seqs self.orig_seq_lens = [0] * self.n_seqs @@ -498,12 +509,15 @@ def _compute_lens(self, inter_data: InterDataForSeqGroup, seq_idx: int, # Compute tokens. 
tokens = seq_data.get_token_ids()[context_len:seq_len] + token_types = seq_group_metadata.token_type_ids inter_data.seq_lens[seq_idx] = seq_len inter_data.orig_seq_lens[seq_idx] = seq_len inter_data.context_lens[seq_idx] = context_len inter_data.input_tokens[seq_idx].extend(tokens) inter_data.input_positions[seq_idx].extend(range(context_len, seq_len)) + inter_data.token_types[seq_idx].extend( + token_types if token_types else []) inter_data.query_lens[seq_idx] = seq_len - context_len if seq_data.mrope_position_delta is not None: @@ -561,6 +575,8 @@ def _compute_for_prefix_cache_hit( seq_idx][uncomputed_start:] inter_data.input_positions[seq_idx] = inter_data.input_positions[ seq_idx][uncomputed_start:] + inter_data.token_types[seq_idx] = inter_data.token_types[seq_idx][ + uncomputed_start:] context_len = prefix_cache_len inter_data.context_lens[seq_idx] = context_len @@ -575,6 +591,8 @@ def _compute_for_prefix_cache_hit( seq_idx][-1:] inter_data.input_positions[seq_idx] = inter_data.input_positions[ seq_idx][-1:] + inter_data.token_types[seq_idx] = inter_data.token_types[seq_idx][ + -1:] inter_data.query_lens[seq_idx] = 1 inter_data.context_lens[seq_idx] = inter_data.seq_lens[seq_idx] - 1 @@ -803,9 +821,12 @@ def build(self) -> ModelInputForGPU: """ # Combine and flatten intermediate data. input_tokens = [] + token_types = [] for inter_data in self.inter_data_list: for cur_input_tokens in inter_data.input_tokens: input_tokens.extend(cur_input_tokens) + for cur_token_types in inter_data.token_types: + token_types.extend(cur_token_types) if not input_tokens: # This may happen when all prefill requests hit @@ -874,6 +895,12 @@ def build(self) -> ModelInputForGPU: input_tokens_tensor = async_tensor_h2d(input_tokens, torch.long, self.runner.device, self.runner.pin_memory) + + token_types_tensor = async_tensor_h2d(token_types, torch.long, + self.runner.device, + self.runner.pin_memory) \ + if token_types else None + if mrope_input_positions is not None: for idx in range(3): mrope_input_positions[idx].extend( @@ -952,6 +979,7 @@ def build(self) -> ModelInputForGPU: return self.model_input_cls( input_tokens=input_tokens_tensor, input_positions=input_positions_tensor, + token_types=token_types_tensor, attn_metadata=attn_metadata, seq_lens=seq_lens, query_lens=query_lens, From 7ea3cd7c3e9fa1db06cdf8ad1973237b061b7d64 Mon Sep 17 00:00:00 2001 From: Mengqing Cao Date: Mon, 25 Nov 2024 13:14:56 +0800 Subject: [PATCH 129/397] [Refactor][MISC] del redundant code in ParallelConfig.postinit (#10614) Signed-off-by: MengqingCao --- vllm/config.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 4ea56a14cabba..dcdaf58b5ccdb 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -998,20 +998,15 @@ def __post_init__(self) -> None: raise ValueError(f"worker-use-ray can't be used with " f"distributed executor backend " f"'{self.distributed_executor_backend}'.") - - if current_platform.is_tpu() and self.world_size > 1: - if self.distributed_executor_backend is None: - self.distributed_executor_backend = "ray" - if self.distributed_executor_backend != "ray": - raise ValueError( - "TPU backend only supports Ray for distributed inference.") - - if current_platform.is_hpu() and self.world_size > 1: + ray_only_devices = ["tpu", "hpu"] + if (current_platform.device_type in ray_only_devices + and self.world_size > 1): if self.distributed_executor_backend is None: self.distributed_executor_backend = "ray" if self.distributed_executor_backend != "ray": 
raise ValueError( - "HPU backend only supports Ray for distributed inference.") + f"{current_platform.device_type.upper()} backend only " + "supports Ray for distributed inference.") if self.distributed_executor_backend is None and self.world_size > 1: # We use multiprocessing by default if world_size fits on the From 571841b7fcc67f8b1d171522f6249ed4224033e1 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 24 Nov 2024 21:24:33 -0800 Subject: [PATCH 130/397] [torch.compile] support encoder based models (#10613) Signed-off-by: youkaichao --- tests/compile/test_basic_correctness.py | 10 ++++++++++ vllm/model_executor/models/bert.py | 17 +++++++---------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index b7170886d2556..99781c55b672e 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -62,6 +62,16 @@ class TestSetting: method="encode", fullgraph=True, ), + # encoder-based embedding model (BERT) + TestSetting( + model="BAAI/bge-base-en-v1.5", + model_args=["--task", "embedding"], + pp_size=1, + tp_size=1, + attn_backend="XFORMERS", + method="encode", + fullgraph=True, + ), # vision language model TestSetting( model="microsoft/Phi-3.5-vision-instruct", diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index 1fc87bc650d92..f570d6d3c12b3 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -5,6 +5,7 @@ from transformers import BertConfig from vllm.attention import Attention, AttentionMetadata, AttentionType +from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, PoolerConfig, VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size from vllm.model_executor.layers.activation import get_act_fn @@ -92,14 +93,14 @@ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: return pooled_output +@support_torch_compile class BertEncoder(nn.Module): - def __init__(self, - config: BertConfig, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - prefix: str = ""): + def __init__(self, vllm_config: VllmConfig, prefix: str = ""): super().__init__() + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config self.layer = nn.ModuleList([ BertLayer(config=config, cache_config=cache_config, @@ -336,12 +337,8 @@ def __init__(self, add_pooling_layer: bool = False): super().__init__() config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config self.embeddings = embedding_class(config) - self.encoder = BertEncoder(config, - cache_config, - quant_config, + self.encoder = BertEncoder(vllm_config=vllm_config, prefix=f"{prefix}.encoder") self.pooler = BertPooler(config) if add_pooling_layer else None From a30a605d214e051c31057f8c0cb948c841a2f743 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Mon, 25 Nov 2024 14:34:07 +0800 Subject: [PATCH 131/397] [Doc] Add encoder-based models to Supported Models page (#10616) Signed-off-by: DarkLight1337 --- docs/source/models/supported_models.rst | 45 +++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index e902d393f2f70..ccd2d8de8ec0b 100644 --- a/docs/source/models/supported_models.rst +++ 
b/docs/source/models/supported_models.rst @@ -325,6 +325,11 @@ Text Embedding - Example HF Models - :ref:`LoRA ` - :ref:`PP ` + * - :code:`BertModel` + - BERT-based + - :code:`BAAI/bge-base-en-v1.5`, etc. + - + - * - :code:`Gemma2Model` - Gemma2-based - :code:`BAAI/bge-multilingual-gemma2`, etc. @@ -340,6 +345,16 @@ Text Embedding - :code:`ssmits/Qwen2-7B-Instruct-embed-base`, :code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`, etc. - ✅︎ - ✅︎ + * - :code:`RobertaModel`, :code:`RobertaForMaskedLM` + - RoBERTa-based + - :code:`sentence-transformers/all-roberta-large-v1`, :code:`sentence-transformers/all-roberta-large-v1`, etc. + - + - + * - :code:`XLMRobertaModel` + - XLM-RoBERTa-based + - :code:`intfloat/multilingual-e5-large`, etc. + - + - .. important:: Some model architectures support both generation and embedding tasks. @@ -390,6 +405,36 @@ Classification .. note:: As an interim measure, these models are supported in both offline and online inference via Embeddings API. +Sentence Pair Scoring +--------------------- + +.. list-table:: + :widths: 25 25 50 5 5 + :header-rows: 1 + + * - Architecture + - Models + - Example HF Models + - :ref:`LoRA ` + - :ref:`PP ` + * - :code:`BertForSequenceClassification` + - BERT-based + - :code:`cross-encoder/ms-marco-MiniLM-L-6-v2`, etc. + - + - + * - :code:`RobertaForSequenceClassification` + - RoBERTa-based + - :code:`cross-encoder/quora-roberta-base`, etc. + - + - + * - :code:`XLMRobertaForSequenceClassification` + - XLM-RoBERTa-based + - :code:`BAAI/bge-reranker-v2-m3`, etc. + - + - + +.. note:: + These models are supported in both offline and online inference via Score API. Multimodal Language Models ^^^^^^^^^^^^^^^^^^^^^^^^^^ From 7c2134beda9a4f72c71c4faffcca22cebd4e1c3c Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Mon, 25 Nov 2024 15:04:21 +0800 Subject: [PATCH 132/397] [torch.compile] force inductor threads (#10620) Signed-off-by: Jee Jee Li --- vllm/plugins/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index d5056b18fe968..bd4764c5cc79c 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -3,6 +3,8 @@ from contextlib import contextmanager from typing import TYPE_CHECKING, Optional +import torch + import vllm.envs as envs if TYPE_CHECKING: @@ -26,7 +28,8 @@ def load_general_plugins(): # see https://github.com/vllm-project/vllm/issues/10480 os.environ['TORCHINDUCTOR_COMPILE_THREADS'] = '1' - + # see https://github.com/vllm-project/vllm/issues/10619 + torch._inductor.config.compile_threads = 1 global plugins_loaded if plugins_loaded: return From 65813781a2e2e76d18741601afe66b870a90a717 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 24 Nov 2024 23:27:51 -0800 Subject: [PATCH 133/397] [torch.compile] add warning for unsupported models (#10622) Signed-off-by: youkaichao --- vllm/compilation/counter.py | 1 + vllm/compilation/decorators.py | 2 ++ vllm/plugins/__init__.py | 15 +++++++++++++++ 3 files changed, 18 insertions(+) diff --git a/vllm/compilation/counter.py b/vllm/compilation/counter.py index 100a49aba74ac..6385f1c5dbf81 100644 --- a/vllm/compilation/counter.py +++ b/vllm/compilation/counter.py @@ -5,6 +5,7 @@ @dataclasses.dataclass class CompilationCounter: + num_models_seen: int = 0 num_graphs_seen: int = 0 # including the splitting ops num_piecewise_graphs_seen: int = 0 diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index 4b78491bc5a48..8b81a29936989 100644 --- a/vllm/compilation/decorators.py +++ 
b/vllm/compilation/decorators.py @@ -3,6 +3,7 @@ import torch +from vllm.compilation.counter import compilation_counter from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher from vllm.config import CompilationLevel, VllmConfig from vllm.logger import init_logger @@ -130,6 +131,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): ] or not supports_dynamo() if self.do_not_compile: return + compilation_counter.num_models_seen += 1 TorchCompileWrapperWithCustomDispatcher.__init__( self, compilation_level=vllm_config.compilation_config.level) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index bd4764c5cc79c..8b43167693598 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -80,6 +80,9 @@ def set_current_vllm_config(vllm_config: "VllmConfig"): """ global _current_vllm_config old_vllm_config = _current_vllm_config + from vllm.compilation.counter import compilation_counter + from vllm.config import CompilationLevel + num_models_seen = compilation_counter.num_models_seen try: _current_vllm_config = vllm_config yield @@ -88,6 +91,18 @@ def set_current_vllm_config(vllm_config: "VllmConfig"): vllm_config.compilation_config.enabled_custom_ops) logger.debug("disabled custom ops: %s", vllm_config.compilation_config.disabled_custom_ops) + if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \ + and compilation_counter.num_models_seen == num_models_seen: + # If the model supports compilation, + # compilation_counter.num_models_seen should be increased + # by at least 1. + # If it is not increased, it means the model does not support + # compilation (does not have @support_torch_compile decorator). + logger.warning( + "`torch.compile` is turned on, but the model %s" + " does not support it. Please open an issue on GitHub" + "if you want it to be supported.", + vllm_config.model_config.model) _current_vllm_config = old_vllm_config From 25d806e95391a8556deb69bdb214714425f776c9 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 24 Nov 2024 23:40:08 -0800 Subject: [PATCH 134/397] [misc] add torch.compile compatibility check (#10618) Signed-off-by: youkaichao --- tests/v1/engine/test_engine_core_client.py | 2 +- vllm/config.py | 14 ++++++++++++++ vllm/engine/arg_utils.py | 7 +++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/tests/v1/engine/test_engine_core_client.py b/tests/v1/engine/test_engine_core_client.py index 7b241bf836a0e..e248e35ae4069 100644 --- a/tests/v1/engine/test_engine_core_client.py +++ b/tests/v1/engine/test_engine_core_client.py @@ -81,7 +81,7 @@ def test_engine_core_client(monkeypatch, multiprocessing_mode: bool): with monkeypatch.context() as m: m.setenv("VLLM_USE_V1", "1") - engine_args = EngineArgs(model=MODEL_NAME) + engine_args = EngineArgs(model=MODEL_NAME, compilation_config=3) vllm_config = engine_args.create_engine_config() executor_class = AsyncLLM._get_executor_cls(vllm_config) client = EngineCoreClient.make_client( diff --git a/vllm/config.py b/vllm/config.py index dcdaf58b5ccdb..68720f3a3034d 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2394,6 +2394,20 @@ def __post_init__(self): self.compilation_config.pass_config.enable_reshape = False self.compilation_config.level = CompilationLevel.PIECEWISE + if self.cache_config is not None and \ + self.cache_config.cpu_offload_gb > 0 and \ + self.compilation_config.level != CompilationLevel.NO_COMPILATION: + logger.warning( + "CPU offload is not supported with `torch.compile` yet." 
+ " Disabling `torch.compile`.") + self.compilation_config.level = CompilationLevel.NO_COMPILATION + + if self.lora_config is not None and self.compilation_config.level !=\ + CompilationLevel.NO_COMPILATION: + logger.warning("LoRA is not supported with `torch.compile` yet. " + "Disabling `torch.compile`.") + self.compilation_config.level = CompilationLevel.NO_COMPILATION + current_platform.check_and_update_config(self) def __str__(self): diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 82f1ef51255e9..a43e133f21ac2 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -197,6 +197,13 @@ def __post_init__(self): if not self.tokenizer: self.tokenizer = self.model + # support `EngineArgs(compilation_config={...})` + # without having to manually construct a + # CompilationConfig object + if isinstance(self.compilation_config, (int, dict)): + self.compilation_config = CompilationConfig.from_cli( + json.dumps(self.compilation_config)) + # Setup plugins from vllm.plugins import load_general_plugins load_general_plugins() From 05d1f8c9c64b4458ae7cee2650eb97498146ee50 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 25 Nov 2024 01:27:30 -0800 Subject: [PATCH 135/397] [misc] move functions to config.py (#10624) Signed-off-by: youkaichao --- tests/compile/piecewise/test_simple.py | 4 +- tests/compile/piecewise/test_toy_llama.py | 4 +- tests/kernels/test_encoder_decoder_attn.py | 3 +- .../model_executor/test_enabled_custom_ops.py | 3 +- vllm/attention/layer.py | 3 +- vllm/compilation/wrapper.py | 3 +- vllm/config.py | 51 +++++++++++++++++ vllm/model_executor/custom_op.py | 2 +- vllm/model_executor/model_loader/loader.py | 3 +- .../model_executor/model_loader/tensorizer.py | 3 +- vllm/plugins/__init__.py | 56 ------------------- 11 files changed, 62 insertions(+), 73 deletions(-) diff --git a/tests/compile/piecewise/test_simple.py b/tests/compile/piecewise/test_simple.py index 0db12d6b6a43c..7ef502abee345 100644 --- a/tests/compile/piecewise/test_simple.py +++ b/tests/compile/piecewise/test_simple.py @@ -10,8 +10,8 @@ from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile -from vllm.config import CompilationConfig, CompilationLevel, VllmConfig -from vllm.plugins import set_current_vllm_config +from vllm.config import (CompilationConfig, CompilationLevel, VllmConfig, + set_current_vllm_config) from vllm.utils import direct_register_custom_op global_counter = 0 diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index cfe661b8871e0..dbd5a3bbffeab 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -16,8 +16,8 @@ from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile -from vllm.config import CompilationConfig, CompilationLevel, VllmConfig -from vllm.plugins import set_current_vllm_config +from vllm.config import (CompilationConfig, CompilationLevel, VllmConfig, + set_current_vllm_config) from vllm.utils import direct_register_custom_op # create a library to hold the custom op diff --git a/tests/kernels/test_encoder_decoder_attn.py b/tests/kernels/test_encoder_decoder_attn.py index c4b72ba6bf4ee..d943b048b7934 100644 --- a/tests/kernels/test_encoder_decoder_attn.py +++ b/tests/kernels/test_encoder_decoder_attn.py 
@@ -18,10 +18,9 @@ from vllm.attention.backends.utils import STR_NOT_IMPL_ENC_DEC_ROCM_HIP from vllm.attention.selector import (_Backend, _cached_get_attn_backend, global_force_attn_backend_context_manager) -from vllm.config import VllmConfig +from vllm.config import VllmConfig, set_current_vllm_config from vllm.forward_context import set_forward_context from vllm.platforms import current_platform -from vllm.plugins import set_current_vllm_config # List of support backends for encoder/decoder models LIST_ENC_DEC_SUPPORTED_BACKENDS = [_Backend.XFORMERS, _Backend.FLASH_ATTN] diff --git a/tests/model_executor/test_enabled_custom_ops.py b/tests/model_executor/test_enabled_custom_ops.py index c54e30995da49..0a3aba255fd76 100644 --- a/tests/model_executor/test_enabled_custom_ops.py +++ b/tests/model_executor/test_enabled_custom_ops.py @@ -2,13 +2,12 @@ import pytest -from vllm.config import CompilationConfig, VllmConfig +from vllm.config import CompilationConfig, VllmConfig, set_current_vllm_config from vllm.model_executor.custom_op import CustomOp from vllm.model_executor.layers.activation import (GeluAndMul, ReLUSquaredActivation, SiluAndMul) from vllm.model_executor.layers.layernorm import RMSNorm -from vllm.plugins import set_current_vllm_config # Registered subclass for test diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index 1bb335909484b..17157617248f7 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -7,13 +7,12 @@ import vllm.envs as envs from vllm.attention import AttentionMetadata, AttentionType from vllm.attention.selector import backend_name_to_enum, get_attn_backend -from vllm.config import CacheConfig +from vllm.config import CacheConfig, get_current_vllm_config from vllm.forward_context import ForwardContext, get_forward_context from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) from vllm.model_executor.layers.quantization.kv_cache import BaseKVCacheMethod from vllm.platforms import current_platform -from vllm.plugins import get_current_vllm_config from vllm.utils import direct_register_custom_op diff --git a/vllm/compilation/wrapper.py b/vllm/compilation/wrapper.py index 0143d0301ca1a..bc4d292fef402 100644 --- a/vllm/compilation/wrapper.py +++ b/vllm/compilation/wrapper.py @@ -8,7 +8,7 @@ import torch import vllm.envs as envs -from vllm.config import CompilationLevel +from vllm.config import CompilationLevel, get_current_vllm_config class TorchCompileWrapperWithCustomDispatcher: @@ -32,7 +32,6 @@ def __init__(self, # default compilation settings # compiling the forward method - from vllm.plugins import get_current_vllm_config backend = get_current_vllm_config( ).compilation_config.init_backend() diff --git a/vllm/config.py b/vllm/config.py index 68720f3a3034d..0a390c4311ba6 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -3,6 +3,7 @@ import hashlib import json import warnings +from contextlib import contextmanager from dataclasses import dataclass, field, replace from pathlib import Path from typing import (TYPE_CHECKING, Any, Callable, ClassVar, Counter, Dict, @@ -2450,3 +2451,53 @@ def __str__(self): self.cache_config.enable_prefix_caching, self.model_config.use_async_output_proc, self.model_config.mm_processor_kwargs) + + +_current_vllm_config: Optional[VllmConfig] = None + + +@contextmanager +def set_current_vllm_config(vllm_config: VllmConfig): + """ + Temporarily set the current VLLM config. + Used during model initialization. 
+ We save the current VLLM config in a global variable, + so that all modules can access it, e.g. custom ops + can access the VLLM config to determine how to dispatch. + """ + global _current_vllm_config + old_vllm_config = _current_vllm_config + from vllm.compilation.counter import compilation_counter + num_models_seen = compilation_counter.num_models_seen + try: + _current_vllm_config = vllm_config + yield + finally: + logger.debug("enabled custom ops: %s", + vllm_config.compilation_config.enabled_custom_ops) + logger.debug("disabled custom ops: %s", + vllm_config.compilation_config.disabled_custom_ops) + if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \ + and compilation_counter.num_models_seen == num_models_seen: + # If the model supports compilation, + # compilation_counter.num_models_seen should be increased + # by at least 1. + # If it is not increased, it means the model does not support + # compilation (does not have @support_torch_compile decorator). + logger.warning( + "`torch.compile` is turned on, but the model %s" + " does not support it. Please open an issue on GitHub" + "if you want it to be supported.", + vllm_config.model_config.model) + _current_vllm_config = old_vllm_config + + +def get_current_vllm_config() -> VllmConfig: + if _current_vllm_config is None: + # in ci, usually when we test custom ops/modules directly, + # we don't set the vllm config. In that case, we set a default + # config. + logger.warning("Current VLLM config is not set.") + from vllm.config import VllmConfig + return VllmConfig() + return _current_vllm_config diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index b07966f2ab7d0..fddc8bad09ef5 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -2,9 +2,9 @@ import torch.nn as nn +from vllm.config import get_current_vllm_config from vllm.logger import init_logger from vllm.platforms import current_platform -from vllm.plugins import get_current_vllm_config from vllm.utils import print_warning_once logger = init_logger(__name__) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 34e0860162260..441dd409b4f9d 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -23,7 +23,7 @@ from transformers.utils import SAFE_WEIGHTS_INDEX_NAME from vllm.config import (LoadConfig, LoadFormat, ModelConfig, ParallelConfig, - VllmConfig) + VllmConfig, set_current_vllm_config) from vllm.distributed import (get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size) from vllm.envs import VLLM_USE_MODELSCOPE @@ -47,7 +47,6 @@ safetensors_weights_iterator) from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform -from vllm.plugins import set_current_vllm_config from vllm.utils import is_pin_memory_available diff --git a/vllm/model_executor/model_loader/tensorizer.py b/vllm/model_executor/model_loader/tensorizer.py index 3fd668765a1b1..87f3fcb5cae00 100644 --- a/vllm/model_executor/model_loader/tensorizer.py +++ b/vllm/model_executor/model_loader/tensorizer.py @@ -13,13 +13,12 @@ from transformers import PretrainedConfig import vllm.envs as envs -from vllm.config import ModelConfig, ParallelConfig +from vllm.config import ModelConfig, ParallelConfig, set_current_vllm_config from vllm.engine.arg_utils import EngineArgs from vllm.engine.llm_engine import LLMEngine from vllm.logger import init_logger from 
vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) -from vllm.plugins import set_current_vllm_config from vllm.utils import FlexibleArgumentParser tensorizer_error_msg = None diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 8b43167693598..3c64726ca3344 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -1,15 +1,10 @@ import logging import os -from contextlib import contextmanager -from typing import TYPE_CHECKING, Optional import torch import vllm.envs as envs -if TYPE_CHECKING: - from vllm.config import VllmConfig - logger = logging.getLogger(__name__) # make sure one process only loads plugins once @@ -64,54 +59,3 @@ def load_general_plugins(): logger.info("plugin %s loaded.", plugin.name) except Exception: logger.exception("Failed to load plugin %s", plugin.name) - - -_current_vllm_config: Optional["VllmConfig"] = None - - -@contextmanager -def set_current_vllm_config(vllm_config: "VllmConfig"): - """ - Temporarily set the current VLLM config. - Used during model initialization. - We save the current VLLM config in a global variable, - so that all modules can access it, e.g. custom ops - can access the VLLM config to determine how to dispatch. - """ - global _current_vllm_config - old_vllm_config = _current_vllm_config - from vllm.compilation.counter import compilation_counter - from vllm.config import CompilationLevel - num_models_seen = compilation_counter.num_models_seen - try: - _current_vllm_config = vllm_config - yield - finally: - logger.debug("enabled custom ops: %s", - vllm_config.compilation_config.enabled_custom_ops) - logger.debug("disabled custom ops: %s", - vllm_config.compilation_config.disabled_custom_ops) - if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \ - and compilation_counter.num_models_seen == num_models_seen: - # If the model supports compilation, - # compilation_counter.num_models_seen should be increased - # by at least 1. - # If it is not increased, it means the model does not support - # compilation (does not have @support_torch_compile decorator). - logger.warning( - "`torch.compile` is turned on, but the model %s" - " does not support it. Please open an issue on GitHub" - "if you want it to be supported.", - vllm_config.model_config.model) - _current_vllm_config = old_vllm_config - - -def get_current_vllm_config() -> "VllmConfig": - if _current_vllm_config is None: - # in ci, usually when we test custom ops/modules directly, - # we don't set the vllm config. In that case, we set a default - # config. 
- logger.warning("Current VLLM config is not set.") - from vllm.config import VllmConfig - return VllmConfig() - return _current_vllm_config From ed46f143212203b7afcbc8538119b6e8155c643e Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Mon, 25 Nov 2024 17:51:20 +0800 Subject: [PATCH 136/397] [Model] Support `is_causal` HF config field for Qwen2 model (#10621) Signed-off-by: DarkLight1337 --- docs/source/models/supported_models.rst | 13 +++++++++--- .../embedding/language/test_embedding.py | 12 +++++++++-- tests/models/embedding/utils.py | 4 ++-- vllm/config.py | 15 ++++++++++---- vllm/model_executor/models/qwen2.py | 20 +++++++++++++++++-- 5 files changed, 51 insertions(+), 13 deletions(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index ccd2d8de8ec0b..54e2c4479c2c9 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -342,7 +342,7 @@ Text Embedding - ✅︎ * - :code:`Qwen2Model`, :code:`Qwen2ForCausalLM` - Qwen2-based - - :code:`ssmits/Qwen2-7B-Instruct-embed-base`, :code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`, etc. + - :code:`ssmits/Qwen2-7B-Instruct-embed-base`, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. - ✅︎ - ✅︎ * - :code:`RobertaModel`, :code:`RobertaForMaskedLM` @@ -363,6 +363,13 @@ Text Embedding .. tip:: You can override the model's pooling method by passing :code:`--override-pooler-config`. +.. note:: + Unlike base Qwen2, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` uses bi-directional attention. + You can set `--hf-overrides '{"is_causal": false}'` to change the attention mask accordingly. + + On the other hand, its 1.5B variant (:code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`) uses causal attention + despite being described otherwise on its model card. + Reward Modeling --------------- @@ -606,10 +613,10 @@ Text Generation | :sup:`+` Multiple items can be inputted per text prompt for this modality. .. note:: - vLLM currently only supports adding LoRA to the language backbone of multimodal models. + vLLM currently only supports adding LoRA to the language backbone of multimodal models. .. note:: - For :code:`openbmb/MiniCPM-V-2`, the official repo doesn't work yet, so we need to use a fork (:code:`HwwwH/MiniCPM-V-2`) for now. + The official :code:`openbmb/MiniCPM-V-2` doesn't work yet, so we need to use a fork (:code:`HwwwH/MiniCPM-V-2`) for now. 
For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630 Multimodal Embedding diff --git a/tests/models/embedding/language/test_embedding.py b/tests/models/embedding/language/test_embedding.py index c3f351ef707be..36b1e5887981c 100644 --- a/tests/models/embedding/language/test_embedding.py +++ b/tests/models/embedding/language/test_embedding.py @@ -21,6 +21,7 @@ marks=[pytest.mark.core_model]), pytest.param("ssmits/Qwen2-7B-Instruct-embed-base"), pytest.param("Alibaba-NLP/gte-Qwen2-1.5B-instruct"), + pytest.param("Alibaba-NLP/gte-Qwen2-7B-instruct"), ], ) @pytest.mark.parametrize("dtype", ["half"]) @@ -31,6 +32,10 @@ def test_models( model, dtype: str, ) -> None: + vllm_extra_kwargs = {} + if model == "Alibaba-NLP/gte-Qwen2-7B-instruct": + vllm_extra_kwargs["hf_overrides"] = {"is_causal": False} + # The example_prompts has ending "\n", for example: # "Write a short story about a robot that dreams for the first time.\n" # sentence_transformers will strip the input texts, see: @@ -43,8 +48,11 @@ def test_models( is_sentence_transformer=True) as hf_model: hf_outputs = hf_model.encode(example_prompts) - with vllm_runner(model, task="embedding", dtype=dtype, - max_model_len=None) as vllm_model: + with vllm_runner(model, + task="embedding", + dtype=dtype, + max_model_len=None, + **vllm_extra_kwargs) as vllm_model: vllm_outputs = vllm_model.encode(example_prompts) # This test is for verifying whether the model's extra_repr # can be printed correctly. diff --git a/tests/models/embedding/utils.py b/tests/models/embedding/utils.py index fd1c44d9c117e..f96c7d2b176db 100644 --- a/tests/models/embedding/utils.py +++ b/tests/models/embedding/utils.py @@ -24,7 +24,7 @@ def check_embeddings_close( dim=0) fail_msg = (f"Test{prompt_idx}:" - f"\n{name_0}:\t{embeddings_0!r}" - f"\n{name_1}:\t{embeddings_1!r}") + f"\n{name_0}:\t{embeddings_0[:16]!r}" + f"\n{name_1}:\t{embeddings_1[:16]!r}") assert sim >= 1 - tol, fail_msg diff --git a/vllm/config.py b/vllm/config.py index 0a390c4311ba6..f9ecb02cd5bde 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -27,7 +27,7 @@ get_hf_text_config, get_pooling_config, get_sentence_transformer_tokenizer_config, is_encoder_decoder, uses_mrope) from vllm.utils import (GiB_bytes, cuda_device_count_stateless, get_cpu_memory, - identity, print_warning_once, resolve_obj_by_qualname) + print_warning_once, resolve_obj_by_qualname) if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup @@ -183,7 +183,7 @@ def __init__( hf_overrides_fn = hf_overrides else: hf_overrides_kw = hf_overrides - hf_overrides_fn = identity + hf_overrides_fn = None if rope_scaling is not None: hf_override: Dict[str, Any] = {"rope_scaling": rope_scaling} @@ -212,8 +212,15 @@ def __init__( self.skip_tokenizer_init = skip_tokenizer_init hf_config = get_config(self.model, trust_remote_code, revision, - code_revision, config_format, **hf_overrides_kw) - hf_config = hf_overrides_fn(hf_config) + code_revision, config_format) + + if hf_overrides_kw: + logger.info("Overriding HF config with %s", hf_overrides_kw) + hf_config.update(hf_overrides_kw) + if hf_overrides_fn: + logger.info("Overriding HF config with %s", hf_overrides_fn) + hf_config = hf_overrides_fn(hf_config) + self.hf_config = hf_config self.hf_text_config = get_hf_text_config(self.hf_config) diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 370cff5fa153f..8da75c9935a13 100644 --- a/vllm/model_executor/models/qwen2.py +++ 
b/vllm/model_executor/models/qwen2.py @@ -27,7 +27,7 @@ from torch import nn from transformers import Qwen2Config -from vllm.attention import Attention, AttentionMetadata +from vllm.attention import Attention, AttentionMetadata, AttentionType from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size @@ -164,11 +164,17 @@ def forward( hidden_states: torch.Tensor, kv_cache: torch.Tensor, attn_metadata: AttentionMetadata, + attn_type: str = AttentionType.DECODER, ) -> torch.Tensor: qkv, _ = self.qkv_proj(hidden_states) q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) q, k = self.rotary_emb(positions, q, k) - attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + attn_output = self.attn(q, + k, + v, + kv_cache, + attn_metadata, + attn_type=attn_type) output, _ = self.o_proj(attn_output) return output @@ -210,6 +216,15 @@ def __init__( self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + # By default, Qwen2 uses causal attention as it is a decoder-only model. + # You can override the HF config with `is_causal=False` to enable + # bidirectional attention, which is used in some embedding models + # (e.g. Alibaba-NLP/gte-Qwen2-7B-instruct) + if getattr(config, "is_causal", True): + self._attn_type = AttentionType.DECODER + else: + self._attn_type = AttentionType.ENCODER_ONLY + def forward( self, positions: torch.Tensor, @@ -230,6 +245,7 @@ def forward( hidden_states=hidden_states, kv_cache=kv_cache, attn_metadata=attn_metadata, + attn_type=self._attn_type, ) # Fully Connected From 2b0879bfc273a08d339b952890c4e88e77f0a014 Mon Sep 17 00:00:00 2001 From: fzyzcjy <5236035+fzyzcjy@users.noreply.github.com> Date: Mon, 25 Nov 2024 21:08:30 +0800 Subject: [PATCH 137/397] Super tiny little typo fix (#10633) --- docs/source/quantization/fp8_e5m2_kvcache.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/quantization/fp8_e5m2_kvcache.rst b/docs/source/quantization/fp8_e5m2_kvcache.rst index 9ae07bcd3b991..b2d824427f786 100644 --- a/docs/source/quantization/fp8_e5m2_kvcache.rst +++ b/docs/source/quantization/fp8_e5m2_kvcache.rst @@ -4,7 +4,7 @@ FP8 E5M2 KV Cache ================== The int8/int4 quantization scheme requires additional scale GPU memory storage, which reduces the expected GPU memory benefits. -The FP8 data format retains 2~3 mantissa bits and can convert float/fp16/bflaot16 and fp8 to each other. +The FP8 data format retains 2~3 mantissa bits and can convert float/fp16/bfloat16 and fp8 to each other. 
Here is an example of how to enable this feature: From d04b13a380da422afa1883efc81e0d4c4b18d091 Mon Sep 17 00:00:00 2001 From: Chauncey Date: Tue, 26 Nov 2024 00:21:41 +0800 Subject: [PATCH 138/397] [Bug]: Authorization ignored when root_path is set (#10606) Signed-off-by: chaunceyjiang --- tests/entrypoints/openai/test_root_path.py | 103 +++++++++++++++++++++ vllm/entrypoints/openai/api_server.py | 6 +- 2 files changed, 107 insertions(+), 2 deletions(-) create mode 100644 tests/entrypoints/openai/test_root_path.py diff --git a/tests/entrypoints/openai/test_root_path.py b/tests/entrypoints/openai/test_root_path.py new file mode 100644 index 0000000000000..20f7960619efb --- /dev/null +++ b/tests/entrypoints/openai/test_root_path.py @@ -0,0 +1,103 @@ +import contextlib +import os +from typing import Any, List, NamedTuple + +import openai # use the official client for correctness check +import pytest + +from ...utils import RemoteOpenAIServer + +# # any model with a chat template should work here +MODEL_NAME = "Qwen/Qwen2-1.5B-Instruct" +DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}""" # noqa: E501 +API_KEY = "abc-123" +ERROR_API_KEY = "abc" +ROOT_PATH = "llm" + + +@pytest.fixture(scope="module") +def server(): + args = [ + # use half precision for speed and memory savings in CI environment + "--dtype", + "float16", + "--enforce-eager", + "--max-model-len", + "4080", + "--root-path", # use --root-path=/llm for testing + "/" + ROOT_PATH, + "--chat-template", + DUMMY_CHAT_TEMPLATE, + ] + envs = os.environ.copy() + + envs["VLLM_API_KEY"] = API_KEY + with RemoteOpenAIServer(MODEL_NAME, args, env_dict=envs) as remote_server: + yield remote_server + + +class TestCase(NamedTuple): + model_name: str + base_url: List[str] + api_key: str + expected_error: Any + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "test_case", + [ + TestCase( + model_name=MODEL_NAME, + base_url=["v1"], # http://localhost:8000/v1 + api_key=ERROR_API_KEY, + expected_error=openai.AuthenticationError), + TestCase( + model_name=MODEL_NAME, + base_url=[ROOT_PATH, "v1"], # http://localhost:8000/llm/v1 + api_key=ERROR_API_KEY, + expected_error=openai.AuthenticationError), + TestCase( + model_name=MODEL_NAME, + base_url=["v1"], # http://localhost:8000/v1 + api_key=API_KEY, + expected_error=None), + TestCase( + model_name=MODEL_NAME, + base_url=[ROOT_PATH, "v1"], # http://localhost:8000/llm/v1 + api_key=API_KEY, + expected_error=None), + ], +) +async def test_chat_session_root_path_with_api_key(server: RemoteOpenAIServer, + test_case: TestCase): + saying: str = "Here is a common saying about apple. 
An apple a day, keeps" + ctx = contextlib.nullcontext() + if test_case.expected_error is not None: + ctx = pytest.raises(test_case.expected_error) + with ctx: + client = openai.AsyncOpenAI( + api_key=test_case.api_key, + base_url=server.url_for(*test_case.base_url), + max_retries=0) + chat_completion = await client.chat.completions.create( + model=test_case.model_name, + messages=[{ + "role": "user", + "content": "tell me a common saying" + }, { + "role": "assistant", + "content": saying + }], + extra_body={ + "continue_final_message": True, + "add_generation_prompt": False + }) + + assert chat_completion.id is not None + assert len(chat_completion.choices) == 1 + choice = chat_completion.choices[0] + assert choice.finish_reason == "stop" + message = choice.message + assert len(message.content) > 0 + assert message.role == "assistant" diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 2b1f14b89b1f2..bc018be982bff 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -499,10 +499,12 @@ async def validation_exception_handler(_, exc): @app.middleware("http") async def authentication(request: Request, call_next): - root_path = "" if args.root_path is None else args.root_path if request.method == "OPTIONS": return await call_next(request) - if not request.url.path.startswith(f"{root_path}/v1"): + url_path = request.url.path + if app.root_path and url_path.startswith(app.root_path): + url_path = url_path[len(app.root_path):] + if not url_path.startswith("/v1"): return await call_next(request) if request.headers.get("Authorization") != "Bearer " + token: return JSONResponse(content={"error": "Unauthorized"}, From c27df94e1ff98551b987b40bb2049bf4640e202a Mon Sep 17 00:00:00 2001 From: Wallas Henrique Date: Mon, 25 Nov 2024 14:23:32 -0300 Subject: [PATCH 139/397] [Bugfix] Fix chunked prefill with model dtype float32 on Turing Devices (#9850) Signed-off-by: Wallas Santos Co-authored-by: Michael Goin --- pyproject.toml | 1 + tests/conftest.py | 19 +++++++++ tests/kernels/test_prefix_prefill.py | 63 ++++++++++++++++++++++++++++ vllm/attention/ops/prefix_prefill.py | 41 ++++++++++++------ vllm/config.py | 10 +++++ vllm/engine/arg_utils.py | 1 + 6 files changed, 122 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3c8c46cc8621e..253b706a774a7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -98,4 +98,5 @@ markers = [ "quant_model: run this model test under Quantized category", "distributed_2_gpus: run this test only in distributed tests for 2 GPUs", "skip_v1: do not run this test with v1", + "optional: optional tests that are automatically skipped, include --optional to run them", ] diff --git a/tests/conftest.py b/tests/conftest.py index 29707f975e2a0..d56942d8912af 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1030,3 +1030,22 @@ def dummy_gemma2_embedding_path(): with open(json_path, "w") as f: json.dump(config, f) return _dummy_gemma2_embedding_path + + +# Add the flag `--optional` to allow run tests +# that are marked with @pytest.mark.optional +def pytest_addoption(parser): + parser.addoption("--optional", + action="store_true", + default=False, + help="run optional test") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--optional"): + # --optional given in cli: do not skip optional tests + return + skip_optional = pytest.mark.skip(reason="need --optional option to run") + for item in items: + if "optional" in item.keywords: + 
item.add_marker(skip_optional) diff --git a/tests/kernels/test_prefix_prefill.py b/tests/kernels/test_prefix_prefill.py index a8a187ebaede4..3fdb7996ba4e0 100644 --- a/tests/kernels/test_prefix_prefill.py +++ b/tests/kernels/test_prefix_prefill.py @@ -40,6 +40,13 @@ def test_contexted_kv_attention( kv_cache_dtype: str, device: str, ) -> None: + + if 'fp8' in kv_cache_dtype and not current_platform.has_device_capability( + 89): + pytest.skip( + 'Triton limitation: fp8e4nv data type is not supported on CUDA' + ' arch < 89') + current_platform.seed_everything(0) torch.set_default_device(device) @@ -235,6 +242,13 @@ def test_contexted_kv_attention_alibi( kv_cache_dtype: str, device: str, ) -> None: + + if 'fp8' in kv_cache_dtype and not current_platform.has_device_capability( + 89): + pytest.skip( + 'Triton limitation: fp8e4nv data type is not supported on CUDA' + ' arch < 89') + current_platform.seed_everything(0) torch.set_default_device(device) @@ -462,3 +476,52 @@ def _get_alibi_slopes(total_num_heads: int) -> torch.Tensor: print(f"xformers Time: {(end_time - start_time)*1000:.2f} ms") atol = 1e-3 if "fp8" in kv_cache_dtype else 1e-6 torch.testing.assert_close(output, output_ref, atol=atol, rtol=0) + + +# These tests are optional to only run when explicitly invoked +# +# pytest -v -s --optional \ +# tests/kernels/test_prefix_prefill.py::test_contexted_kv_attention_f32 +# +# These tests are useful to test model dtype float32 on Turing devices. +# We skip them to not increase the time when running tests on CI +@pytest.mark.optional +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("num_queries_per_kv", NUM_QUERIES_PER_KV) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("dtype", [torch.float32]) +@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPES) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("sliding_window", SLIDING_WINDOW) +@torch.inference_mode() +def test_contexted_kv_attention_f32( + num_heads: int, + num_queries_per_kv: int, + head_size: int, + sliding_window: int, + dtype: torch.dtype, + kv_cache_dtype: str, + device: str, +) -> None: + test_contexted_kv_attention(num_heads, num_queries_per_kv, head_size, + sliding_window, dtype, kv_cache_dtype, device) + + +@pytest.mark.optional +@pytest.mark.parametrize("num_heads", NUM_HEADS) +@pytest.mark.parametrize("num_queries_per_kv", NUM_QUERIES_PER_KV) +@pytest.mark.parametrize("head_size", HEAD_SIZES) +@pytest.mark.parametrize("dtype", [torch.float32]) +@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPES) +@pytest.mark.parametrize("device", CUDA_DEVICES) +@torch.inference_mode() +def test_contexted_kv_attention_alibi_f32( + num_heads: int, + num_queries_per_kv: int, + head_size: int, + dtype: torch.dtype, + kv_cache_dtype: str, + device: str, +) -> None: + test_contexted_kv_attention_alibi(num_heads, num_queries_per_kv, head_size, + dtype, kv_cache_dtype, device) diff --git a/vllm/attention/ops/prefix_prefill.py b/vllm/attention/ops/prefix_prefill.py index a2a649c8ebcfd..9c11a8df55278 100644 --- a/vllm/attention/ops/prefix_prefill.py +++ b/vllm/attention/ops/prefix_prefill.py @@ -7,6 +7,13 @@ from vllm.platforms import current_platform +# Static kernels parameters +BASE_BLOCK = 128 if current_platform.has_device_capability(80) else 64 +NUM_WARPS = 8 + +# To check compatibility +IS_TURING = current_platform.get_device_capability() == (7, 5) + if triton.__version__ >= "2.1.0": @triton.jit @@ -50,6 +57,7 @@ def _fwd_kernel( stride_v_cache_d, 
stride_v_cache_bl, num_queries_per_kv: int, + IN_PRECISION: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, # head size BLOCK_DMODEL_PADDED: tl.constexpr, # head size padded to a power of 2 @@ -130,7 +138,7 @@ def _fwd_kernel( k = k_load qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) # [M,N] - qk += tl.dot(q, k) + qk = tl.dot(q, k, acc=qk, input_precision=IN_PRECISION) qk = tl.where((start_n + offs_n[None, :]) < cur_batch_ctx_len, qk, float("-inf")) qk *= sm_scale @@ -178,7 +186,7 @@ def _fwd_kernel( v = v_load p = p.to(v.dtype) - acc += tl.dot(p, v) + acc = tl.dot(p, v, acc=acc, input_precision=IN_PRECISION) # # update m_i and l_i l_i = l_i_new m_i = m_i_new @@ -204,7 +212,7 @@ def _fwd_kernel( other=0.0) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - qk += tl.dot(q, k) + qk = tl.dot(q, k, acc=qk, input_precision=IN_PRECISION) qk *= sm_scale # apply causal mask qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, @@ -238,7 +246,7 @@ def _fwd_kernel( other=0.0) p = p.to(v.dtype) - acc += tl.dot(p, v) + acc = tl.dot(p, v, acc=acc, input_precision=IN_PRECISION) # update m_i and l_i l_i = l_i_new m_i = m_i_new @@ -485,6 +493,7 @@ def _fwd_kernel_alibi( stride_v_cache_d, stride_v_cache_bl, num_queries_per_kv: int, + IN_PRECISION: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, # head size BLOCK_DMODEL_PADDED: tl.constexpr, # head size padded to a power of 2 @@ -560,7 +569,7 @@ def _fwd_kernel_alibi( k = k_load qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - qk += tl.dot(q, k) + qk = tl.dot(q, k, acc=qk, input_precision=IN_PRECISION) qk = tl.where((start_n + offs_n[None, :]) < cur_batch_ctx_len, qk, float("-inf")) qk *= sm_scale @@ -600,7 +609,7 @@ def _fwd_kernel_alibi( v = v_load p = p.to(v.dtype) - acc += tl.dot(p, v, allow_tf32=False) + acc = tl.dot(p, v, acc=acc, input_precision='ieee') # update m_i and l_i l_i = l_i_new m_i = m_i_new @@ -635,7 +644,7 @@ def _fwd_kernel_alibi( other=0.0) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - qk += tl.dot(q, k, allow_tf32=False) + qk = tl.dot(q, k, acc=qk, input_precision='ieee') qk *= sm_scale qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) @@ -673,7 +682,7 @@ def _fwd_kernel_alibi( other=0.0) p = p.to(v.dtype) - acc += tl.dot(p, v, allow_tf32=False) + acc = tl.dot(p, v, acc=acc, input_precision='ieee') # update m_i and l_i l_i = l_i_new m_i = m_i_new @@ -709,13 +718,17 @@ def context_attention_fwd(q, alibi_slopes=None, sliding_window=None): - BLOCK = 128 if current_platform.has_device_capability(80) else 64 - NUM_WARPS = 8 - + q_dtype_is_f32 = q.dtype is torch.float32 # need to reduce num. blocks when using fp32 # due to increased use of GPU shared memory - if q.dtype is torch.float32: - BLOCK = BLOCK // 2 + # if q.dtype is torch.float32: + BLOCK = BASE_BLOCK // 2 if q_dtype_is_f32 else BASE_BLOCK + + # Turing does have tensor core for float32 multiplication + # use ieee as fallback for triton kernels work. 
There is also + # warning on vllm/config.py to inform users this fallback + # implementation + IN_PRECISION = 'ieee' if IS_TURING and q_dtype_is_f32 else None # Conversion of FP8 Tensor from uint8 storage to # appropriate torch.dtype for interpretation by Triton @@ -799,6 +812,7 @@ def context_attention_fwd(q, v_cache.stride( 3), #[num_blocks, num_kv_heads, head_size, block_size] num_queries_per_kv=num_queries_per_kv, + IN_PRECISION=IN_PRECISION, BLOCK_M=BLOCK, BLOCK_DMODEL=Lk, BLOCK_DMODEL_PADDED=Lk_padded, @@ -850,6 +864,7 @@ def context_attention_fwd(q, v_cache.stride( 3), #[num_blocks, num_kv_heads, head_size, block_size] num_queries_per_kv=num_queries_per_kv, + IN_PRECISION=IN_PRECISION, BLOCK_M=BLOCK, BLOCK_DMODEL=Lk, BLOCK_DMODEL_PADDED=Lk_padded, diff --git a/vllm/config.py b/vllm/config.py index f9ecb02cd5bde..c87feaec3e5f6 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2388,6 +2388,16 @@ def __post_init__(self): self.quant_config = VllmConfig._get_quantization_config( self.model_config, self.load_config) + if self.scheduler_config is not None and \ + self.model_config is not None and \ + self.scheduler_config.chunked_prefill_enabled and \ + self.model_config.dtype == torch.float32 and \ + current_platform.get_device_capability() == (7, 5): + print_warning_once( + "Turing devices tensor cores do not support float32 matmul. " + "To workaround this limitation, vLLM will set 'ieee' input " + "precision for chunked prefill triton kernels.") + if self.compilation_config is None: self.compilation_config = CompilationConfig() if envs.VLLM_USE_V1 and not self.model_config.enforce_eager: diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index a43e133f21ac2..ca68c1d57151c 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1055,6 +1055,7 @@ def create_engine_config(self) -> VllmConfig: msg = "Chunked prefill is not supported for embedding models" raise ValueError(msg) + speculative_config = SpeculativeConfig.maybe_create_spec_config( target_model_config=model_config, target_parallel_config=parallel_config, From 452a4e80c3dfc6596cd89c7a87dfb7036bab8acd Mon Sep 17 00:00:00 2001 From: Simon Mo Date: Mon, 25 Nov 2024 09:34:46 -0800 Subject: [PATCH 140/397] [Docs] Add Snowflake Slides (#10641) Signed-off-by: simon-mo --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4e1353d98f1dc..cfeb24cbb5823 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Easy, fast, and cheap LLM serving for everyone --- *Latest News* 🔥 -- [2024/11] We hosted [the seventh vLLM meetup](https://lu.ma/h0qvrajz) with Snowflake! Please find the meetup slides [here](https://docs.google.com/presentation/d/1e3CxQBV3JsfGp30SwyvS3eM_tW-ghOhJ9PAJGK6KR54/edit?usp=sharing). +- [2024/11] We hosted [the seventh vLLM meetup](https://lu.ma/h0qvrajz) with Snowflake! Please find the meetup slides from vLLM team [here](https://docs.google.com/presentation/d/1e3CxQBV3JsfGp30SwyvS3eM_tW-ghOhJ9PAJGK6KR54/edit?usp=sharing), and Snowflake team [here](https://docs.google.com/presentation/d/1qF3RkDAbOULwz9WK5TOltt2fE9t6uIc_hVNLFAaQX6A/edit?usp=sharing). - [2024/10] We have just created a developer slack ([slack.vllm.ai](https://slack.vllm.ai)) focusing on coordinating contributions and discussing features. Please feel free to join us there! - [2024/10] Ray Summit 2024 held a special track for vLLM! 
Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://www.youtube.com/playlist?list=PLzTswPQNepXl6AQwifuwUImLPFRVpksjR) from other vLLM contributors and users! - [2024/09] We hosted [the sixth vLLM meetup](https://lu.ma/87q3nvnh) with NVIDIA! Please find the meetup slides [here](https://docs.google.com/presentation/d/1wrLGwytQfaOTd5wCGSPNhoaW3nq0E-9wqyP7ny93xRs/edit?usp=sharing). From b1d920531f6d5fd6c020096499c91a8f26620cd6 Mon Sep 17 00:00:00 2001 From: zhou fan <1247714429@qq.com> Date: Tue, 26 Nov 2024 02:10:55 +0800 Subject: [PATCH 141/397] [Model]: Add support for Aria model (#10514) Signed-off-by: xffxff <1247714429@qq.com> Co-authored-by: Isotr0py <2037008807@qq.com> --- docs/source/models/supported_models.rst | 6 + examples/offline_inference_vision_language.py | 18 + ...e_inference_vision_language_multi_image.py | 20 + tests/models/registry.py | 2 + vllm/entrypoints/chat_utils.py | 2 + vllm/model_executor/models/aria.py | 695 ++++++++++++++++++ vllm/model_executor/models/registry.py | 1 + vllm/transformers_utils/configs/aria.py | 47 ++ 8 files changed, 791 insertions(+) create mode 100644 vllm/model_executor/models/aria.py create mode 100644 vllm/transformers_utils/configs/aria.py diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 54e2c4479c2c9..7a6932d65e653 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -476,6 +476,12 @@ Text Generation - Example HF Models - :ref:`LoRA ` - :ref:`PP ` + * - :code:`AriaForConditionalGeneration` + - Aria + - T + I + - :code:`rhymes-ai/Aria` + - + - ✅︎ * - :code:`Blip2ForConditionalGeneration` - BLIP-2 - T + I\ :sup:`E` diff --git a/examples/offline_inference_vision_language.py b/examples/offline_inference_vision_language.py index 11af6880e1b5a..f08f22eec164a 100644 --- a/examples/offline_inference_vision_language.py +++ b/examples/offline_inference_vision_language.py @@ -402,6 +402,23 @@ def run_idefics3(question: str, modality: str): return llm, prompt, stop_token_ids +# Aria +def run_aria(question: str, modality: str): + assert modality == "image" + model_name = "rhymes-ai/Aria" + + llm = LLM(model=model_name, + tokenizer_mode="slow", + trust_remote_code=True, + dtype="bfloat16") + + prompt = (f"<|im_start|>user\n<|img|>\n{question}" + "<|im_end|>\n<|im_start|>assistant\n") + + stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] + return llm, prompt, stop_token_ids + + model_example_map = { "llava": run_llava, "llava-next": run_llava_next, @@ -423,6 +440,7 @@ def run_idefics3(question: str, modality: str): "molmo": run_molmo, "glm4v": run_glm4v, "idefics3": run_idefics3, + "aria": run_aria, } diff --git a/examples/offline_inference_vision_language_multi_image.py b/examples/offline_inference_vision_language_multi_image.py index dc12df8d78211..788b604cfd4a0 100644 --- a/examples/offline_inference_vision_language_multi_image.py +++ b/examples/offline_inference_vision_language_multi_image.py @@ -321,6 +321,25 @@ def load_idefics3(question, image_urls: List[str]) -> ModelRequestData: ) +def load_aria(question, image_urls: List[str]) -> ModelRequestData: + model_name = "rhymes-ai/Aria" + llm = LLM(model=model_name, + tokenizer_mode="slow", + trust_remote_code=True, + dtype="bfloat16", + limit_mm_per_prompt={"image": len(image_urls)}) + placeholders = "<|img|>\n" * len(image_urls) + prompt = 
(f"<|im_start|>user\n{placeholders}{question}<|im_end|>\n" + "<|im_start|>assistant\n") + stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] + return ModelRequestData( + llm=llm, + prompt=prompt, + stop_token_ids=stop_token_ids, + image_data=[fetch_image(url) for url in image_urls], + chat_template=None) + + model_example_map = { "phi3_v": load_phi3v, "h2ovl_chat": load_h2onvl, @@ -330,6 +349,7 @@ def load_idefics3(question, image_urls: List[str]) -> ModelRequestData: "qwen_vl_chat": load_qwenvl_chat, "mllama": load_mllama, "idefics3": load_idefics3, + "aria": load_aria, } diff --git a/tests/models/registry.py b/tests/models/registry.py index fa0818c4f0bd1..669c832b1df3a 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -43,6 +43,8 @@ class _HfExamplesInfo: trust_remote_code=True), "ArcticForCausalLM": _HfExamplesInfo("Snowflake/snowflake-arctic-instruct", trust_remote_code=True), + "AriaForConditionalGeneration": _HfExamplesInfo("rhymes-ai/Aria", + trust_remote_code=True), "BaiChuanForCausalLM": _HfExamplesInfo("baichuan-inc/Baichuan-7B", trust_remote_code=True), "BaichuanForCausalLM": _HfExamplesInfo("baichuan-inc/Baichuan2-7B-chat", diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index abee5ac46391c..c2054dcbfce0e 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -412,6 +412,8 @@ def _placeholder_str(self, modality: ModalityStr, return "" if model_type == "idefics3": return "" + if model_type == "aria": + return "<|fim_prefix|><|img|><|fim_suffix|>" raise TypeError(f"Unknown {modality} model type: {model_type}") elif modality == "audio": diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py new file mode 100644 index 0000000000000..0356435e9c257 --- /dev/null +++ b/vllm/model_executor/models/aria.py @@ -0,0 +1,695 @@ +import math +from typing import Iterable, List, Optional, Set, Tuple, TypedDict, Union + +import torch +import torch.nn as nn +from torch.nn.init import trunc_normal_ +from transformers import LlamaConfig + +from vllm.attention import AttentionMetadata +from vllm.config import CacheConfig, QuantizationConfig, VllmConfig +from vllm.distributed import get_tensor_model_parallel_rank +from vllm.inputs import INPUT_REGISTRY, token_inputs +from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization.compressed_tensors.utils import ( + get_compressed_tensors_cache_scale) +from vllm.model_executor.layers.sampler import (Sampler, SamplerOutput, + SamplingMetadata) +from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead +from vllm.model_executor.model_loader.weight_utils import ( + default_weight_loader, maybe_remap_kv_scale_name) +from vllm.model_executor.models.idefics2_vision_model import ( + Idefics2VisionTransformer) +from vllm.model_executor.models.interfaces import SupportsMultiModal +from vllm.model_executor.models.llama import (LlamaDecoderLayer, LlamaMLP, + LlamaModel) +from vllm.model_executor.models.utils import (AutoWeightsLoader, WeightsMapper, + is_pp_missing_parameter, + make_layers, maybe_prefix, + merge_multimodal_embeddings) +from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.base import MultiModalInputs +from vllm.multimodal.image 
import cached_get_image_processor +from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.utils import (cached_get_tokenizer, + repeat_and_pad_placeholder_tokens) +from vllm.sequence import IntermediateTensors +from vllm.transformers_utils.configs.aria import (AriaMoELMConfig, + AriaVisionConfig) + +from .utils import flatten_bn + + +class AriaImagePixelInputs(TypedDict): + pixel_values: torch.Tensor + pixel_mask: Optional[torch.Tensor] + """ + Shape: + pixel_values: `(batch_size * num_images, num_channels, height, width)` + pixel_mask: `(batch_size * num_images, height, width)` + """ + + +class AriaVisionTransformer(Idefics2VisionTransformer): + """ + AriaVisionTransformer is a modified version of Idefics2VisionTransformer + that replaces the post-layernorm with an identity layer. + """ + + def __init__( + self, + config: AriaVisionConfig, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__(config, quant_config, prefix) + self.post_layernorm = nn.Identity() + + +class AriaVisionModel(nn.Module): + config_class = AriaVisionConfig + + def __init__( + self, + config: AriaVisionConfig, + quant_config: Optional[QuantizationConfig] = None, + *, + prefix: str = "", + ) -> None: + super().__init__() + + self.vision_model = AriaVisionTransformer( + config, + quant_config, + prefix=f"{prefix}.vision_model", + ) + + def forward( + self, + pixel_values: torch.Tensor, + pixel_mask: Optional[torch.BoolTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.BoolTensor]]: + patch_attention_mask = self._create_patch_attention_mask(pixel_mask) + + vit_oup = self.vision_model( + pixel_values=pixel_values, + patch_attention_mask=patch_attention_mask, + ) + + image_atts = self._create_image_attention_mask(patch_attention_mask) + + return vit_oup, image_atts + + def _create_patch_attention_mask(self, pixel_mask): + if pixel_mask is None: + return None + + patches_subgrid = pixel_mask.unfold( + dimension=1, + size=self.vision_model.config.patch_size, + step=self.vision_model.config.patch_size, + ).unfold( + dimension=2, + size=self.vision_model.config.patch_size, + step=self.vision_model.config.patch_size, + ) + return (patches_subgrid.sum(dim=(-1, -2)) > 0).bool() + + def _create_image_attention_mask(self, patch_attention_mask): + if patch_attention_mask is None: + return None + + flattened_mask = patch_attention_mask.flatten(1) + return torch.logical_not(flattened_mask) + + +class FFN(nn.Module): + + def __init__(self, embed_dim, ff_dim, output_dim): + super().__init__() + self.linear_in = ColumnParallelLinear(embed_dim, ff_dim, bias=False) + self.linear_out = RowParallelLinear(ff_dim, output_dim, bias=False) + self.act = get_act_fn("gelu_new") + + def forward(self, hidden_states): + hidden_states, _ = self.linear_in(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states, _ = self.linear_out(hidden_states) + return hidden_states + + +class CrossAttention(nn.Module): + + def __init__(self, kv_dim, embed_dim, num_heads, drop_out_rate=0): + super().__init__() + self.num_heads = num_heads + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=False) + self.k_proj = nn.Linear(kv_dim, embed_dim, bias=False) + self.v_proj = nn.Linear(kv_dim, embed_dim, bias=False) + + self.multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + self.linear = nn.Linear(embed_dim, embed_dim) + self.dropout = nn.Dropout(drop_out_rate) + + self.layer_norm = nn.LayerNorm(embed_dim) + self.ln_kv = nn.LayerNorm(kv_dim) + + def forward(self, x, 
hidden_states, attn_mask=None, add_residual=False): + normed_hidden_states = self.layer_norm(hidden_states) + query = self.q_proj(normed_hidden_states).permute(1, 0, 2) + + x = self.ln_kv(x) + key = self.k_proj(x).permute(1, 0, 2) + value = self.v_proj(x).permute(1, 0, 2) + + attn_output, _ = self.multihead_attn(query, + key, + value, + attn_mask=attn_mask) + + attn_output = attn_output.permute(1, 0, 2) + + if add_residual: + attn_output = hidden_states + self.dropout( + self.linear(attn_output)) + else: + attn_output = self.dropout(self.linear(attn_output)) + + return attn_output + + +class AriaProjector(nn.Module): + """ + A projection module with one cross attention layer and one FFN layer, which + projects ViT's outputs into MoE's inputs. + + Args: + patch_to_query_dict (dict): Maps patch numbers to their corresponding + query numbers, + e.g., {1225: 128, 4900: 256}. This allows for different query sizes + based on image resolution. + embed_dim (int): Embedding dimension. + num_heads (int): Number of attention heads. + kv_dim (int): Dimension of key and value. + ff_dim (int): Hidden dimension of the feed-forward network. + output_dim (int): Output dimension. + norm_layer (nn.Module): Normalization layer. Default is nn.LayerNorm. + + Outputs: + A tensor with the shape of (batch_size, query_number, output_dim) + """ + + def __init__( + self, + patch_to_query_dict, + embed_dim, + num_heads, + kv_dim, + ff_dim, + output_dim, + norm_layer=nn.LayerNorm, + ): + super().__init__() + self.patch_to_query_dict = patch_to_query_dict + self.embed_dim = embed_dim + self.num_heads = num_heads + + self.query = nn.Parameter( + torch.zeros(max(patch_to_query_dict.values()), self.embed_dim)) + + trunc_normal_(self.query, std=0.02) + + self.cross_attn = CrossAttention(kv_dim, embed_dim, num_heads) + + self.ln_ffn = norm_layer(embed_dim) + self.ffn = FFN(embed_dim, ff_dim, output_dim) + + def forward(self, x, attn_mask=None): + bs = x.shape[0] + queries = self.query.unsqueeze(0).repeat(bs, 1, 1) + + query_num = self.patch_to_query_dict.get(x.shape[1], None) + assert (query_num is not None + ), f"Query number for {x.shape[1]} patches is not provided" + + queries = queries[:, :query_num, :] + + if attn_mask is not None: + attn_mask = attn_mask.repeat_interleave(self.num_heads, 0) + attn_mask = attn_mask.unsqueeze(1).expand(-1, queries.size(1), -1) + + attention_out = self.cross_attn(x, queries, attn_mask=attn_mask) + + out = self.ffn(self.ln_ffn(attention_out)) + + return out + + +class AriaFusedMoE(FusedMoE): + + def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor, + shard_id: str) -> Set[str]: + # Override the weight_loader to handle the expert weights in the Aria + # model, which are already packed with experts, and merge the gate and + # up weights for each expert. 
+ # Note: Loading expert weights with quantization is not supported + tp_rank = get_tensor_model_parallel_rank() + if shard_id == 'w13': + # the shape of loaded_weight is + # (num_experts, hidden_size, 2 * moe_intermediate_size) + if self.tp_size > 1: + up, gate = loaded_weight.chunk(2, dim=-1) + up_current_rank = up.chunk(self.tp_size, dim=-1)[tp_rank] + gate_current_rank = gate.chunk(self.tp_size, dim=-1)[tp_rank] + up_and_gate = torch.cat([up_current_rank, gate_current_rank], + dim=-1).transpose(1, 2) + param.data.copy_(up_and_gate) + else: + param.data.copy_(loaded_weight.transpose(1, 2)) + elif shard_id == 'w2': + # the shape of loaded_weight is + # (num_experts, moe_intermediate_size, hidden_size) + if self.tp_size > 1: + down_current_rank = loaded_weight.chunk(self.tp_size, + dim=1)[tp_rank] + param.data.copy_(down_current_rank.transpose(1, 2)) + else: + param.data.copy_(loaded_weight.transpose(1, 2)) + + +class MoELayer(nn.Module): + """ + Mixture of Experts (MoE) Layer for the AriaMoE model. + + This layer implements the MoE mechanism, which routes input tokens to + different experts based on a routing algorithm, processes them through the + experts, and then combines the outputs. + """ + + def __init__( + self, + config: AriaMoELMConfig, + quant_config: Optional[QuantizationConfig], + ) -> None: + super().__init__() + self.config = config + + self.router_weight = nn.Parameter( + torch.empty( + (self.config.moe_num_experts, self.config.hidden_size))) + + self.experts = AriaFusedMoE( + num_experts=config.moe_num_experts, + top_k=config.moe_topk, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + quant_config=quant_config, + reduce_results=True, + ) + self.shared_experts = LlamaMLP( + config.hidden_size, + config.moe_intermediate_size * config.moe_num_shared_experts, + "silu", + quant_config=quant_config, + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + """ + Forward pass of the MoE Layer. + + Args: + hidden_states (torch.Tensor): Input tensor of shape (batch_size, + sequence_length, hidden_size). + + Returns: + torch.Tensor: Output tensor after passing through the MoE layer. + """ + + router_output = torch.nn.functional.linear(hidden_states, + self.router_weight) + + shared_expert_output = self.shared_experts(hidden_states) + sparse_expert_output = self.experts(hidden_states, router_output) + + return sparse_expert_output + shared_expert_output + + +class MoEDecoderLayer(LlamaDecoderLayer): + """ + Custom Decoder Layer for the AriaMoE model which modifies the standard + `LlamaDecoderLayer` by replacing the traditional MLP with a Mixture of + Experts (MoE) Layer. + """ + + def __init__( + self, + config: LlamaConfig, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__(config, cache_config, quant_config, prefix) + self.mlp = MoELayer(config, quant_config=quant_config) + + +class AriaMoELMModel(LlamaModel): + """ + Custom LlamaModel for the AriaMoE model which modifies the standard + LlamaModel by replacing the `LlamaDecoderLayer` with `MoEDecoderLayer`. 
+ """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__(vllm_config=vllm_config, prefix=prefix) + + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + + # FIXME: this is a hack to disable the compilation of the model + self.do_not_compile = True + + self.layers = None + + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, + lambda prefix: MoEDecoderLayer( + config=config, + cache_config=cache_config, + quant_config=quant_config, + prefix=prefix, + ), + prefix=f"{prefix}.layers", + ) + + # Adapted from LlamaModel.load_weights with the modification of adding + # the expert weights mapping to `stacked_params_mapping` + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + (".qkv_proj", ".q_proj", "q"), + (".qkv_proj", ".k_proj", "k"), + (".qkv_proj", ".v_proj", "v"), + (".gate_up_proj", ".gate_proj", 0), + (".gate_up_proj", ".up_proj", 1), + ("experts.w13_weight", "experts.fc1.weight", 'w13'), + ("experts.w2_weight", "experts.fc2.weight", 'w2'), + ] + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + if ("rotary_emb.cos_cached" in name + or "rotary_emb.sin_cached" in name): + # Models trained using ColossalAI may include these tensors in + # the checkpoint. Skip them. + continue + if scale_name := get_compressed_tensors_cache_scale(name): + # Loading kv cache scales for compressed-tensors quantization + param = params_dict[scale_name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + loaded_weight = loaded_weight[0] + weight_loader(param, loaded_weight) + loaded_params.add(scale_name) + continue + for param_name, weight_name, shard_id in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + + if is_pp_missing_parameter(name, self): + continue + + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + # Remapping the name of FP8 kv-scale. 
+ name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + continue + + if is_pp_missing_parameter(name, self): + continue + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + + +def build_mm_projector(config): + return AriaProjector( + patch_to_query_dict=config.projector_patch_to_query_dict, + embed_dim=config.vision_config.hidden_size, + num_heads=config.vision_config.num_attention_heads, + kv_dim=config.vision_config.hidden_size, + ff_dim=config.text_config.hidden_size, + output_dim=config.text_config.hidden_size, + ) + + +def get_max_multimodal_tokens(ctx): + return max(ctx.model_config.hf_config.image_size2tokens.values()) + + +def input_mapper_for_aria(ctx, data): + return MultiModalInputs(data) + + +def input_processor(ctx, llm_inputs): + multi_modal_data = llm_inputs.get("multi_modal_data") + # if it is pure text input, use it as is + if multi_modal_data is None or "image" not in multi_modal_data: + return llm_inputs + + model_config = ctx.model_config + + tokenizer = cached_get_tokenizer(model_config.tokenizer) + image_processor = cached_get_image_processor( + model_config.model, trust_remote_code=model_config.trust_remote_code) + hf_config = model_config.hf_config + + # prepare image tokens, the max_image_size is used to determine the number + # of patch_size for every image + max_image_size = multi_modal_data.pop("max_image_size", 980) + _split_image = multi_modal_data.pop("split_image", False) + + assert isinstance(max_image_size, + (int, float)), "max_image_size should be float or int" + images = (multi_modal_data["image"] if isinstance( + multi_modal_data["image"], list) else [multi_modal_data["image"]]) + + image_inputs = image_processor.preprocess(images, + max_image_size=max_image_size, + split_image=_split_image, + return_tensors="pt").data + image_inputs['pixel_values'] = image_inputs['pixel_values'].to( + ctx.model_config.dtype) + num_crops = image_inputs.pop("num_crops") + + prompt_token_ids = llm_inputs["prompt_token_ids"] + if num_crops.sum().item() > 0: + _, prompt_token_ids, _ = repeat_and_pad_placeholder_tokens( + tokenizer, + None, + prompt_token_ids, + placeholder_token_id=hf_config.image_token_index, + repeat_count=num_crops, + ) + + repeat_count = [hf_config.image_size2tokens[max_image_size] + ] * sum(num_crops).item() + new_prompt, new_token_ids, _ = repeat_and_pad_placeholder_tokens( + tokenizer, + None, + prompt_token_ids, + placeholder_token_id=hf_config.image_token_index, + repeat_count=repeat_count, + ) + + return token_inputs( + prompt_token_ids=new_token_ids, + prompt=new_prompt, + multi_modal_data={"image": image_inputs}, + ) + + +@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_multimodal_tokens) +@MULTIMODAL_REGISTRY.register_image_input_mapper(input_mapper_for_aria) +@INPUT_REGISTRY.register_input_processor(input_processor) +class AriaForConditionalGeneration(nn.Module, SupportsMultiModal): + """ + Aria model for conditional generation tasks. + + This model combines a vision tower, a multi-modal projector, and a language + model to perform tasks that involve both image and text inputs. 
+ """ + + def __init__( + self, + vllm_config: VllmConfig, + prefix: str = "", + ): + super().__init__() + config = vllm_config.model_config.hf_config + quant_config = vllm_config.quant_config + + # prepare the image_size to tokens mapping for the image preprocess, see + # input_processor + config.image_size2tokens = { + int(math.sqrt(k) * config.vision_config.patch_size): v + for k, v in config.projector_patch_to_query_dict.items() + } + self.config = config + self.vision_tower = AriaVisionModel(config.vision_config) + self.multi_modal_projector = build_mm_projector(config) + self.vocab_size = config.text_config.vocab_size + self.language_model = AriaMoELMModel( + vllm_config=vllm_config.with_hf_config(config.text_config), + prefix=maybe_prefix(prefix, "language_model.model"), + ) + self.pad_token_id = (self.config.pad_token_id + if self.config.pad_token_id is not None else -1) + self.unpadded_vocab_size = config.text_config.vocab_size + self.lm_head = ParallelLMHead( + self.unpadded_vocab_size, + config.text_config.hidden_size, + org_num_embeddings=self.language_model.org_vocab_size, + quant_config=quant_config, + ) + logit_scale = getattr(config, "logit_scale", 1.0) + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + self.vocab_size, logit_scale) + self.sampler = Sampler() + + def _validate_image_sizes( + self, images: List[torch.Tensor]) -> List[torch.Tensor]: + if not all(img.shape == images[0].shape for img in images): + raise ValueError("All images must be the same size") + return images + + def _parse_and_validate_image_input( + self, **kwargs: object) -> Optional[AriaImagePixelInputs]: + pixel_values = kwargs.pop("pixel_values", None) + pixel_mask = kwargs.pop("pixel_mask", None) + + if pixel_values is None: + return None + + if not isinstance(pixel_values, (torch.Tensor, list)): + raise ValueError("Incorrect type of pixel values. 
" + f"Got type: {type(pixel_values)}") + + pixel_values = self._validate_image_sizes(pixel_values) + pixel_values = flatten_bn(pixel_values, concat=True) + if pixel_mask is not None: + pixel_mask = flatten_bn(pixel_mask, concat=True) + + return AriaImagePixelInputs( + pixel_values=pixel_values, + pixel_mask=pixel_mask, + ) + + def _process_image_input( + self, image_input: AriaImagePixelInputs + ) -> Tuple[torch.Tensor, torch.Tensor]: + assert self.vision_tower is not None + + pixel_values = image_input['pixel_values'] + pixel_mask = image_input['pixel_mask'] + + image_feature, image_attn_mask = self.vision_tower( + pixel_values, pixel_mask=pixel_mask) + return self.multi_modal_projector(image_feature, image_attn_mask) + + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + multimodal_embeddings = self._process_image_input(image_input) + return multimodal_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + self.config.image_token_index) + return inputs_embeds + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, + **kwargs: object, + ) -> Union[torch.Tensor, IntermediateTensors]: + if inputs_embeds is None: + multimodal_embeddings = self.get_multimodal_embeddings(**kwargs) + # always pass the input via `inputs_embeds` + # to make sure the computation graph is consistent + inputs_embeds = self.get_input_embeddings(input_ids, + multimodal_embeddings) + input_ids = None + + hidden_states = self.language_model( + input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds, + ) + + return hidden_states + + def compute_logits(self, hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata) -> torch.Tensor: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + "language_model.model": "language_model", + "language_model.lm_head": "lm_head", + }, + orig_to_new_suffix={ + "router.weight": "router_weight", + }, + ) + + loader = AutoWeightsLoader(self) + loader.load_weights(weights, mapper=hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 789ffb4d3bde0..184f4b2bc1526 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -133,6 +133,7 @@ _MULTIMODAL_MODELS = { # [Decoder-only] + "AriaForConditionalGeneration": ("aria", "AriaForConditionalGeneration"), "Blip2ForConditionalGeneration": ("blip2", "Blip2ForConditionalGeneration"), "ChameleonForConditionalGeneration": ("chameleon", "ChameleonForConditionalGeneration"), # noqa: E501 
"ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"), diff --git a/vllm/transformers_utils/configs/aria.py b/vllm/transformers_utils/configs/aria.py new file mode 100644 index 0000000000000..d253da0d96a34 --- /dev/null +++ b/vllm/transformers_utils/configs/aria.py @@ -0,0 +1,47 @@ +from transformers.models.idefics2.configuration_idefics2 import ( + Idefics2VisionConfig) +from transformers.models.llama.configuration_llama import LlamaConfig + + +class AriaVisionConfig(Idefics2VisionConfig): + model_type = "aria_vision_model" + + +class AriaMoELMConfig(LlamaConfig): + """ + Configuration class for AriaMoE language model. + + This class extends the LlamaConfig to include additional parameters specific + to the Mixture of Experts (MoE) architecture. + """ + + model_type = "aria_moe_lm" + + def __init__( + self, + moe_intermediate_size: int = 4096, + moe_num_experts: int = 8, + moe_topk: int = 2, + moe_num_shared_experts: int = 2, + **kwargs, + ): + """ + Initialize the AriaMoELMConfig. + + Args: + moe_intermediate_size (int): The intermediate size for MoE layers. + Default is 4096. + moe_num_experts (int): The number of experts in the MoE layer. + Default is 8. + moe_topk (int): The number of top experts to route to for each + token. Default is 2. + moe_num_shared_experts (int): The number of shared experts. Default + is 2. + **kwargs: Additional keyword arguments to be passed to the parent + LlamaConfig. + """ + super().__init__(**kwargs) + self.moe_intermediate_size = moe_intermediate_size + self.moe_num_experts = moe_num_experts + self.moe_topk = moe_topk + self.moe_num_shared_experts = moe_num_shared_experts From cf73f0c95e09836efff876d5bfd9b9c6cc1ba06e Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 26 Nov 2024 02:14:33 +0800 Subject: [PATCH 142/397] [Model] Enable optional prefix when loading embedding models (#10639) Signed-off-by: DarkLight1337 --- vllm/model_executor/models/bert.py | 9 +++++---- vllm/model_executor/models/gemma2.py | 4 +++- vllm/model_executor/models/llama.py | 5 ++++- vllm/model_executor/models/qwen2.py | 12 ++++++------ vllm/model_executor/models/roberta.py | 3 ++- 5 files changed, 20 insertions(+), 13 deletions(-) diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index f570d6d3c12b3..1fff72b3490e9 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -14,18 +14,17 @@ RowParallelLinear) from vllm.model_executor.layers.pooler import (CrossEncodingPooler, Pooler, PoolingType) -from vllm.model_executor.layers.quantization.base_config import ( - QuantizationConfig) +from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.models.interfaces import SupportsCrossEncoding from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.sequence import IntermediateTensors, PoolerOutput from vllm.transformers_utils.config import ( get_cross_encoder_activation_function) -from .utils import maybe_prefix +from .interfaces import SupportsCrossEncoding +from .utils import WeightsMapper, maybe_prefix class BertEmbedding(nn.Module): @@ -442,6 +441,8 @@ def pooler( return self._pooler(hidden_states, pooling_metadata) def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) + weights = 
hf_to_vllm_mapper.apply(weights) self.model.load_weights(weights) def _build_model(self, diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index fd8223dd9be1b..d229eb74669ee 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -42,7 +42,7 @@ from vllm.sequence import IntermediateTensors, PoolerOutput from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, extract_layer_index, +from .utils import (AutoWeightsLoader, WeightsMapper, extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -511,4 +511,6 @@ def pooler( return self._pooler(hidden_states, pooling_metadata) def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) + weights = hf_to_vllm_mapper.apply(weights) self.model.load_weights(weights) diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 66b29e72cfa89..33d78d74129c8 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -53,7 +53,8 @@ from vllm.sequence import IntermediateTensors, PoolerOutput from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, PPMissingLayer, is_pp_missing_parameter, +from .utils import (AutoWeightsLoader, PPMissingLayer, WeightsMapper, + is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -689,6 +690,8 @@ def pooler( return self._pooler(hidden_states, pooling_metadata) def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) + weights = hf_to_vllm_mapper.apply(weights) self.model.load_weights(weights) def load_kv_cache_scales(self, quantization_param_path: str) -> None: diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 8da75c9935a13..46640226d4cf8 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -50,7 +50,8 @@ from vllm.sequence import IntermediateTensors, PoolerOutput from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, PPMissingLayer, is_pp_missing_parameter, +from .utils import (AutoWeightsLoader, PPMissingLayer, WeightsMapper, + is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -585,8 +586,7 @@ def pooler( ) -> Optional[PoolerOutput]: return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, - torch.Tensor]]) -> Set[str]: - loader = AutoWeightsLoader(self, - ignore_unexpected_prefixes=["lm_head."]) - return loader.load_weights(weights) + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) + weights = hf_to_vllm_mapper.apply(weights) + self.model.load_weights(weights) diff --git a/vllm/model_executor/models/roberta.py b/vllm/model_executor/models/roberta.py index 5a296e311f079..ba1a78ac640fd 100644 --- a/vllm/model_executor/models/roberta.py +++ b/vllm/model_executor/models/roberta.py @@ -11,13 +11,14 @@ VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.bert import BertEmbeddingModel, BertModel -from vllm.model_executor.models.interfaces import SupportsCrossEncoding from 
vllm.model_executor.models.utils import maybe_prefix from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.sequence import IntermediateTensors, PoolerOutput from vllm.transformers_utils.config import ( get_cross_encoder_activation_function) +from .interfaces import SupportsCrossEncoding + class RobertaEmbedding(nn.Module): From 1b583cfefad4ffa030bda1c1265aec6e7755a6d2 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 26 Nov 2024 02:15:45 +0800 Subject: [PATCH 143/397] [Doc] Fix typos in docs (#10636) Signed-off-by: DarkLight1337 --- docs/source/models/supported_models.rst | 2 +- docs/source/serving/compatibility_matrix.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 7a6932d65e653..3f012284bfbff 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -365,7 +365,7 @@ Text Embedding .. note:: Unlike base Qwen2, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` uses bi-directional attention. - You can set `--hf-overrides '{"is_causal": false}'` to change the attention mask accordingly. + You can set :code:`--hf-overrides '{"is_causal": false}'` to change the attention mask accordingly. On the other hand, its 1.5B variant (:code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`) uses causal attention despite being described otherwise on its model card. diff --git a/docs/source/serving/compatibility_matrix.rst b/docs/source/serving/compatibility_matrix.rst index a4300761d2635..fa03d2cde1486 100644 --- a/docs/source/serving/compatibility_matrix.rst +++ b/docs/source/serving/compatibility_matrix.rst @@ -393,7 +393,7 @@ Feature x Hardware - ✅ - ✅ - ✅ - - ✗ + - ? * - :abbr:`enc-dec (Encoder-Decoder Models)` - ✅ - ✅ From 9db713a1dca7e1bc9b6ecf5303c63c7352c52a13 Mon Sep 17 00:00:00 2001 From: Shane A Date: Mon, 25 Nov 2024 14:26:40 -0800 Subject: [PATCH 144/397] [Model] Add OLMo November 2024 model (#10503) --- docs/source/models/supported_models.rst | 5 + tests/distributed/test_pipeline_parallel.py | 1 + tests/models/registry.py | 1 + vllm/model_executor/models/olmo2.py | 432 ++++++++++++++++++++ vllm/model_executor/models/registry.py | 1 + vllm/transformers_utils/config.py | 5 +- vllm/transformers_utils/configs/__init__.py | 2 + vllm/transformers_utils/configs/olmo2.py | 166 ++++++++ 8 files changed, 611 insertions(+), 2 deletions(-) create mode 100644 vllm/model_executor/models/olmo2.py create mode 100644 vllm/transformers_utils/configs/olmo2.py diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 3f012284bfbff..b5cbe6915d581 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -234,6 +234,11 @@ Text Generation - :code:`allenai/OLMo-1B-hf`, :code:`allenai/OLMo-7B-hf`, etc. - - ✅︎ + * - :code:`OLMo2ForCausalLM` + - OLMo2 + - :code:`allenai/OLMo2-7B-1124`, etc. + - + - ✅︎ * - :code:`OLMoEForCausalLM` - OLMoE - :code:`allenai/OLMoE-1B-7B-0924`, :code:`allenai/OLMoE-1B-7B-0924-Instruct`, etc. 
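As a usage sketch (not part of this patch), once the Olmo2ForCausalLM architecture is registered the model loads through the usual LLM entry point. The checkpoint id below is the one used in the patch's own test registry; any checkpoint with this architecture should behave the same way.

# Hedged example of running the newly added OLMo2 architecture offline.
# The checkpoint id comes from the test registry added in this patch.
from vllm import LLM, SamplingParams

llm = LLM(model="shanearora/OLMo-7B-1124-hf", dtype="bfloat16")
params = SamplingParams(temperature=0.0, max_tokens=64)
print(llm.generate("Deep learning is", params)[0].outputs[0].text)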
diff --git a/tests/distributed/test_pipeline_parallel.py b/tests/distributed/test_pipeline_parallel.py index c49ed9802cde8..386877e0e0a2c 100644 --- a/tests/distributed/test_pipeline_parallel.py +++ b/tests/distributed/test_pipeline_parallel.py @@ -167,6 +167,7 @@ def iter_params(self, model_name: str): "mosaicml/mpt-7b": PPTestSettings.fast(), "nvidia/Minitron-8B-Base": PPTestSettings.fast(), "allenai/OLMo-1B-hf": PPTestSettings.fast(), + "shanearora/OLMo-7B-1124-hf": PPTestSettings.fast(), "allenai/OLMoE-1B-7B-0924-Instruct": PPTestSettings.fast(), "facebook/opt-iml-max-1.3b": PPTestSettings.fast(), "OrionStarAI/Orion-14B-Chat": PPTestSettings.fast(trust_remote_code=True), diff --git a/tests/models/registry.py b/tests/models/registry.py index 669c832b1df3a..865e90b3f8b0e 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -93,6 +93,7 @@ class _HfExamplesInfo: "MPTForCausalLM": _HfExamplesInfo("mosaicml/mpt-7b"), "NemotronForCausalLM": _HfExamplesInfo("nvidia/Minitron-8B-Base"), "OlmoForCausalLM": _HfExamplesInfo("allenai/OLMo-1B-hf"), + "Olmo2ForCausalLM": _HfExamplesInfo("shanearora/OLMo-7B-1124-hf"), "OlmoeForCausalLM": _HfExamplesInfo("allenai/OLMoE-1B-7B-0924-Instruct"), "OPTForCausalLM": _HfExamplesInfo("facebook/opt-iml-max-1.3b"), "OrionForCausalLM": _HfExamplesInfo("OrionStarAI/Orion-14B-Chat", diff --git a/vllm/model_executor/models/olmo2.py b/vllm/model_executor/models/olmo2.py new file mode 100644 index 0000000000000..a35c911f90d96 --- /dev/null +++ b/vllm/model_executor/models/olmo2.py @@ -0,0 +1,432 @@ +# Adapted from +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/olmo2/modeling_olmo2.py +# Copyright 2024 The vLLM team. +# Copyright 2024 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Inference-only OLMo2 model compatible with HuggingFace weights.""" + +from functools import partial +from typing import Iterable, List, Optional, Tuple, Union + +import torch +from torch import nn + +from vllm.attention import Attention, AttentionMetadata +from vllm.config import VllmConfig +from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size +from vllm.distributed.communication_op import tensor_model_parallel_all_gather +from vllm.distributed.parallel_state import get_tensor_model_parallel_rank +from vllm.distributed.utils import split_tensor_along_last_dim +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.sampler import Sampler, SamplerOutput +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.models.interfaces import SupportsPP +from vllm.model_executor.models.utils import ( + is_pp_missing_parameter, make_empty_intermediate_tensors_factory, + make_layers, maybe_prefix) +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors +from vllm.transformers_utils.configs.olmo2 import Olmo2Config + + +class Olmo2Attention(nn.Module): + """ + This is the attention block where the output is computed as + ``Attention(LN(x))`` in ``MLP(LN(x + Attention(LN(x))))`` + (plus another skip connection). + """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + self.config = vllm_config.model_config.hf_config + assert isinstance(self.config, Olmo2Config) + + hidden_size = self.config.hidden_size + self.tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = self.config.num_attention_heads + + assert hidden_size % self.total_num_heads == 0 + assert self.total_num_heads % self.tp_size == 0 + + self.num_heads = self.total_num_heads // self.tp_size + self.total_num_kv_heads = (self.config.num_key_value_heads + or self.total_num_heads) + if self.total_num_kv_heads >= self.tp_size: + assert self.total_num_kv_heads % self.tp_size == 0 + else: + assert self.tp_size % self.total_num_kv_heads == 0 + + self.num_kv_heads = max(1, self.total_num_kv_heads // self.tp_size) + self.head_dim = hidden_size // self.total_num_heads + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.max_position_embeddings = self.config.max_position_embeddings + self.rope_theta = self.config.rope_theta + + # Attention input projection. Projects x -> (q, k, v) + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=False, + quant_config=vllm_config.quant_config, + prefix=f"{prefix}.qkv_proj", + ) + + self.tp_rank = get_tensor_model_parallel_rank() + self.k_norm = RMSNorm( + self.total_num_kv_heads * self.head_dim, + eps=self.config.rms_norm_eps, + ) + self.q_norm = RMSNorm(self.config.hidden_size, + eps=self.config.rms_norm_eps) + + # Rotary embeddings. 
+ self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=self.max_position_embeddings, + base=self.rope_theta, # type: ignore + ) + self.scaling = self.head_dim**-0.5 + self.attn = Attention( + self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + cache_config=vllm_config.cache_config, + quant_config=vllm_config.quant_config, + prefix=prefix, + ) + + # Attention output projection. + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=False, + quant_config=vllm_config.quant_config, + prefix=f"{prefix}.o_proj", + ) + + def _apply_qk_norm(self, q: torch.Tensor, + k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + if self.tp_size > 1: + q = tensor_model_parallel_all_gather(q.contiguous()) + k = tensor_model_parallel_all_gather(k.contiguous()) + q = self.q_norm.forward_native(q) + k = self.k_norm.forward_native(k) + if self.tp_size > 1: + splitter = partial(split_tensor_along_last_dim, + num_partitions=self.tp_size) + q = splitter(q)[self.tp_rank] + k = splitter(k)[self.tp_rank] + return q, k + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.chunk(chunks=3, dim=-1) + q, k = self._apply_qk_norm(q, k) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v, kv_cache, attn_metadata) + output, _ = self.o_proj(attn_output) + return output + + +class Olmo2MLP(nn.Module): + """ + This is the MLP block where the output is computed as + ``MLP(x)`` in ``LN(MLP(x + LN(Attention(x))))`` + (plus another skip connection). + """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + assert isinstance(config, Olmo2Config) + hidden_size = config.hidden_size + intermediate_size = config.intermediate_size + + # Feed-forward input projection. + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, + [intermediate_size] * 2, + bias=False, + quant_config=vllm_config.quant_config, + prefix=f"{prefix}.gate_up_proj", + ) + + # Activation function. + self.act_fn = SiluAndMul() + + # Feed-forward output projection. + self.down_proj = RowParallelLinear( + intermediate_size, + hidden_size, + bias=False, + quant_config=vllm_config.quant_config, + prefix=f"{prefix}.down_proj", + ) + + def forward( + self, + x: torch.Tensor, + ) -> torch.Tensor: + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class Olmo2DecoderLayer(nn.Module): + """ + This is a typical transformer block where the output is + computed as ``MLP(LN(x + Attention(LN(x))))`` + (plus another skip connection). + """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + assert isinstance(config, Olmo2Config) + # Attention block. + self.self_attn = Olmo2Attention(vllm_config=vllm_config, + prefix=f"{prefix}.self_attn") + + # MLP block. 
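+        # Note: in forward(), the norms defined below are applied to the
+        # attention/MLP outputs before the residual additions (post-norm),
+        # rather than to the sublayer inputs.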
+ self.mlp = Olmo2MLP(vllm_config=vllm_config, prefix=f"{prefix}.mlp") + + # LayerNorm + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + self.post_feedforward_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AttentionMetadata, + ) -> torch.Tensor: + # Attention block. + residual = hidden_states + hidden_states = self.self_attn(positions, hidden_states, kv_cache, + attn_metadata) + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = hidden_states + residual + + # MLP block. + residual = hidden_states + hidden_states = self.mlp(hidden_states) + hidden_states = self.post_feedforward_layernorm(hidden_states) + hidden_states = residual + hidden_states + return hidden_states + + +class Olmo2Model(nn.Module): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + self.config = vllm_config.model_config.hf_config + assert isinstance(self.config, Olmo2Config) + + self.embed_tokens = VocabParallelEmbedding( + self.config.vocab_size, + self.config.hidden_size, + prefix=f"{prefix}.embed_tokens", + ) + self.start_layer, self.end_layer, self.layers = make_layers( + self.config.num_hidden_layers, + lambda prefix: Olmo2DecoderLayer(vllm_config=vllm_config, + prefix=prefix), + prefix=f"{prefix}.layers", + ) + self.norm = RMSNorm( + self.config.hidden_size, + eps=self.config.rms_norm_eps, + ) + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory(["hidden_states"], + self.config.hidden_size)) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + intermediate_tensors: Optional[IntermediateTensors], + ) -> Union[torch.Tensor, IntermediateTensors]: + """ + :param input_ids: A tensor of shape `(batch_size, seq_len)`. + """ + if get_pp_group().is_first_rank: + # Get embeddings of input. + # shape: (batch_size, seq_len, d_model) + inputs_embeds = self.embed_tokens(input_ids) + + # embed positions + hidden_states = inputs_embeds + else: + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + assert isinstance(hidden_states, torch.Tensor) + + # Apply blocks one-by-one. + for i in range(self.start_layer, self.end_layer): + # shape: (batch_size, seq_len, d_model) + hidden_states = self.layers[i]( + positions, + hidden_states, + kv_caches[i - self.start_layer], + attn_metadata, + ) + + if not get_pp_group().is_last_rank: + return IntermediateTensors({"hidden_states": hidden_states}) + + # Apply final layer norm. + # shape: (batch_size, seq_len or 1, d_model) + hidden_states = self.norm(hidden_states) + return hidden_states + + +class Olmo2ForCausalLM(nn.Module, SupportsPP): + """ + Extremely barebones HF model wrapper. 
+ """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + assert isinstance(config, Olmo2Config) + self.config = config + self.model = Olmo2Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + if config.tie_word_embeddings: + self.lm_head = self.model.embed_tokens + else: + self.unpadded_vocab_size = config.vocab_size + self.lm_head = ParallelLMHead( + config.vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + quant_config=vllm_config.quant_config, + prefix=maybe_prefix(prefix, "lm_head"), + ) + self.logits_processor = LogitsProcessor(config.vocab_size) + self.sampler = Sampler() + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + intermediate_tensors: Optional[IntermediateTensors] = None, + ) -> Union[torch.Tensor, IntermediateTensors]: + hidden_states = self.model( + input_ids=input_ids, + positions=positions, + kv_caches=kv_caches, + attn_metadata=attn_metadata, + intermediate_tensors=intermediate_tensors, + ) + return hidden_states + + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[torch.Tensor]: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits + + def sample( + self, + logits: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[SamplerOutput]: + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + + params_dict = dict(self.named_parameters(remove_duplicate=False)) + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + if ("rotary_emb.cos_cached" in name + or "rotary_emb.sin_cached" in name): + # Models trained using ColossalAI may include these tensors in + # the checkpoint. Skip them. + continue + if is_pp_missing_parameter(name, self): + continue + # With tie_word_embeddings, we can skip lm_head.weight + # The weight might appear unnecessarily in the files if the model is + # processed with quantization, LoRA, fine-tuning, etc. + if self.config.tie_word_embeddings and "lm_head.weight" in name: + continue + for param_name, weight_name, shard_id in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = param.weight_loader # type: ignore + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 184f4b2bc1526..f5a02a5b25ca2 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -74,6 +74,7 @@ "MPTForCausalLM": ("mpt", "MPTForCausalLM"), "NemotronForCausalLM": ("nemotron", "NemotronForCausalLM"), "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"), + "Olmo2ForCausalLM": ("olmo2", "Olmo2ForCausalLM"), "OlmoeForCausalLM": ("olmoe", "OlmoeForCausalLM"), "OPTForCausalLM": ("opt", "OPTForCausalLM"), "OrionForCausalLM": ("orion", "OrionForCausalLM"), diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 70d18d40b7aa7..4c096acdf2035 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -28,8 +28,8 @@ MedusaConfig, MllamaConfig, MLPSpeculatorConfig, MPTConfig, NemotronConfig, NVLM_D_Config, - RWConfig, SolarConfig, - UltravoxConfig) + Olmo2Config, RWConfig, + SolarConfig, UltravoxConfig) # yapf: enable from vllm.transformers_utils.utils import check_gguf_file from vllm.utils import resolve_obj_by_qualname @@ -62,6 +62,7 @@ "internvl_chat": InternVLChatConfig, "nemotron": NemotronConfig, "NVLM_D": NVLM_D_Config, + "olmo2": Olmo2Config, "solar": SolarConfig, "ultravox": UltravoxConfig, **_CONFIG_REGISTRY_OVERRIDE_HF diff --git a/vllm/transformers_utils/configs/__init__.py b/vllm/transformers_utils/configs/__init__.py index d1e19c9a33c24..4c721001d8434 100644 --- a/vllm/transformers_utils/configs/__init__.py +++ b/vllm/transformers_utils/configs/__init__.py @@ -15,6 +15,7 @@ from vllm.transformers_utils.configs.mpt import MPTConfig from vllm.transformers_utils.configs.nemotron import NemotronConfig from vllm.transformers_utils.configs.nvlm_d import NVLM_D_Config +from vllm.transformers_utils.configs.olmo2 import Olmo2Config from vllm.transformers_utils.configs.solar import SolarConfig from vllm.transformers_utils.configs.ultravox import UltravoxConfig @@ -33,6 +34,7 @@ "MLPSpeculatorConfig", "NemotronConfig", "NVLM_D_Config", + "Olmo2Config", "SolarConfig", "UltravoxConfig", ] \ No newline at end of file diff --git a/vllm/transformers_utils/configs/olmo2.py b/vllm/transformers_utils/configs/olmo2.py new file mode 100644 index 0000000000000..0e6d8e4879b06 --- /dev/null +++ b/vllm/transformers_utils/configs/olmo2.py @@ -0,0 +1,166 @@ +# yapf: disable +# ruff: noqa: E501 +# coding=utf-8 +# Copied from +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/olmo2/configuration_olmo2.py +"""OLMo 2 configuration.""" + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class Olmo2Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2 + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf). + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. 
+ + + Args: + vocab_size (`int`, *optional*, defaults to 50304): + Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`Olmo2Model`] + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the hidden representations. + intermediate_size (`int`, *optional*, defaults to 11008): + Dimension of the MLP representations. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer decoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer decoder. + num_key_value_heads (`int`, *optional*): + This is the number of key_value heads that should be used to implement Grouped Query Attention. If + `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if + `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When + converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed + by meanpooling all the original heads within that group. For more details checkout [this + paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to + `num_attention_heads`. + hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + pad_token_id (`int`, *optional*, defaults to 1): + Padding token id. + bos_token_id (`int`, *optional*): + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 50279): + End of stream token id. + tie_word_embeddings (`bool`, *optional*, defaults to `False`): + Whether to tie weight embeddings + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. + rope_scaling (`Dict`, *optional*): + Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling + strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is + `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update + `max_position_embeddings` to the expected new maximum. See the following thread for more information on how + these scaling strategies behave: + https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an + experimental feature, subject to breaking API changes in future versions. + attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): + Whether to use a bias in the query, key, value and output projection layers during self-attention. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + rms_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the rms normalization layers. 
+ + ```python + >>> from transformers import Olmo2Model, Olmo2Config + + >>> # Initializing a Olmo2 7B style configuration + >>> configuration = Olmo2Config() + + >>> # Initializing a model from the Olmo2 7B style configuration + >>> model = Olmo2Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ``` + """ + + model_type = "olmo2" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=50304, + hidden_size=4096, + intermediate_size=11008, + num_hidden_layers=32, + num_attention_heads=32, + num_key_value_heads=None, + hidden_act="silu", + max_position_embeddings=2048, + initializer_range=0.02, + use_cache=True, + pad_token_id=1, + bos_token_id=None, + eos_token_id=50279, + tie_word_embeddings=False, + rope_theta=10000.0, + rope_scaling=None, + attention_bias=False, + attention_dropout=0.0, + rms_norm_eps=1e-5, + **kwargs, + ): + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.use_cache = use_cache + self.rope_theta = rope_theta + self.rope_scaling = rope_scaling + self._rope_scaling_validation() + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + + self.rms_norm_eps = rms_norm_eps + + def _rope_scaling_validation(self): + """ + Validate the `rope_scaling` configuration. + """ + if self.rope_scaling is None: + return + + if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2: + raise ValueError( + "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}" + ) + rope_scaling_type = self.rope_scaling.get("type", None) + rope_scaling_factor = self.rope_scaling.get("factor", None) + if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: + raise ValueError( + f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" + ) + if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0: + raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}") From 6e9ff050c8e83ad6d5e5eab621e83549e35933a1 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 25 Nov 2024 17:04:50 -0800 Subject: [PATCH 145/397] [misc] do not read HOST_IP (#10644) Signed-off-by: youkaichao --- vllm/envs.py | 2 +- vllm/executor/ray_gpu_executor.py | 4 ++-- vllm/executor/ray_hpu_executor.py | 4 ++-- vllm/utils.py | 7 +++++++ 4 files changed, 12 insertions(+), 5 deletions(-) diff --git a/vllm/envs.py b/vllm/envs.py index 14c1617f1be19..c896770e5f6bc 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -153,7 +153,7 @@ def get_default_config_root(): # If you are using multi-node inference, you should set this differently # on each node. 
'VLLM_HOST_IP': - lambda: os.getenv('VLLM_HOST_IP', "") or os.getenv("HOST_IP", ""), + lambda: os.getenv('VLLM_HOST_IP', ""), # used in distributed environment to manually set the communication port # Note: if VLLM_PORT is set, and some code asks for multiple ports, the diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 810b0f06ff7b2..6542b18ae70b1 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -216,8 +216,8 @@ def sort_by_driver_then_worker_ip(worker): f"Every node should have a unique IP address. Got {n_nodes}" f" nodes with node ids {list(node_workers.keys())} and " f"{n_ips} unique IP addresses {all_ips}. Please check your" - " network configuration. If you set `VLLM_HOST_IP` or " - "`HOST_IP` environment variable, make sure it is unique for" + " network configuration. If you set `VLLM_HOST_IP`" + " environment variable, make sure it is unique for" " each node.") VLLM_INSTANCE_ID = get_vllm_instance_id() diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py index 6fe8c6c403358..a74328e5aa272 100644 --- a/vllm/executor/ray_hpu_executor.py +++ b/vllm/executor/ray_hpu_executor.py @@ -192,8 +192,8 @@ def sort_by_driver_then_worker_ip(worker): f"Every node should have a unique IP address. Got {n_nodes}" f" nodes with node ids {list(node_workers.keys())} and " f"{n_ips} unique IP addresses {all_ips}. Please check your" - " network configuration. If you set `VLLM_HOST_IP` or " - "`HOST_IP` environment variable, make sure it is unique for" + " network configuration. If you set `VLLM_HOST_IP` " + "environment variable, make sure it is unique for" " each node.") VLLM_INSTANCE_ID = get_vllm_instance_id() diff --git a/vllm/utils.py b/vllm/utils.py index dd4283e3ac381..bec876d983701 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -467,6 +467,13 @@ async def collect_from_async_generator( def get_ip() -> str: host_ip = envs.VLLM_HOST_IP + if "HOST_IP" in os.environ and "VLLM_HOST_IP" not in os.environ: + logger.warning( + "The environment variable HOST_IP is deprecated and ignored, as" + " it is often used by Docker and other software to" + "interact with the container's network stack. 
Please" + "use VLLM_HOST_IP instead to set the IP address for vLLM processes" + " to communicate with each other.") if host_ip: return host_ip From 45ac4ff270b267765457159c0b75e1bb7ebf6d79 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 25 Nov 2024 18:32:09 -0800 Subject: [PATCH 146/397] [bugfix] fix aria model and add torch.compile (#10645) Signed-off-by: youkaichao --- vllm/model_executor/models/aria.py | 26 ++++---------------------- vllm/model_executor/models/llama.py | 16 ++++++++++------ 2 files changed, 14 insertions(+), 28 deletions(-) diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index 0356435e9c257..fa6b95f5481ad 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -29,7 +29,7 @@ LlamaModel) from vllm.model_executor.models.utils import (AutoWeightsLoader, WeightsMapper, is_pp_missing_parameter, - make_layers, maybe_prefix, + maybe_prefix, merge_multimodal_embeddings) from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.base import MultiModalInputs @@ -363,27 +363,9 @@ class AriaMoELMModel(LlamaModel): """ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__(vllm_config=vllm_config, prefix=prefix) - - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - - # FIXME: this is a hack to disable the compilation of the model - self.do_not_compile = True - - self.layers = None - - self.start_layer, self.end_layer, self.layers = make_layers( - config.num_hidden_layers, - lambda prefix: MoEDecoderLayer( - config=config, - cache_config=cache_config, - quant_config=quant_config, - prefix=prefix, - ), - prefix=f"{prefix}.layers", - ) + super().__init__(vllm_config=vllm_config, + prefix=prefix, + layer_type=MoEDecoderLayer) # Adapted from LlamaModel.load_weights with the modification of adding # the expert weights mapping to `stacked_params_mapping` diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 33d78d74129c8..355b2f3ef8b28 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -20,7 +20,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""Inference-only LLaMA model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type, Union import torch from torch import nn @@ -273,7 +273,11 @@ def forward( @support_torch_compile class LlamaModel(nn.Module): - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + def __init__(self, + *, + vllm_config: VllmConfig, + prefix: str = "", + layer_type: Type[LlamaDecoderLayer] = LlamaDecoderLayer): super().__init__() config = vllm_config.model_config.hf_config @@ -299,10 +303,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.embed_tokens = PPMissingLayer() self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: LlamaDecoderLayer(config=config, - cache_config=cache_config, - quant_config=quant_config, - prefix=prefix), + lambda prefix: layer_type(config=config, + cache_config=cache_config, + quant_config=quant_config, + prefix=prefix), prefix=f"{prefix}.layers", ) if get_pp_group().is_last_rank: From a6760f6456b714409685e23301c820a85da856ca Mon Sep 17 00:00:00 2001 From: Sanket Kale Date: Tue, 26 Nov 2024 08:02:39 +0530 Subject: [PATCH 147/397] [Feature] vLLM ARM Enablement for AARCH64 CPUs (#9228) Signed-off-by: Sanket Kale Co-authored-by: Sanket Kale Co-authored-by: mgoin --- Dockerfile.arm | 62 +++ cmake/cpu_extension.cmake | 33 +- csrc/cpu/attention.cpp | 18 +- csrc/cpu/cpu_types.hpp | 6 +- csrc/cpu/cpu_types_arm.hpp | 515 ++++++++++++++++++ .../getting_started/arm-installation.rst | 50 ++ docs/source/index.rst | 1 + examples/offline_inference.py | 2 +- requirements-cpu.txt | 7 +- 9 files changed, 678 insertions(+), 16 deletions(-) create mode 100644 Dockerfile.arm create mode 100644 csrc/cpu/cpu_types_arm.hpp create mode 100644 docs/source/getting_started/arm-installation.rst diff --git a/Dockerfile.arm b/Dockerfile.arm new file mode 100644 index 0000000000000..093ee2209222f --- /dev/null +++ b/Dockerfile.arm @@ -0,0 +1,62 @@ +# This vLLM Dockerfile is used to construct an image that can build and run vLLM on ARM CPU platform. + +FROM ubuntu:22.04 AS cpu-test-arm + +ENV CCACHE_DIR=/root/.cache/ccache + +ENV CMAKE_CXX_COMPILER_LAUNCHER=ccache + +RUN --mount=type=cache,target=/var/cache/apt \ + apt-get update -y \ + && apt-get install -y curl ccache git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 libnuma-dev \ + && apt-get install -y ffmpeg libsm6 libxext6 libgl1 \ + && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 + +# tcmalloc provides better memory allocation efficiency, e.g., holding memory in caches to speed up access of commonly-used objects. 
+RUN --mount=type=cache,target=/root/.cache/pip \ + pip install py-cpuinfo # Use this to gather CPU info and optimize based on ARM Neoverse cores + +# Set LD_PRELOAD for tcmalloc on ARM +ENV LD_PRELOAD="/usr/lib/aarch64-linux-gnu/libtcmalloc_minimal.so.4" + +RUN echo 'ulimit -c 0' >> ~/.bashrc + +WORKDIR /workspace + +ARG PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu" +ENV PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL} +RUN --mount=type=cache,target=/root/.cache/pip \ + --mount=type=bind,src=requirements-build.txt,target=requirements-build.txt \ + pip install --upgrade pip && \ + pip install -r requirements-build.txt + +FROM cpu-test-arm AS build + +WORKDIR /workspace/vllm + +RUN --mount=type=cache,target=/root/.cache/pip \ + --mount=type=bind,src=requirements-common.txt,target=requirements-common.txt \ + --mount=type=bind,src=requirements-cpu.txt,target=requirements-cpu.txt \ + pip install -v -r requirements-cpu.txt + +COPY . . +ARG GIT_REPO_CHECK=0 +RUN --mount=type=bind,source=.git,target=.git \ + if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh ; fi + +# Disabling AVX512 specific optimizations for ARM +ARG VLLM_CPU_DISABLE_AVX512="true" +ENV VLLM_CPU_DISABLE_AVX512=${VLLM_CPU_DISABLE_AVX512} + +RUN --mount=type=cache,target=/root/.cache/pip \ + --mount=type=cache,target=/root/.cache/ccache \ + --mount=type=bind,source=.git,target=.git \ + VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel && \ + pip install dist/*.whl && \ + rm -rf dist + +WORKDIR /workspace/ + +RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks + +ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] \ No newline at end of file diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake index 426189481575b..68f7ca1af05ad 100644 --- a/cmake/cpu_extension.cmake +++ b/cmake/cpu_extension.cmake @@ -16,16 +16,15 @@ include_directories("${CMAKE_SOURCE_DIR}/csrc") # # Check the compile flags # -if (CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64le") - list(APPEND CXX_COMPILE_FLAGS - "-fopenmp" - "-DVLLM_CPU_EXTENSION") -else() + +if (CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64") list(APPEND CXX_COMPILE_FLAGS - "-fopenmp" "-mf16c" - "-DVLLM_CPU_EXTENSION") + ) endif() +list(APPEND CXX_COMPILE_FLAGS + "-fopenmp" + "-DVLLM_CPU_EXTENSION") execute_process(COMMAND cat /proc/cpuinfo RESULT_VARIABLE CPUINFO_RET @@ -59,6 +58,8 @@ find_isa(${CPUINFO} "avx2" AVX2_FOUND) find_isa(${CPUINFO} "avx512f" AVX512_FOUND) find_isa(${CPUINFO} "POWER10" POWER10_FOUND) find_isa(${CPUINFO} "POWER9" POWER9_FOUND) +find_isa(${CPUINFO} "asimd" ASIMD_FOUND) # Check for ARM NEON support +find_isa(${CPUINFO} "bf16" ARM_BF16_FOUND) # Check for ARM BF16 support if (AVX512_FOUND AND NOT AVX512_DISABLED) list(APPEND CXX_COMPILE_FLAGS @@ -78,9 +79,11 @@ if (AVX512_FOUND AND NOT AVX512_DISABLED) else() message(WARNING "Disable AVX512-BF16 ISA support, no avx512_bf16 found in local CPU flags." 
" If cross-compilation is required, please set env VLLM_CPU_AVX512BF16=1.") endif() + elseif (AVX2_FOUND) list(APPEND CXX_COMPILE_FLAGS "-mavx2") message(WARNING "vLLM CPU backend using AVX2 ISA") + elseif (POWER9_FOUND OR POWER10_FOUND) message(STATUS "PowerPC detected") # Check for PowerPC VSX support @@ -88,8 +91,20 @@ elseif (POWER9_FOUND OR POWER10_FOUND) "-mvsx" "-mcpu=native" "-mtune=native") + +elseif (ASIMD_FOUND) + message(STATUS "ARMv8 or later architecture detected") + if(ARM_BF16_FOUND) + message(STATUS "BF16 extension detected") + set(MARCH_FLAGS "-march=armv8.2-a+bf16+dotprod+fp16") + add_compile_definitions(ARM_BF16_SUPPORT) + else() + message(WARNING "BF16 functionality is not available") + set(MARCH_FLAGS "-march=armv8.2-a+dotprod+fp16") + endif() + list(APPEND CXX_COMPILE_FLAGS ${MARCH_FLAGS}) else() - message(FATAL_ERROR "vLLM CPU backend requires AVX512 or AVX2 or Power9+ ISA support.") + message(FATAL_ERROR "vLLM CPU backend requires AVX512, AVX2, Power9+ ISA or ARMv8 support.") endif() # @@ -159,4 +174,4 @@ define_gpu_extension_target( WITH_SOABI ) -message(STATUS "Enabling C extension.") +message(STATUS "Enabling C extension.") \ No newline at end of file diff --git a/csrc/cpu/attention.cpp b/csrc/cpu/attention.cpp index e6c03dcb034fd..e21832ba7582f 100644 --- a/csrc/cpu/attention.cpp +++ b/csrc/cpu/attention.cpp @@ -51,6 +51,10 @@ struct KernelVecType { using v_load_vec_type = vec_op::BF16Vec16; }; #else + #ifdef __aarch64__ + #ifndef ARM_BF16_SUPPORT + // pass + #else template <> struct KernelVecType { using q_load_vec_type = vec_op::BF16Vec8; @@ -60,6 +64,18 @@ struct KernelVecType { using qk_acc_vec_type = vec_op::FP32Vec16; using v_load_vec_type = vec_op::BF16Vec16; }; + #endif + #else +template <> +struct KernelVecType { + using q_load_vec_type = vec_op::BF16Vec8; + using q_vec_type = vec_op::FP32Vec16; + using k_load_vec_type = vec_op::BF16Vec16; + using k_vec_type = vec_op::FP32Vec16; + using qk_acc_vec_type = vec_op::FP32Vec16; + using v_load_vec_type = vec_op::BF16Vec16; +}; + #endif #endif template @@ -779,4 +795,4 @@ void paged_attention_v2( CALL_V2_KERNEL_LAUNCHER_BLOCK_SIZE(scalar_t); CPU_KERNEL_GUARD_OUT(paged_attention_v2_impl) }); -} +} \ No newline at end of file diff --git a/csrc/cpu/cpu_types.hpp b/csrc/cpu/cpu_types.hpp index 0213be09105ed..28db0479748bf 100644 --- a/csrc/cpu/cpu_types.hpp +++ b/csrc/cpu/cpu_types.hpp @@ -1,4 +1,3 @@ - #ifndef CPU_TYPES_HPP #define CPU_TYPES_HPP @@ -8,8 +7,11 @@ #elif defined(__POWER9_VECTOR__) //ppc implementation #include "cpu_types_vsx.hpp" +#elif defined(__aarch64__) + //arm implementation + #include "cpu_types_arm.hpp" #else #warning "unsupported vLLM cpu implementation" #endif -#endif +#endif \ No newline at end of file diff --git a/csrc/cpu/cpu_types_arm.hpp b/csrc/cpu/cpu_types_arm.hpp new file mode 100644 index 0000000000000..73e0f8cb2e0fb --- /dev/null +++ b/csrc/cpu/cpu_types_arm.hpp @@ -0,0 +1,515 @@ +#include +#include +#include + +namespace vec_op { + +#ifdef ARM_BF16_SUPPORT + #define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::BFloat16, __VA_ARGS__) +#else + #define VLLM_DISPATCH_CASE_FLOATING_TYPES(...) \ + AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__) \ + AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__) +#endif + +#define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) 
\ + AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__)) + +#ifndef CPU_OP_GUARD +#define CPU_KERNEL_GUARD_IN(NAME) +#define CPU_KERNEL_GUARD_OUT(NAME) +#else +#define CPU_KERNEL_GUARD_IN(NAME) \ + std::cout << #NAME << " invoked." << std::endl; +#define CPU_KERNEL_GUARD_OUT(NAME) std::cout << #NAME << " exit." << std::endl; +#endif + +#define FORCE_INLINE __attribute__((always_inline)) inline + +namespace { + template + constexpr void unroll_loop_item(std::integer_sequence, F &&f) { + (f(std::integral_constant{}), ...); + }; +}; + +template >> +constexpr void unroll_loop(F &&f) { + unroll_loop_item(std::make_integer_sequence{}, std::forward(f)); +} + +template struct Vec { + constexpr static int get_elem_num() { return T::VEC_ELEM_NUM; }; +}; + +struct FP32Vec8; +struct FP32Vec16; + +struct FP16Vec8 : public Vec { + constexpr static int VEC_ELEM_NUM = 8; + + float16x8_t reg; + + explicit FP16Vec8(const void *ptr) + : reg(vld1q_f16(static_cast(ptr))) {}; + + explicit FP16Vec8(const FP32Vec8 &); + + void save(void *ptr) const { + vst1q_f16(static_cast<__fp16 *>(ptr), reg); + } +}; + +struct FP16Vec16 : public Vec { + constexpr static int VEC_ELEM_NUM = 16; + + float16x8x2_t reg; + + explicit FP16Vec16(const void *ptr) { + reg.val[0] = vld1q_f16(reinterpret_cast(ptr)); + reg.val[1] = vld1q_f16(reinterpret_cast(ptr) + 8); + } + + explicit FP16Vec16(const FP32Vec16& vec); + + void save(void *ptr) const { + vst1q_f16(reinterpret_cast<__fp16*>(ptr), reg.val[0]); + vst1q_f16(reinterpret_cast<__fp16*>(ptr) + 8, reg.val[1]); + } + + void save(void *ptr, const int elem_num) const { + int full_blocks = elem_num / 8; + int remainder = elem_num % 8; + + if (full_blocks > 0) { + vst1q_f16(reinterpret_cast<__fp16*>(ptr), reg.val[0]); + if (full_blocks > 1) { + vst1q_f16(reinterpret_cast<__fp16*>(ptr) + 8, reg.val[1]); + } + } + + if (remainder > 0) { + float16x8_t temp = reg.val[full_blocks]; + for (int i = 0; i < remainder; ++i) { + reinterpret_cast<__fp16*>(ptr)[full_blocks * 8 + i] = vgetq_lane_f16(temp, i); + } + } + } +}; + + +#ifdef ARM_BF16_SUPPORT +struct BF16Vec8 : public Vec { + constexpr static int VEC_ELEM_NUM = 8; + + bfloat16x8_t reg; + + explicit BF16Vec8(const void *ptr) + : reg(*reinterpret_cast(ptr)) {}; + + explicit BF16Vec8(bfloat16x8_t data) : reg(data) {}; + + explicit BF16Vec8(const FP32Vec8 &); + + explicit BF16Vec8(float32x4x2_t v) : reg(vcvtq_high_bf16_f32(vcvtq_low_bf16_f32(v.val[0]), v.val[1])) {}; + + void save(void *ptr) const { *reinterpret_cast(ptr) = reg; } +}; + +struct BF16Vec16 : public Vec { + constexpr static int VEC_ELEM_NUM = 16; + + bfloat16x8x2_t reg; + + explicit BF16Vec16(const void *ptr) + : reg(*reinterpret_cast(ptr)) {}; + + explicit BF16Vec16(bfloat16x8x2_t data) : reg(data) {}; + + explicit BF16Vec16(const FP32Vec16 &); + + explicit BF16Vec16(float32x4x4_t v) : reg({ + vcvtq_high_bf16_f32(vcvtq_low_bf16_f32(v.val[0]), v.val[1]), + vcvtq_high_bf16_f32(vcvtq_low_bf16_f32(v.val[2]), v.val[3]) + }){}; + + void save(void *ptr) const { *reinterpret_cast(ptr) = reg; }; +}; + +struct BF16Vec32 : public Vec { + constexpr static int VEC_ELEM_NUM = 32; + + bfloat16x8x4_t reg; + + explicit BF16Vec32(const void *ptr) + : reg(*reinterpret_cast(ptr)) {}; + + explicit BF16Vec32(bfloat16x8x4_t data) : reg(data) {}; + + explicit BF16Vec32(const BF16Vec8 &vec8_data) : reg({ + vec8_data.reg, + vec8_data.reg, + vec8_data.reg, + vec8_data.reg + }) {}; + + void save(void *ptr) const { *reinterpret_cast(ptr) = reg; }; +}; +#endif + +struct FP32Vec4 : 
public Vec { + constexpr static int VEC_ELEM_NUM = 4; + + union AliasReg { + float32x4_t reg; + float values[VEC_ELEM_NUM]; + }; + + float32x4_t reg; + + explicit FP32Vec4(float v) : reg(vdupq_n_f32(v)) {}; + + explicit FP32Vec4() : reg(vdupq_n_f32(0.0f)) {}; + + explicit FP32Vec4(const float *ptr) : reg(vld1q_f32(ptr)) {}; + + explicit FP32Vec4(float32x4_t data) : reg(data) {}; + + explicit FP32Vec4(const FP32Vec4 &data) : reg(data.reg) {}; +}; + +struct FP32Vec8 : public Vec { + constexpr static int VEC_ELEM_NUM = 8; + union AliasReg { + float32x4x2_t reg; + float values[VEC_ELEM_NUM]; + }; + + float32x4x2_t reg; + + explicit FP32Vec8(float v) : reg({vmovq_n_f32(v), vmovq_n_f32(v)}) {}; + + explicit FP32Vec8() : reg({vmovq_n_f32(0.0), vmovq_n_f32(0.0)}) {}; + + explicit FP32Vec8(const float *ptr) : reg({vld1q_f32(ptr), vld1q_f32(ptr + 4)}) {}; + + explicit FP32Vec8(float32x4x2_t data) : reg(data) {}; + + explicit FP32Vec8(const FP32Vec8 &data) : reg(data.reg) {}; + + explicit FP32Vec8(const FP16Vec8 &v) { + reg.val[0] = vcvt_f32_f16(vget_low_f16(v.reg)); + reg.val[1] = vcvt_f32_f16(vget_high_f16(v.reg)); + }; + + explicit FP32Vec8(float16x8_t v) : reg({vcvt_f32_f16(vget_low_f16(v)), vcvt_f32_f16(vget_high_f16(v))}) {}; + + #ifdef ARM_BF16_SUPPORT + + explicit FP32Vec8(bfloat16x8_t v) : reg({vcvtq_low_f32_bf16(v), vcvtq_high_f32_bf16(v)}) {}; + + explicit FP32Vec8(const BF16Vec8 &v) : reg({vcvtq_low_f32_bf16(v.reg), vcvtq_high_f32_bf16(v.reg)}) {}; + + #endif + + float reduce_sum() const { + AliasReg ar; + ar.reg = reg; + float answer = 0; + unroll_loop([&answer, &ar](int i) { answer += ar.values[i]; }); + + return answer; + } + + FP32Vec8 exp() const { + AliasReg ar; + ar.reg = reg; + + float32x2_t exp_vec0 = {expf(ar.values[0]), expf(ar.values[1])}; + float32x2_t exp_vec1 = {expf(ar.values[2]), expf(ar.values[3])}; + float32x2_t exp_vec2 = {expf(ar.values[4]), expf(ar.values[5])}; + float32x2_t exp_vec3 = {expf(ar.values[6]), expf(ar.values[7])}; + + float32x4_t result0 = vcombine_f32(exp_vec0, exp_vec1); + float32x4_t result1 = vcombine_f32(exp_vec2, exp_vec3); + + float32x4x2_t result; + result.val[0] = result0; + result.val[1] = result1; + + return FP32Vec8(result); + } + + FP32Vec8 tanh() const { + AliasReg ar; + ar.reg = reg; + + float32x2_t tanh_vec0 = {tanhf(ar.values[0]), tanhf(ar.values[1])}; + float32x2_t tanh_vec1 = {tanhf(ar.values[2]), tanhf(ar.values[3])}; + float32x2_t tanh_vec2 = {tanhf(ar.values[4]), tanhf(ar.values[5])}; + float32x2_t tanh_vec3 = {tanhf(ar.values[6]), tanhf(ar.values[7])}; + + float32x4_t result0 = vcombine_f32(tanh_vec0, tanh_vec1); + float32x4_t result1 = vcombine_f32(tanh_vec2, tanh_vec3); + + float32x4x2_t result; + result.val[0] = result0; + result.val[1] = result1; + + return FP32Vec8(result); + } + + FP32Vec8 er() const { + AliasReg ar; + ar.reg = reg; + + float32x2_t er_vec0 = {static_cast(erf(ar.values[0])), static_cast(erf(ar.values[1]))}; + float32x2_t er_vec1 = {static_cast(erf(ar.values[2])), static_cast(erf(ar.values[3]))}; + float32x2_t er_vec2 = {static_cast(erf(ar.values[4])), static_cast(erf(ar.values[5]))}; + float32x2_t er_vec3 = {static_cast(erf(ar.values[6])), static_cast(erf(ar.values[7]))}; + + float32x4_t result0 = vcombine_f32(er_vec0, er_vec1); + float32x4_t result1 = vcombine_f32(er_vec2, er_vec3); + + float32x4x2_t result; + result.val[0] = result0; + result.val[1] = result1; + + return FP32Vec8(result); + } + + FP32Vec8 operator*(const FP32Vec8 &b) const { + return FP32Vec8(float32x4x2_t({vmulq_f32(reg.val[0], 
b.reg.val[0]), vmulq_f32(reg.val[1], b.reg.val[1])})); + } + + FP32Vec8 operator+(const FP32Vec8 &b) const { + return FP32Vec8(float32x4x2_t({vaddq_f32(reg.val[0], b.reg.val[0]), vaddq_f32(reg.val[1], b.reg.val[1])})); + } + + FP32Vec8 operator-(const FP32Vec8 &b) const { + return FP32Vec8(float32x4x2_t({vsubq_f32(reg.val[0], b.reg.val[0]), vsubq_f32(reg.val[1], b.reg.val[1])})); + } + + FP32Vec8 operator/(const FP32Vec8 &b) const { + return FP32Vec8(float32x4x2_t({vdivq_f32(reg.val[0], b.reg.val[0]), vdivq_f32(reg.val[1], b.reg.val[1])})); + } + + void save(float *ptr) const { + vst1q_f32(ptr, reg.val[0]); + vst1q_f32(ptr + 4, reg.val[1]); + } +}; + +struct FP32Vec16 : public Vec { + constexpr static int VEC_ELEM_NUM = 16; + union AliasReg { + float32x4x4_t reg; + float values[VEC_ELEM_NUM]; + }; + + float32x4x4_t reg; + + explicit FP32Vec16(float v) : reg({vmovq_n_f32(v), vmovq_n_f32(v), vmovq_n_f32(v), vmovq_n_f32(v)}) {} + + explicit FP32Vec16() : reg({vmovq_n_f32(0.0), vmovq_n_f32(0.0), vmovq_n_f32(0.0), vmovq_n_f32(0.0)}) {} + + explicit FP32Vec16(const float *ptr) : reg({vld1q_f32(ptr), vld1q_f32(ptr + 4), vld1q_f32(ptr + 8), vld1q_f32(ptr + 12)}) {} + + explicit FP32Vec16(float32x4x4_t data) : reg(data) {} + + explicit FP32Vec16(const FP32Vec8 &data) { + reg.val[0] = data.reg.val[0]; + reg.val[1] = data.reg.val[1]; + reg.val[2] = data.reg.val[0]; + reg.val[3] = data.reg.val[1]; + } + + explicit FP32Vec16(const FP32Vec16 &data) : reg(data.reg) {} + + explicit FP32Vec16(const FP16Vec8 &v) : FP32Vec16(FP32Vec8(v.reg)) {} + + #ifdef ARM_BF16_SUPPORT + explicit FP32Vec16(bfloat16x8x2_t v) : reg({ + vcvtq_low_f32_bf16(v.val[0]), + vcvtq_high_f32_bf16(v.val[0]), + vcvtq_low_f32_bf16(v.val[1]), + vcvtq_high_f32_bf16(v.val[1]) + }) {}; + #endif + + explicit FP32Vec16(const FP32Vec4 &data) { + reg.val[0] = data.reg; + reg.val[1] = data.reg; + reg.val[2] = data.reg; + reg.val[3] = data.reg; + }; + + #ifdef ARM_BF16_SUPPORT + explicit FP32Vec16(const BF16Vec16 &v) : reg({ + vcvtq_low_f32_bf16(v.reg.val[0]), + vcvtq_high_f32_bf16(v.reg.val[0]), + vcvtq_low_f32_bf16(v.reg.val[1]), + vcvtq_high_f32_bf16(v.reg.val[1]) + }) {}; + + explicit FP32Vec16(const BF16Vec8 &v) : FP32Vec16(FP32Vec8(v)) {}; + #endif + + explicit FP32Vec16(const FP16Vec16 &v) { + reg.val[0] = vcvt_f32_f16(vget_low_f16(v.reg.val[0])); + reg.val[1] = vcvt_f32_f16(vget_high_f16(v.reg.val[0])); + reg.val[2] = vcvt_f32_f16(vget_low_f16(v.reg.val[1])); + reg.val[3] = vcvt_f32_f16(vget_high_f16(v.reg.val[1])); + }; + + FP32Vec16 operator+(const FP32Vec16 &b) const { + return FP32Vec16(float32x4x4_t({ + vaddq_f32(reg.val[0], b.reg.val[0]), + vaddq_f32(reg.val[1], b.reg.val[1]), + vaddq_f32(reg.val[2], b.reg.val[2]), + vaddq_f32(reg.val[3], b.reg.val[3])})); + }; + + FP32Vec16 operator*(const FP32Vec16 &b) const { + return FP32Vec16(float32x4x4_t({ + vmulq_f32(reg.val[0], b.reg.val[0]), + vmulq_f32(reg.val[1], b.reg.val[1]), + vmulq_f32(reg.val[2], b.reg.val[2]), + vmulq_f32(reg.val[3], b.reg.val[3])})); + }; + + FP32Vec16 operator-(const FP32Vec16 &b) const { + return FP32Vec16(float32x4x4_t({ + vsubq_f32(reg.val[0], b.reg.val[0]), + vsubq_f32(reg.val[1], b.reg.val[1]), + vsubq_f32(reg.val[2], b.reg.val[2]), + vsubq_f32(reg.val[3], b.reg.val[3]) + })); + }; + + FP32Vec16 operator/(const FP32Vec16 &b) const { + return FP32Vec16(float32x4x4_t({ + vdivq_f32(reg.val[0], b.reg.val[0]), + vdivq_f32(reg.val[1], b.reg.val[1]), + vdivq_f32(reg.val[2], b.reg.val[2]), + vdivq_f32(reg.val[3], b.reg.val[3]) + })); + }; + + float reduce_sum() 
const { + AliasReg ar; + ar.reg = reg; + float answer = 0; + unroll_loop([&answer, &ar](int i) { answer += ar.values[i]; }); + + return answer; + }; + + template float reduce_sub_sum(int idx) { + static_assert(VEC_ELEM_NUM % group_size == 0); + + AliasReg ar; + ar.reg = reg; + float answer = 0; + const int start = idx * group_size; + unroll_loop( + [&answer, &start, ar](int i) { answer += ar.values[start + i]; }); + + return answer; + }; + + void save(float *ptr) const { + vst1q_f32(ptr, reg.val[0]); + vst1q_f32(ptr + 4, reg.val[1]); + vst1q_f32(ptr + 8, reg.val[2]); + vst1q_f32(ptr + 12, reg.val[3]); + }; +}; + +template struct VecType { using vec_type = void; }; + +template using vec_t = typename VecType::vec_type; + +template <> struct VecType { using vec_type = FP32Vec8; }; + +template <> struct VecType { using vec_type = FP16Vec8; }; + +#ifdef ARM_BF16_SUPPORT +template <> struct VecType { using vec_type = BF16Vec8; }; +#endif + +template void storeFP32(float v, T *ptr) { *ptr = v; } + +template <> inline void storeFP32(float v, c10::Half *ptr) { + *reinterpret_cast<__fp16 *>(ptr) = v; +} + +inline FP16Vec16::FP16Vec16(const FP32Vec16 &v) { + float16x4_t low_0 = vcvt_f16_f32(v.reg.val[0]); + float16x4_t high_0 = vcvt_f16_f32(v.reg.val[1]); + float16x4_t low_1 = vcvt_f16_f32(v.reg.val[2]); + float16x4_t high_1 = vcvt_f16_f32(v.reg.val[3]); + + reg.val[0] = vcombine_f16(low_0, high_0); + reg.val[1] = vcombine_f16(low_1, high_1); +}; + +inline FP16Vec8 :: FP16Vec8(const FP32Vec8 &v) { + float16x4_t lower_half = vcvt_f16_f32(v.reg.val[0]); + float16x4_t upper_half = vcvt_f16_f32(v.reg.val[1]); + + reg = vcombine_f16(lower_half, upper_half); +}; + +inline void fma(FP32Vec16 &acc, FP32Vec16 &a, FP32Vec16 &b) { + + acc.reg.val[0] = vfmaq_f32(acc.reg.val[0], a.reg.val[0], b.reg.val[0]); + acc.reg.val[1] = vfmaq_f32(acc.reg.val[1], a.reg.val[1], b.reg.val[1]); + acc.reg.val[2] = vfmaq_f32(acc.reg.val[2], a.reg.val[2], b.reg.val[2]); + acc.reg.val[3] = vfmaq_f32(acc.reg.val[3], a.reg.val[3], b.reg.val[3]); +}; + +#ifdef ARM_BF16_SUPPORT +inline void fma(FP32Vec16 &acc, BF16Vec32 &a, BF16Vec32 &b) { + + float32x4_t a0_low = vcvt_f32_bf16(vget_low_bf16(a.reg.val[0])); + float32x4_t a0_high = vcvt_f32_bf16(vget_high_bf16(a.reg.val[0])); + float32x4_t a1_low = vcvt_f32_bf16(vget_low_bf16(a.reg.val[1])); + float32x4_t a1_high = vcvt_f32_bf16(vget_high_bf16(a.reg.val[1])); + + float32x4_t b0_low = vcvt_f32_bf16(vget_low_bf16(b.reg.val[0])); + float32x4_t b0_high = vcvt_f32_bf16(vget_high_bf16(b.reg.val[0])); + float32x4_t b1_low = vcvt_f32_bf16(vget_low_bf16(b.reg.val[1])); + float32x4_t b1_high = vcvt_f32_bf16(vget_high_bf16(b.reg.val[1])); + + acc.reg.val[0] = vfmaq_f32(acc.reg.val[0], a0_low, b0_low); + acc.reg.val[1] = vfmaq_f32(acc.reg.val[1], a0_high, b0_high); + acc.reg.val[2] = vfmaq_f32(acc.reg.val[2], a1_low, b1_low); + acc.reg.val[3] = vfmaq_f32(acc.reg.val[3], a1_high, b1_high); +}; +#endif + +#ifdef ARM_BF16_SUPPORT +inline BF16Vec8::BF16Vec8(const FP32Vec8 &v) : reg(vcvtq_high_bf16_f32(vcvtq_low_bf16_f32(v.reg.val[0]), v.reg.val[1])) {}; + +inline BF16Vec16::BF16Vec16(const FP32Vec16 &v) : reg({ + vcvtq_high_bf16_f32(vcvtq_low_bf16_f32(v.reg.val[0]), v.reg.val[1]), + vcvtq_high_bf16_f32(vcvtq_low_bf16_f32(v.reg.val[2]), v.reg.val[3]) + }){}; +#endif + +inline void prefetch(const void *addr) { + __builtin_prefetch(addr, 0, 1); +}; + +#ifdef ARM_BF16_SUPPORT +template <> +inline void storeFP32(float v, c10::BFloat16 *ptr) { + *reinterpret_cast<__bf16 *>(ptr) = vcvth_bf16_f32(v); +}; 
+#endif +}; \ No newline at end of file diff --git a/docs/source/getting_started/arm-installation.rst b/docs/source/getting_started/arm-installation.rst new file mode 100644 index 0000000000000..7b457df92c11d --- /dev/null +++ b/docs/source/getting_started/arm-installation.rst @@ -0,0 +1,50 @@ +.. _installation_arm: + +Installation for ARM CPUs +========================= + +vLLM has been adapted to work on ARM64 CPUs with NEON support, leveraging the CPU backend initially developed for the x86 platform. This guide provides installation instructions specific to ARM. For additional details on supported features, refer to the x86 platform documentation covering: + +* CPU backend inference capabilities +* Relevant runtime environment variables +* Performance optimization tips + +ARM CPU backend currently supports Float32, FP16 and BFloat16 datatypes. +Contents: + +1. :ref:`Requirements ` +2. :ref:`Quick Start with Dockerfile ` +3. :ref:`Building from Source ` + +.. _arm_backend_requirements: + +Requirements +------------ + +* **Operating System**: Linux or macOS +* **Compiler**: gcc/g++ >= 12.3.0 (optional, but recommended) +* **Instruction Set Architecture (ISA)**: NEON support is required + +.. _arm_backend_quick_start_dockerfile: + +Quick Start with Dockerfile +--------------------------- + +You can quickly set up vLLM on ARM using Docker: + +.. code-block:: console + + $ docker build -f Dockerfile.arm -t vllm-cpu-env --shm-size=4g . + $ docker run -it \ + --rm \ + --network=host \ + --cpuset-cpus= \ + --cpuset-mems= \ + vllm-cpu-env + +.. _build_arm_backend_from_source: + +Building from Source +-------------------- + +To build vLLM from source on Ubuntu 22.04 or other Linux distributions, follow a similar process as with x86. Testing has been conducted on AWS Graviton3 instances for compatibility. 
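+
+The following sketch mirrors the steps used in ``Dockerfile.arm`` above; exact package names, index URLs and Python invocations may vary across distributions, so treat it as a starting point rather than an authoritative recipe:
+
+.. code-block:: console
+
+    $ sudo apt-get update -y
+    $ sudo apt-get install -y gcc-12 g++-12 python3 python3-pip libnuma-dev libtcmalloc-minimal4
+    $ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12
+    $ pip install --upgrade pip
+    $ pip install -r requirements-build.txt
+    $ PIP_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cpu pip install -v -r requirements-cpu.txt
+    $ VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel
+    $ pip install dist/*.whl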
diff --git a/docs/source/index.rst b/docs/source/index.rst index c2afd806c50f9..0692e949f1c77 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -67,6 +67,7 @@ Documentation getting_started/openvino-installation getting_started/cpu-installation getting_started/gaudi-installation + getting_started/arm-installation getting_started/neuron-installation getting_started/tpu-installation getting_started/xpu-installation diff --git a/examples/offline_inference.py b/examples/offline_inference.py index 9b758fa2479f6..23cc6e8539431 100644 --- a/examples/offline_inference.py +++ b/examples/offline_inference.py @@ -19,4 +19,4 @@ for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") \ No newline at end of file diff --git a/requirements-cpu.txt b/requirements-cpu.txt index 749b03a0603d8..db8ad9d3a015d 100644 --- a/requirements-cpu.txt +++ b/requirements-cpu.txt @@ -1,6 +1,7 @@ # Common dependencies -r requirements-common.txt -# Dependencies for x86_64 CPUs -torch == 2.5.1+cpu; platform_machine != "ppc64le" -torchvision; platform_machine != "ppc64le" # required for the image processor of phi3v, this must be updated alongside torch +# Dependencies for CPUs +torch==2.5.1+cpu; platform_machine != "ppc64le" and platform_machine != "aarch64" +torch==2.5.1; platform_machine == "aarch64" +torchvision; platform_machine != "ppc64le" # required for the image processor of phi3v, this must be updated alongside torch \ No newline at end of file From 519e8e4182af8e25d78b062ba5e613df661e6e5d Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Mon, 25 Nov 2024 21:09:43 -0800 Subject: [PATCH 148/397] [v1] EngineArgs for better config handling for v1 (#10382) Signed-off-by: rickyx --- .buildkite/test-pipeline.yaml | 2 +- tests/v1/engine/test_async_llm.py | 3 ++ tests/v1/engine/test_engine_args.py | 42 +++++++++++++++++ tests/v1/engine/test_engine_core.py | 3 +- tests/v1/engine/test_engine_core_client.py | 6 ++- vllm/engine/arg_utils.py | 53 ++++++++++++++++++++-- vllm/engine/async_llm_engine.py | 2 +- vllm/engine/llm_engine.py | 2 +- vllm/engine/multiprocessing/engine.py | 2 +- vllm/entrypoints/openai/api_server.py | 4 +- vllm/v1/engine/async_llm.py | 2 +- vllm/v1/engine/core.py | 13 ------ vllm/v1/engine/llm_engine.py | 2 +- 13 files changed, 109 insertions(+), 27 deletions(-) create mode 100644 tests/v1/engine/test_engine_args.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index bff33d35b423e..fc23c9cff0d87 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -172,7 +172,7 @@ steps: - vllm/ - tests/v1 commands: - - pytest -v -s v1 + - VLLM_USE_V1=1 pytest -v -s v1 - label: Examples Test # 15min working_dir: "/vllm-workspace/examples" diff --git a/tests/v1/engine/test_async_llm.py b/tests/v1/engine/test_async_llm.py index 1f26fe0fc892f..fffb5b8100ec7 100644 --- a/tests/v1/engine/test_async_llm.py +++ b/tests/v1/engine/test_async_llm.py @@ -32,6 +32,9 @@ async def generate(engine: AsyncLLM, request_id: str, @pytest.mark.asyncio async def test_load(monkeypatch): + # TODO(rickyx): Remove monkeypatch once we have a better way to test V1 + # so that in the future when we switch, we don't have to change all the + # tests. 
with monkeypatch.context() as m: m.setenv("VLLM_USE_V1", "1") diff --git a/tests/v1/engine/test_engine_args.py b/tests/v1/engine/test_engine_args.py new file mode 100644 index 0000000000000..69cfdf5a395c1 --- /dev/null +++ b/tests/v1/engine/test_engine_args.py @@ -0,0 +1,42 @@ +import pytest + +from vllm import envs +from vllm.config import VllmConfig +from vllm.engine.arg_utils import EngineArgs +from vllm.usage.usage_lib import UsageContext + +if not envs.VLLM_USE_V1: + pytest.skip( + "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.", + allow_module_level=True, + ) + + +def test_defaults(): + engine_args = EngineArgs(model="facebook/opt-125m") + + # Assert V1 defaults + assert (engine_args.enable_prefix_caching + ), "V1 turns on prefix caching by default" + + +def test_defaults_with_usage_context(): + engine_args = EngineArgs(model="facebook/opt-125m") + vllm_config: VllmConfig = engine_args.create_engine_config( + UsageContext.LLM_CLASS) + + assert vllm_config.scheduler_config.max_num_seqs == 1024 + assert vllm_config.scheduler_config.max_num_batched_tokens == 8192 + + engine_args = EngineArgs(model="facebook/opt-125m") + vllm_config = engine_args.create_engine_config( + UsageContext.OPENAI_API_SERVER) + assert vllm_config.scheduler_config.max_num_seqs == 1024 + assert vllm_config.scheduler_config.max_num_batched_tokens == 2048 + + +def test_prefix_cache_disabled_with_multimodel(): + engine_args = EngineArgs(model="llava-hf/llava-1.5-7b-hf") + + vllm_config = engine_args.create_engine_config(UsageContext.LLM_CLASS) + assert not vllm_config.cache_config.enable_prefix_caching diff --git a/tests/v1/engine/test_engine_core.py b/tests/v1/engine/test_engine_core.py index b3692b594326a..bd11ff1877064 100644 --- a/tests/v1/engine/test_engine_core.py +++ b/tests/v1/engine/test_engine_core.py @@ -43,7 +43,8 @@ def test_engine_core(monkeypatch): m.setenv("VLLM_USE_V1", "1") """Setup the EngineCore.""" engine_args = EngineArgs(model=MODEL_NAME) - vllm_config = engine_args.create_engine_config() + vllm_config = engine_args.create_engine_config( + usage_context=UsageContext.UNKNOWN_CONTEXT) executor_class = AsyncLLM._get_executor_cls(vllm_config) engine_core = EngineCore(vllm_config=vllm_config, diff --git a/tests/v1/engine/test_engine_core_client.py b/tests/v1/engine/test_engine_core_client.py index e248e35ae4069..582192196aaf9 100644 --- a/tests/v1/engine/test_engine_core_client.py +++ b/tests/v1/engine/test_engine_core_client.py @@ -82,7 +82,8 @@ def test_engine_core_client(monkeypatch, multiprocessing_mode: bool): m.setenv("VLLM_USE_V1", "1") engine_args = EngineArgs(model=MODEL_NAME, compilation_config=3) - vllm_config = engine_args.create_engine_config() + vllm_config = engine_args.create_engine_config( + UsageContext.UNKNOWN_CONTEXT) executor_class = AsyncLLM._get_executor_cls(vllm_config) client = EngineCoreClient.make_client( vllm_config, @@ -153,7 +154,8 @@ async def test_engine_core_client_asyncio(monkeypatch): m.setenv("VLLM_USE_V1", "1") engine_args = EngineArgs(model=MODEL_NAME) - vllm_config = engine_args.create_engine_config() + vllm_config = engine_args.create_engine_config( + usage_context=UsageContext.UNKNOWN_CONTEXT) executor_class = AsyncLLM._get_executor_cls(vllm_config) client = EngineCoreClient.make_client( vllm_config, diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index ca68c1d57151c..60ad5ee54a2f2 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -20,6 +20,7 @@ from vllm.model_executor.layers.quantization import 
QUANTIZATION_METHODS from vllm.platforms import current_platform from vllm.transformers_utils.utils import check_gguf_file +from vllm.usage.usage_lib import UsageContext from vllm.utils import FlexibleArgumentParser, StoreBoolean if TYPE_CHECKING: @@ -113,7 +114,7 @@ class EngineArgs: # NOTE(kzawora): default block size for Gaudi should be 128 # smaller sizes still work, but very inefficiently block_size: int = 16 if not current_platform.is_hpu() else 128 - enable_prefix_caching: bool = False + enable_prefix_caching: Optional[bool] = None disable_sliding_window: bool = False use_v2_block_manager: bool = True swap_space: float = 4 # GiB @@ -197,6 +198,11 @@ def __post_init__(self): if not self.tokenizer: self.tokenizer = self.model + # Override the default value of enable_prefix_caching if it's not set + # by user. + if self.enable_prefix_caching is None: + self.enable_prefix_caching = bool(envs.VLLM_USE_V1) + # support `EngineArgs(compilation_config={...})` # without having to manually construct a # CompilationConfig object @@ -953,7 +959,12 @@ def create_load_config(self) -> LoadConfig: ignore_patterns=self.ignore_patterns, ) - def create_engine_config(self) -> VllmConfig: + def create_engine_config(self, + usage_context: Optional[UsageContext] = None + ) -> VllmConfig: + if envs.VLLM_USE_V1: + self._override_v1_engine_args(usage_context) + # gguf file needs a specific model loader and doesn't use hf_repo if check_gguf_file(self.model): self.quantization = self.load_format = "gguf" @@ -1170,7 +1181,7 @@ def create_engine_config(self) -> VllmConfig: or "all" in detailed_trace_modules, ) - return VllmConfig( + config = VllmConfig( model_config=model_config, cache_config=cache_config, parallel_config=parallel_config, @@ -1185,6 +1196,42 @@ def create_engine_config(self) -> VllmConfig: compilation_config=self.compilation_config, ) + if envs.VLLM_USE_V1: + self._override_v1_engine_config(config) + return config + + def _override_v1_engine_args(self, usage_context: UsageContext) -> None: + """ + Override the EngineArgs's args based on the usage context for V1. + """ + assert envs.VLLM_USE_V1, "V1 is not enabled" + + if self.max_num_batched_tokens is None: + # When no user override, set the default values based on the + # usage context. + if usage_context == UsageContext.LLM_CLASS: + logger.warning("Setting max_num_batched_tokens to 8192 " + "for LLM_CLASS usage context.") + self.max_num_seqs = 1024 + self.max_num_batched_tokens = 8192 + elif usage_context == UsageContext.OPENAI_API_SERVER: + logger.warning("Setting max_num_batched_tokens to 2048 " + "for OPENAI_API_SERVER usage context.") + self.max_num_seqs = 1024 + self.max_num_batched_tokens = 2048 + + def _override_v1_engine_config(self, engine_config: VllmConfig) -> None: + """ + Override the EngineConfig's configs based on the usage context for V1. + """ + assert envs.VLLM_USE_V1, "V1 is not enabled" + # TODO (ywang96): Enable APC by default when VLM supports it. 
+ if engine_config.model_config.is_multimodal_model: + logger.warning( + "Prefix caching is currently not supported for multimodal " + "models and has been disabled.") + engine_config.cache_config.enable_prefix_caching = False + @dataclass class AsyncEngineArgs(EngineArgs): diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 5a5388708b1c6..3224577c567f8 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -680,7 +680,7 @@ def from_engine_args( """Creates an async LLM engine from the engine arguments.""" # Create the engine configs. if engine_config is None: - engine_config = engine_args.create_engine_config() + engine_config = engine_args.create_engine_config(usage_context) executor_class = cls._get_executor_cls(engine_config) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index fb21b2dedeb74..a4975cece9a81 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -568,7 +568,7 @@ def from_engine_args( ) -> "LLMEngine": """Creates an LLM engine from the engine arguments.""" # Create the engine configs. - engine_config = engine_args.create_engine_config() + engine_config = engine_args.create_engine_config(usage_context) executor_class = cls._get_executor_cls(engine_config) # Create the LLM engine. engine = cls( diff --git a/vllm/engine/multiprocessing/engine.py b/vllm/engine/multiprocessing/engine.py index 7de23643a2e1c..49a90b321dac4 100644 --- a/vllm/engine/multiprocessing/engine.py +++ b/vllm/engine/multiprocessing/engine.py @@ -111,7 +111,7 @@ def from_engine_args(cls, engine_args: AsyncEngineArgs, from vllm.plugins import load_general_plugins load_general_plugins() - engine_config = engine_args.create_engine_config() + engine_config = engine_args.create_engine_config(usage_context) executor_class = LLMEngine._get_executor_cls(engine_config) use_async_sockets = engine_config.model_config.use_async_output_proc diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index bc018be982bff..6bc31ef83ded4 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -135,8 +135,8 @@ async def build_async_engine_client_from_engine_args( # TODO: fill out feature matrix. if (MQLLMEngineClient.is_unsupported_config(engine_args) or envs.VLLM_USE_V1 or disable_frontend_multiprocessing): - - engine_config = engine_args.create_engine_config() + engine_config = engine_args.create_engine_config( + UsageContext.OPENAI_API_SERVER) uses_ray = getattr(AsyncLLMEngine._get_executor_cls(engine_config), "uses_ray", False) diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index c44ebb2a85ba0..a17c8eac4b77c 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -94,7 +94,7 @@ def from_engine_args( # Create the engine configs. if engine_config is None: - vllm_config = engine_args.create_engine_config() + vllm_config = engine_args.create_engine_config(usage_context) else: vllm_config = engine_config diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 1a978fbe7355f..34f99dd30ef2e 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -41,19 +41,6 @@ def __init__( executor_class: Type[GPUExecutor], usage_context: UsageContext, ): - # Override the configs for V1. 
- # FIXME - if usage_context == UsageContext.LLM_CLASS: - vllm_config.scheduler_config.max_num_seqs = 1024 - vllm_config.scheduler_config.max_num_batched_tokens = 8192 - elif usage_context == UsageContext.OPENAI_API_SERVER: - vllm_config.scheduler_config.max_num_seqs = 1024 - vllm_config.scheduler_config.max_num_batched_tokens = 2048 - - # TODO (ywang96): Enable APC by default when VLM supports it. - if not vllm_config.model_config.is_multimodal_model: - vllm_config.cache_config.enable_prefix_caching = True - assert vllm_config.model_config.task != "embedding" logger.info("Initializing an LLM engine (v%s) with config: %s", diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index 75a77be750acd..7a5482f03b6fa 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -82,7 +82,7 @@ def from_engine_args( """Creates an LLM engine from the engine arguments.""" # Create the engine configs. - vllm_config = engine_args.create_engine_config() + vllm_config = engine_args.create_engine_config(usage_context) executor_class = cls._get_executor_cls(vllm_config) if VLLM_ENABLE_V1_MULTIPROCESSING: From 9a88f897993a83fad79d1bf6b95595be25a8d68a Mon Sep 17 00:00:00 2001 From: Sage Moore Date: Tue, 26 Nov 2024 00:00:16 -0600 Subject: [PATCH 149/397] custom allreduce + torch.compile (#10121) Signed-off-by: youkaichao Co-authored-by: youkaichao --- docs/source/getting_started/debugging.rst | 1 - tests/distributed/test_pynccl.py | 15 +-- tests/distributed/test_utils.py | 2 - .../device_communicators/pynccl.py | 26 ++--- vllm/distributed/parallel_state.py | 110 ++++++------------ vllm/v1/worker/gpu_model_runner.py | 6 +- 6 files changed, 59 insertions(+), 101 deletions(-) diff --git a/docs/source/getting_started/debugging.rst b/docs/source/getting_started/debugging.rst index 77bf550601346..0c1afcbd7c0b9 100644 --- a/docs/source/getting_started/debugging.rst +++ b/docs/source/getting_started/debugging.rst @@ -86,7 +86,6 @@ If GPU/CPU communication cannot be established, you can use the following Python from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator pynccl = PyNcclCommunicator(group=gloo_group, device=local_rank) - pynccl.disabled = False s = torch.cuda.Stream() with torch.cuda.stream(s): diff --git a/tests/distributed/test_pynccl.py b/tests/distributed/test_pynccl.py index f702d7c46ea73..fb24d6bc2c100 100644 --- a/tests/distributed/test_pynccl.py +++ b/tests/distributed/test_pynccl.py @@ -60,7 +60,7 @@ def worker_fn(): tensor = torch.ones(16, 1024, 1024, dtype=torch.float32).cuda(pynccl_comm.rank) with pynccl_comm.change_state(enable=True): - pynccl_comm.all_reduce(tensor) + tensor = pynccl_comm.all_reduce(tensor) result = tensor.mean().cpu().item() assert result == pynccl_comm.world_size @@ -84,12 +84,12 @@ def multiple_allreduce_worker_fn(): with pynccl_comm.change_state(enable=True): # two groups can communicate independently if torch.distributed.get_rank() in [0, 1]: - pynccl_comm.all_reduce(tensor) - pynccl_comm.all_reduce(tensor) + tensor = pynccl_comm.all_reduce(tensor) + tensor = pynccl_comm.all_reduce(tensor) result = tensor.mean().cpu().item() assert result == 4 else: - pynccl_comm.all_reduce(tensor) + tensor = pynccl_comm.all_reduce(tensor) result = tensor.mean().cpu().item() assert result == 2 @@ -140,14 +140,11 @@ def worker_fn_with_cudagraph(): with torch.cuda.graph( graph, stream=pynccl_comm.stream), pynccl_comm.change_state( enable=True): - # operation during the graph capture is recorded but not executed - # see 
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#creating-a-graph-using-stream-capture # noqa - pynccl_comm.all_reduce(a) + a_out = pynccl_comm.all_reduce(a) pynccl_comm.stream.synchronize() - assert a.mean().cpu().item() == pynccl_comm.world_size**0 graph.replay() pynccl_comm.stream.synchronize() - assert a.mean().cpu().item() == pynccl_comm.world_size**1 + assert a_out.mean().cpu().item() == pynccl_comm.world_size**1 @worker_fn_wrapper diff --git a/tests/distributed/test_utils.py b/tests/distributed/test_utils.py index 686b697c98e03..5fb1ae7b29fd2 100644 --- a/tests/distributed/test_utils.py +++ b/tests/distributed/test_utils.py @@ -70,14 +70,12 @@ def gpu_worker(rank, WORLD_SIZE, port1, port2): rank=rank, world_size=WORLD_SIZE) pynccl1 = PyNcclCommunicator(pg1, device=rank) - pynccl1.disabled = False if rank <= 2: pg2 = StatelessProcessGroup.create(host="127.0.0.1", port=port2, rank=rank, world_size=3) pynccl2 = PyNcclCommunicator(pg2, device=rank) - pynccl2.disabled = False data = torch.tensor([rank]).cuda() pynccl1.all_reduce(data) pg1.barrier() diff --git a/vllm/distributed/device_communicators/pynccl.py b/vllm/distributed/device_communicators/pynccl.py index 7411304eb18fa..d4e3f81747038 100644 --- a/vllm/distributed/device_communicators/pynccl.py +++ b/vllm/distributed/device_communicators/pynccl.py @@ -106,30 +106,30 @@ def __init__( self.stream.synchronize() del data - # by default it is disabled, e.g. in profiling models and prefill phase. - # to use it, use under `with obj.change_state(enable=True)`, usually - # when we are using CUDA graph. - self.disabled = True - def all_reduce(self, - tensor: torch.Tensor, + in_tensor: torch.Tensor, op: ReduceOp = ReduceOp.SUM, - stream=None): + stream=None) -> torch.Tensor: if self.disabled: - return + return None # nccl communicator created on a specific device # will only work on tensors on the same device # otherwise it will cause "illegal memory access" - assert tensor.device == self.device, ( + assert in_tensor.device == self.device, ( f"this nccl communicator is created to work on {self.device}, " - f"but the input tensor is on {tensor.device}") + f"but the input tensor is on {in_tensor.device}") + + out_tensor = torch.empty_like(in_tensor) + if stream is None: stream = self.stream - self.nccl.ncclAllReduce(buffer_type(tensor.data_ptr()), - buffer_type(tensor.data_ptr()), tensor.numel(), - ncclDataTypeEnum.from_torch(tensor.dtype), + self.nccl.ncclAllReduce(buffer_type(in_tensor.data_ptr()), + buffer_type(out_tensor.data_ptr()), + in_tensor.numel(), + ncclDataTypeEnum.from_torch(in_tensor.dtype), ncclRedOpTypeEnum.from_torch(op), self.comm, cudaStream_t(stream.cuda_stream)) + return out_tensor def all_gather(self, output_tensor: torch.Tensor, diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index 87ade377266a2..ccbe00386c5da 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -96,42 +96,24 @@ def _register_group(group: "GroupCoordinator") -> None: _groups[group.unique_name] = weakref.ref(group) -if supports_custom_op(): - - def inplace_all_reduce(tensor: torch.Tensor, group_name: str) -> None: - assert group_name in _groups, f"Group {group_name} is not found." 
- group = _groups[group_name]() - if group is None: - raise ValueError(f"Group {group_name} is destroyed.") - group._all_reduce_in_place(tensor) - - def inplace_all_reduce_fake(tensor: torch.Tensor, group_name: str) -> None: - return +def all_reduce(tensor: torch.Tensor, group_name: str) -> torch.Tensor: + assert group_name in _groups, f"Group {group_name} is not found." + group = _groups[group_name]() + if group is None: + raise ValueError(f"Group {group_name} is destroyed.") + return group._all_reduce_out_place(tensor) - direct_register_custom_op( - op_name="inplace_all_reduce", - op_func=inplace_all_reduce, - mutates_args=["tensor"], - fake_impl=inplace_all_reduce_fake, - ) - def outplace_all_reduce(tensor: torch.Tensor, - group_name: str) -> torch.Tensor: - assert group_name in _groups, f"Group {group_name} is not found." - group = _groups[group_name]() - if group is None: - raise ValueError(f"Group {group_name} is destroyed.") - return group._all_reduce_out_place(tensor) +def all_reduce_fake(tensor: torch.Tensor, group_name: str) -> torch.Tensor: + return torch.empty_like(tensor) - def outplace_all_reduce_fake(tensor: torch.Tensor, - group_name: str) -> torch.Tensor: - return torch.empty_like(tensor) +if supports_custom_op(): direct_register_custom_op( - op_name="outplace_all_reduce", - op_func=outplace_all_reduce, + op_name="all_reduce", + op_func=all_reduce, mutates_args=[], - fake_impl=outplace_all_reduce_fake, + fake_impl=all_reduce_fake, ) @@ -317,30 +299,13 @@ def graph_capture( stream.wait_stream(curr_stream) with torch.cuda.stream(stream), maybe_ca_context: - # In graph mode, we have to be very careful about the collective - # operations. The current status is: - # allreduce \ Mode | Eager | Graph | - # -------------------------------------------- - # custom allreduce | enabled | enabled | - # PyNccl | disabled| enabled | - # torch.distributed | enabled | disabled| - # - # Note that custom allreduce will have a runtime check, if the - # tensor size is too large, it will fallback to the next - # available option. - # In summary: When using CUDA graph, we use - # either custom all-reduce kernel or pynccl. When not using - # CUDA graph, we use either custom all-reduce kernel or - # PyTorch NCCL. We always prioritize using custom all-reduce - # kernel but fall back to PyTorch or pynccl if it is - # disabled or not supported. pynccl_comm = self.pynccl_comm maybe_pynccl_context: Any if not pynccl_comm: maybe_pynccl_context = nullcontext() else: maybe_pynccl_context = pynccl_comm.change_state( - enable=True, stream=torch.cuda.current_stream()) + stream=torch.cuda.current_stream()) with maybe_pynccl_context: yield graph_capture_context @@ -356,8 +321,8 @@ def all_reduce(self, input_: torch.Tensor) -> torch.Tensor: coordinator. In addition, PyTorch custom ops do not support mutation or returning - a new tensor in the same op. So we need to figure out if the op is - in-place or out-of-place ahead of time. + a new tensor in the same op. So we always make the all-reduce operation + out-of-place. """ # Bypass the function if we are using only 1 GPU. if self.world_size == 1: @@ -368,10 +333,6 @@ def all_reduce(self, input_: torch.Tensor) -> torch.Tensor: ipex.distributed.all_reduce(input_, group=self.device_group) return input_ - if not supports_custom_op(): - self._all_reduce_in_place(input_) - return input_ - if self.tpu_communicator is not None and \ not self.tpu_communicator.disabled: # TPU handles Dynamo with its own logic. 
@@ -385,30 +346,31 @@ def all_reduce(self, input_: torch.Tensor) -> torch.Tensor: not self.xpu_communicator.disabled: return self.xpu_communicator.all_reduce(input_) - if self.ca_comm is not None and \ - not self.ca_comm.disabled and \ - self.ca_comm.should_custom_ar(input_): - return torch.ops.vllm.outplace_all_reduce( - input_, group_name=self.unique_name) - else: - torch.ops.vllm.inplace_all_reduce(input_, - group_name=self.unique_name) - return input_ + return torch.ops.vllm.all_reduce(input_, group_name=self.unique_name) def _all_reduce_out_place(self, input_: torch.Tensor) -> torch.Tensor: + # always try custom allreduce first, + # and then pynccl. ca_comm = self.ca_comm - assert ca_comm is not None - assert not ca_comm.disabled - out = ca_comm.custom_all_reduce(input_) - assert out is not None - return out - - def _all_reduce_in_place(self, input_: torch.Tensor) -> None: + if ca_comm is not None and not ca_comm.disabled and \ + ca_comm.should_custom_ar(input_): + out = ca_comm.custom_all_reduce(input_) + assert out is not None + return out pynccl_comm = self.pynccl_comm - if (pynccl_comm is not None and not pynccl_comm.disabled): - pynccl_comm.all_reduce(input_) - else: - torch.distributed.all_reduce(input_, group=self.device_group) + assert pynccl_comm is not None + # TODO: pynccl should not use `stream=` + # it can just always use the current stream. + out = pynccl_comm.all_reduce(input_, + stream=torch.cuda.current_stream()) + if out is None: + # fall back to the default all-reduce using PyTorch. + # this usually happens during testing. + # when we run the model, allreduce only happens for the TP + # group, where we always have either custom allreduce or pynccl. + out = input_.clone() + torch.distributed.all_reduce(out, group=self.device_group) + return out def all_gather(self, input_: torch.Tensor, dim: int = -1) -> torch.Tensor: world_size = self.world_size diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 02f9498142bb7..13cbc8fa39c03 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -10,6 +10,7 @@ from vllm.compilation.compile_context import set_compile_context from vllm.config import CompilationLevel, VllmConfig +from vllm.distributed.parallel_state import graph_capture from vllm.forward_context import set_forward_context from vllm.inputs import INPUT_REGISTRY, InputRegistry from vllm.logger import init_logger @@ -570,8 +571,9 @@ def capture_model(self) -> None: # Trigger CUDA graph capture for specific shapes. # Capture the large shapes first so that the smaller shapes # can reuse the memory pool allocated for the large shapes. 
- for num_tokens in reversed(self.cudagraph_batch_sizes): - self._dummy_run(self.model, num_tokens, self.kv_caches) + with graph_capture(): + for num_tokens in reversed(self.cudagraph_batch_sizes): + self._dummy_run(self.model, num_tokens, self.kv_caches) end_time = time.perf_counter() end_free_gpu_memory = torch.cuda.mem_get_info()[0] From 940635343a087a5fb6548449989b84de77af5e73 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 26 Nov 2024 14:55:00 +0800 Subject: [PATCH 150/397] [Misc] Remove outdated init protocols (#10655) Signed-off-by: DarkLight1337 --- vllm/model_executor/models/interfaces.py | 30 ------------------- vllm/model_executor/models/interfaces_base.py | 2 +- 2 files changed, 1 insertion(+), 31 deletions(-) diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index 4f0c75b2c6a57..9b4a97abf9b51 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -10,7 +10,6 @@ from .interfaces_base import is_embedding_model if TYPE_CHECKING: - from vllm.config import LoRAConfig, MultiModalConfig, SchedulerConfig from vllm.sequence import IntermediateTensors logger = init_logger(__name__) @@ -29,9 +28,6 @@ class SupportsMultiModal(Protocol): MRO of your model class. """ - def __init__(self, *, multimodal_config: "MultiModalConfig") -> None: - ... - # We can't use runtime_checkable with ClassVar for issubclass checks # so we need to treat the class as an instance and use isinstance instead @@ -39,9 +35,6 @@ def __init__(self, *, multimodal_config: "MultiModalConfig") -> None: class _SupportsMultiModalType(Protocol): supports_multimodal: Literal[True] - def __call__(self, *, multimodal_config: "MultiModalConfig") -> None: - ... - @overload def supports_multimodal( @@ -81,10 +74,6 @@ class SupportsLoRA(Protocol): embedding_modules: ClassVar[Dict[str, str]] embedding_padding_modules: ClassVar[List[str]] - # lora_config is None when LoRA is not enabled - def __init__(self, *, lora_config: Optional["LoRAConfig"] = None) -> None: - ... - # We can't use runtime_checkable with ClassVar for issubclass checks # so we need to treat the class as an instance and use isinstance instead @@ -97,9 +86,6 @@ class _SupportsLoRAType(Protocol): embedding_modules: Dict[str, str] embedding_padding_modules: List[str] - def __call__(self, *, lora_config: Optional["LoRAConfig"] = None) -> None: - ... - @overload def supports_lora(model: Type[object]) -> TypeIs[Type[SupportsLoRA]]: @@ -276,21 +262,11 @@ class HasInnerState(Protocol): for max_num_seqs, etc. True for e.g. both Mamba and Jamba. """ - def __init__(self, - *, - scheduler_config: Optional["SchedulerConfig"] = None) -> None: - ... - @runtime_checkable class _HasInnerStateType(Protocol): has_inner_state: ClassVar[Literal[True]] - def __init__(self, - *, - scheduler_config: Optional["SchedulerConfig"] = None) -> None: - ... - @overload def has_inner_state(model: object) -> TypeIs[HasInnerState]: @@ -323,17 +299,11 @@ class IsAttentionFree(Protocol): True for Mamba but not Jamba. """ - def __init__(self) -> None: - ... - @runtime_checkable class _IsAttentionFreeType(Protocol): is_attention_free: ClassVar[Literal[True]] - def __init__(self) -> None: - ... 
- @overload def is_attention_free(model: object) -> TypeIs[IsAttentionFree]: diff --git a/vllm/model_executor/models/interfaces_base.py b/vllm/model_executor/models/interfaces_base.py index 7bb43beff255c..957a5a6e26b5c 100644 --- a/vllm/model_executor/models/interfaces_base.py +++ b/vllm/model_executor/models/interfaces_base.py @@ -71,7 +71,7 @@ def _check_vllm_model_forward(model: Union[Type[object], object]) -> bool: and issubclass(model, nn.Module)): logger.warning( "The model (%s) is missing " - "vLLM-specific keywords from its initializer: %s", + "vLLM-specific keywords from its `forward` method: %s", model, missing_kws, ) From 334d64d1e816cc7c9fa2f67e22d24638e63c8e15 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 26 Nov 2024 00:20:04 -0800 Subject: [PATCH 151/397] [ci] add vllm_test_utils (#10659) Signed-off-by: youkaichao --- Dockerfile | 4 ++ Dockerfile.cpu | 4 ++ Dockerfile.hpu | 3 ++ Dockerfile.neuron | 3 ++ Dockerfile.openvino | 3 ++ Dockerfile.ppc64le | 3 ++ Dockerfile.rocm | 3 ++ Dockerfile.tpu | 3 ++ Dockerfile.xpu | 3 +- tests/entrypoints/llm/test_lazy_outlines.py | 23 +++++--- tests/test_lazy_torch_compile.py | 54 +------------------ tests/vllm_test_utils/setup.py | 7 +++ .../vllm_test_utils/__init__.py | 8 +++ .../vllm_test_utils/vllm_test_utils/blame.py | 53 ++++++++++++++++++ 14 files changed, 113 insertions(+), 61 deletions(-) create mode 100644 tests/vllm_test_utils/setup.py create mode 100644 tests/vllm_test_utils/vllm_test_utils/__init__.py create mode 100644 tests/vllm_test_utils/vllm_test_utils/blame.py diff --git a/Dockerfile b/Dockerfile index 220dbe26712ec..682f046d4b6ec 100644 --- a/Dockerfile +++ b/Dockerfile @@ -191,6 +191,10 @@ ADD . /vllm-workspace/ RUN --mount=type=cache,target=/root/.cache/pip \ python3 -m pip install -r requirements-dev.txt +# install development dependencies (for testing) +RUN --mount=type=cache,target=/root/.cache/pip \ + python3 -m pip install -e tests/vllm_test_utils + # enable fast downloads from hf (for testing) RUN --mount=type=cache,target=/root/.cache/pip \ python3 -m pip install hf_transfer diff --git a/Dockerfile.cpu b/Dockerfile.cpu index 287b4958da4e5..d2f72ea975a3d 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -62,4 +62,8 @@ WORKDIR /workspace/ RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks +# install development dependencies (for testing) +RUN --mount=type=cache,target=/root/.cache/pip \ + pip install -e tests/vllm_test_utils + ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] diff --git a/Dockerfile.hpu b/Dockerfile.hpu index d18fc016387bf..87e0c1a6a934e 100644 --- a/Dockerfile.hpu +++ b/Dockerfile.hpu @@ -11,6 +11,9 @@ ENV PT_HPU_ENABLE_LAZY_COLLECTIVES=true RUN VLLM_TARGET_DEVICE=hpu python3 setup.py install +# install development dependencies (for testing) +RUN python3 -m pip install -e tests/vllm_test_utils + WORKDIR /workspace/ RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks diff --git a/Dockerfile.neuron b/Dockerfile.neuron index 2143315d2a078..76dbd4c04d3f3 100644 --- a/Dockerfile.neuron +++ b/Dockerfile.neuron @@ -38,4 +38,7 @@ ENV VLLM_TARGET_DEVICE neuron RUN --mount=type=bind,source=.git,target=.git \ pip install --no-build-isolation -v -e . 
+# install development dependencies (for testing) +RUN python3 -m pip install -e tests/vllm_test_utils + CMD ["/bin/bash"] diff --git a/Dockerfile.openvino b/Dockerfile.openvino index a05ff452cd36e..8bd188ffde408 100644 --- a/Dockerfile.openvino +++ b/Dockerfile.openvino @@ -22,4 +22,7 @@ RUN PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu" VLLM_TARGET_DEVIC COPY examples/ /workspace/examples COPY benchmarks/ /workspace/benchmarks +# install development dependencies (for testing) +RUN python3 -m pip install -e tests/vllm_test_utils + CMD ["/bin/bash"] diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le index b19c6ddec7948..971248577983f 100644 --- a/Dockerfile.ppc64le +++ b/Dockerfile.ppc64le @@ -29,6 +29,9 @@ RUN --mount=type=cache,target=/root/.cache/pip \ RUN --mount=type=bind,source=.git,target=.git \ VLLM_TARGET_DEVICE=cpu python3 setup.py install +# install development dependencies (for testing) +RUN python3 -m pip install -e tests/vllm_test_utils + WORKDIR /workspace/ RUN ln -s /workspace/vllm/tests && ln -s /workspace/vllm/examples && ln -s /workspace/vllm/benchmarks diff --git a/Dockerfile.rocm b/Dockerfile.rocm index 62d4a9b4909c3..e733994f8c33e 100644 --- a/Dockerfile.rocm +++ b/Dockerfile.rocm @@ -168,4 +168,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ if ls libs/*.whl; then \ python3 -m pip install libs/*.whl; fi +# install development dependencies (for testing) +RUN python3 -m pip install -e tests/vllm_test_utils + CMD ["/bin/bash"] diff --git a/Dockerfile.tpu b/Dockerfile.tpu index 0a507b6ecdf60..b617932a85b47 100644 --- a/Dockerfile.tpu +++ b/Dockerfile.tpu @@ -22,4 +22,7 @@ RUN --mount=type=cache,target=/root/.cache/pip \ -r requirements-tpu.txt RUN python3 setup.py develop +# install development dependencies (for testing) +RUN python3 -m pip install -e tests/vllm_test_utils + CMD ["/bin/bash"] diff --git a/Dockerfile.xpu b/Dockerfile.xpu index 63bc682770422..a374f20d7d949 100644 --- a/Dockerfile.xpu +++ b/Dockerfile.xpu @@ -64,5 +64,6 @@ RUN --mount=type=cache,target=/root/.cache/pip \ ENV VLLM_USAGE_SOURCE production-docker-image \ TRITON_XPU_PROFILE 1 - +# install development dependencies (for testing) +RUN python3 -m pip install -e tests/vllm_test_utils ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] diff --git a/tests/entrypoints/llm/test_lazy_outlines.py b/tests/entrypoints/llm/test_lazy_outlines.py index cbfb0cc32c1ce..81fb000d8ac56 100644 --- a/tests/entrypoints/llm/test_lazy_outlines.py +++ b/tests/entrypoints/llm/test_lazy_outlines.py @@ -1,12 +1,12 @@ import sys +from vllm_test_utils import blame + from vllm import LLM, SamplingParams from vllm.distributed import cleanup_dist_env_and_memory -def test_lazy_outlines(sample_regex): - """If users don't use guided decoding, outlines should not be imported. - """ +def run_normal(): prompts = [ "Hello, my name is", "The president of the United States is", @@ -25,13 +25,12 @@ def test_lazy_outlines(sample_regex): generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") - # make sure outlines is not imported - assert 'outlines' not in sys.modules - # Destroy the LLM object and free up the GPU memory. del llm cleanup_dist_env_and_memory() + +def run_lmfe(sample_regex): # Create an LLM with guided decoding enabled. 
llm = LLM(model="facebook/opt-125m", enforce_eager=True, @@ -51,5 +50,15 @@ def test_lazy_outlines(sample_regex): generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +def test_lazy_outlines(sample_regex): + """If users don't use guided decoding, outlines should not be imported. + """ # make sure outlines is not imported - assert 'outlines' not in sys.modules + module_name = "outlines" + with blame(lambda: module_name in sys.modules) as result: + run_normal() + run_lmfe(sample_regex) + assert not result.found, ( + f"Module {module_name} is already imported, the" + f" first import location is:\n{result.trace_stack}") diff --git a/tests/test_lazy_torch_compile.py b/tests/test_lazy_torch_compile.py index b8ac4dd93732b..4756fac8e2a8d 100644 --- a/tests/test_lazy_torch_compile.py +++ b/tests/test_lazy_torch_compile.py @@ -1,61 +1,9 @@ # Description: Test the lazy import module # The utility function cannot be placed in `vllm.utils` # this needs to be a standalone script - -import contextlib -import dataclasses import sys -import traceback -from typing import Callable, Generator - - -@dataclasses.dataclass -class BlameResult: - found: bool = False - trace_stack: str = "" - - -@contextlib.contextmanager -def blame(func: Callable) -> Generator[BlameResult, None, None]: - """ - Trace the function calls to find the first function that satisfies the - condition. The trace stack will be stored in the result. - - Usage: - - ```python - with blame(lambda: some_condition()) as result: - # do something - - if result.found: - print(result.trace_stack) - """ - result = BlameResult() - - def _trace_calls(frame, event, arg=None): - nonlocal result - if event in ['call', 'return']: - # for every function call or return - try: - # Temporarily disable the trace function - sys.settrace(None) - # check condition here - if not result.found and func(): - result.found = True - result.trace_stack = "".join(traceback.format_stack()) - # Re-enable the trace function - sys.settrace(_trace_calls) - except NameError: - # modules are deleted during shutdown - pass - return _trace_calls - - sys.settrace(_trace_calls) - - yield result - - sys.settrace(None) +from vllm_test_utils import blame module_name = "torch._inductor.async_compile" diff --git a/tests/vllm_test_utils/setup.py b/tests/vllm_test_utils/setup.py new file mode 100644 index 0000000000000..790e891ec837d --- /dev/null +++ b/tests/vllm_test_utils/setup.py @@ -0,0 +1,7 @@ +from setuptools import setup + +setup( + name='vllm_test_utils', + version='0.1', + packages=['vllm_test_utils'], +) diff --git a/tests/vllm_test_utils/vllm_test_utils/__init__.py b/tests/vllm_test_utils/vllm_test_utils/__init__.py new file mode 100644 index 0000000000000..bf0b62a5b75e3 --- /dev/null +++ b/tests/vllm_test_utils/vllm_test_utils/__init__.py @@ -0,0 +1,8 @@ +""" +vllm_utils is a package for vLLM testing utilities. +It does not import any vLLM modules. 
+""" + +from .blame import BlameResult, blame + +__all__ = ["blame", "BlameResult"] diff --git a/tests/vllm_test_utils/vllm_test_utils/blame.py b/tests/vllm_test_utils/vllm_test_utils/blame.py new file mode 100644 index 0000000000000..ad23ab83c2d81 --- /dev/null +++ b/tests/vllm_test_utils/vllm_test_utils/blame.py @@ -0,0 +1,53 @@ +import contextlib +import dataclasses +import sys +import traceback +from typing import Callable, Generator + + +@dataclasses.dataclass +class BlameResult: + found: bool = False + trace_stack: str = "" + + +@contextlib.contextmanager +def blame(func: Callable) -> Generator[BlameResult, None, None]: + """ + Trace the function calls to find the first function that satisfies the + condition. The trace stack will be stored in the result. + + Usage: + + ```python + with blame(lambda: some_condition()) as result: + # do something + + if result.found: + print(result.trace_stack) + """ + result = BlameResult() + + def _trace_calls(frame, event, arg=None): + nonlocal result + if event in ['call', 'return']: + # for every function call or return + try: + # Temporarily disable the trace function + sys.settrace(None) + # check condition here + if not result.found and func(): + result.found = True + result.trace_stack = "".join(traceback.format_stack()) + # Re-enable the trace function + sys.settrace(_trace_calls) + except NameError: + # modules are deleted during shutdown + pass + return _trace_calls + + sys.settrace(_trace_calls) + + yield result + + sys.settrace(None) From 1f6584ee851501cfae672973b9e55d000729818c Mon Sep 17 00:00:00 2001 From: Kunshang Ji Date: Tue, 26 Nov 2024 18:36:45 +0800 Subject: [PATCH 152/397] [V1] Enable profile for LLMEngine (#10665) --- vllm/v1/engine/llm_engine.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index 7a5482f03b6fa..bd19d998a4adb 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -161,13 +161,13 @@ def step(self) -> List[RequestOutput]: # TODO(rob): Can we get rid of these? 
def get_model_config(self): - pass + return self.model_config def start_profile(self): - pass + self.engine_core.profile(True) def stop_profile(self): - pass + self.engine_core.profile(False) def get_tokenizer_group(self, group_type): pass From db66e018eaabcc5e5855e994b49931dbb4800ce1 Mon Sep 17 00:00:00 2001 From: Murali Andoorveedu <37849411+andoorve@users.noreply.github.com> Date: Tue, 26 Nov 2024 09:11:16 -0800 Subject: [PATCH 153/397] [Bugfix] Fix for Spec model TP + Chunked Prefill (#10232) Signed-off-by: andoorve <37849411+andoorve@users.noreply.github.com> Signed-off-by: Sourashis Roy Co-authored-by: Sourashis Roy --- docs/source/serving/compatibility_matrix.rst | 2 +- tests/core/test_chunked_prefill_scheduler.py | 39 +++++++++++++ tests/spec_decode/e2e/test_compatibility.py | 46 --------------- .../e2e/test_integration_dist_tp2.py | 57 +++++++++++++++++++ tests/spec_decode/test_spec_decode_worker.py | 3 +- vllm/config.py | 10 ---- vllm/core/scheduler.py | 28 ++++++--- vllm/spec_decode/spec_decode_worker.py | 33 +++++++++-- 8 files changed, 145 insertions(+), 73 deletions(-) diff --git a/docs/source/serving/compatibility_matrix.rst b/docs/source/serving/compatibility_matrix.rst index fa03d2cde1486..a93632ff36fb8 100644 --- a/docs/source/serving/compatibility_matrix.rst +++ b/docs/source/serving/compatibility_matrix.rst @@ -118,7 +118,7 @@ Feature x Feature - - * - :ref:`SD ` - - ✗ + - ✅ - ✅ - ✗ - ✅ diff --git a/tests/core/test_chunked_prefill_scheduler.py b/tests/core/test_chunked_prefill_scheduler.py index acd82065ae457..eaaf004df38b2 100644 --- a/tests/core/test_chunked_prefill_scheduler.py +++ b/tests/core/test_chunked_prefill_scheduler.py @@ -413,6 +413,45 @@ def cannot_append_second_group2(seq_group, num_lookahead_slots): assert out.num_batched_tokens == max_num_batched_tokens +@pytest.mark.parametrize("num_scheduler_steps", [1, 5]) +def test_chunked_prefill_spec_prefill(num_scheduler_steps): + """Verify that the num_lookahead_slots is set appropriately for an all""" + """prefill batch depending on whether multi-step scheduling is enabled""" + """or not""" + block_size = 4 + max_seqs = 30 + max_model_len = 200 + max_num_batched_tokens = 30 + num_lookahead_slots = 4 + scheduler_config = SchedulerConfig( + "generate", + max_num_batched_tokens, + max_seqs, + max_model_len, + enable_chunked_prefill=True, + num_lookahead_slots=num_lookahead_slots, + num_scheduler_steps=num_scheduler_steps, + ) + cache_config = CacheConfig(block_size, 1.0, 1, "auto") + cache_config.num_cpu_blocks = 16 + cache_config.num_gpu_blocks = 16 + scheduler = Scheduler(scheduler_config, cache_config, None) + + _, seq_group = create_dummy_prompt("1", + prompt_length=30, + block_size=block_size) + scheduler.add_seq_group(seq_group) + _, out = schedule_and_update_computed_tokens(scheduler) + # The request is chunked. + # prefill scheduled now. 
+ assert len(out.scheduled_seq_groups) == 1 + assert out.num_prefill_groups == 1 + assert out.num_batched_tokens == max_num_batched_tokens + print(out.num_lookahead_slots) + assert out.num_lookahead_slots == (0 if (num_scheduler_steps == 1) else + num_lookahead_slots) + + def test_chunked_prefill_max_seqs(): block_size = 4 max_seqs = 2 diff --git a/tests/spec_decode/e2e/test_compatibility.py b/tests/spec_decode/e2e/test_compatibility.py index a3f0464e79675..af8397c235f48 100644 --- a/tests/spec_decode/e2e/test_compatibility.py +++ b/tests/spec_decode/e2e/test_compatibility.py @@ -50,49 +50,3 @@ def test_spec_decode_xfail_spec_max_model_len(test_llm_generator): with pytest.raises(ValueError, match="cannot be larger than"): get_output_from_llm_generator(test_llm_generator, prompts, sampling_params) - - -@pytest.mark.parametrize("common_llm_kwargs", - [{ - "model": "meta-llama/Llama-2-7b-chat-hf", - "speculative_model": "JackFram/llama-68m", - "num_speculative_tokens": 5, - "enable_chunked_prefill": "True", - }]) -@pytest.mark.parametrize("per_test_common_llm_kwargs", [ - { - "tensor_parallel_size": 2, - "speculative_draft_tensor_parallel_size": 2, - }, - { - "tensor_parallel_size": 4, - "speculative_draft_tensor_parallel_size": 4, - }, - { - "tensor_parallel_size": 8, - "speculative_draft_tensor_parallel_size": 8, - }, -]) -@pytest.mark.parametrize("test_llm_kwargs", [{}]) -@pytest.mark.parametrize("seed", [1]) -def test_spec_decode_xfail_chunked_prefill_draft_model_tp_not_one( - test_llm_generator): - """Verify that speculative decoding fails if chunked prefill is enabled for - draft model with tensor parallelism of more than 1. - """ - output_len = 128 - temperature = 0.0 - - prompts = [ - "Hello, my name is", - ] - - sampling_params = SamplingParams( - max_tokens=output_len, - ignore_eos=True, - temperature=temperature, - ) - - with pytest.raises(ValueError, match="with tensor parallel size 1"): - get_output_from_llm_generator(test_llm_generator, prompts, - sampling_params) diff --git a/tests/spec_decode/e2e/test_integration_dist_tp2.py b/tests/spec_decode/e2e/test_integration_dist_tp2.py index 25562ca85adf4..02cba92795142 100644 --- a/tests/spec_decode/e2e/test_integration_dist_tp2.py +++ b/tests/spec_decode/e2e/test_integration_dist_tp2.py @@ -115,3 +115,60 @@ def test_draft_model_tp_lt_target_model_tp2(model, common_llm_kwargs, max_output_len=32, seed=seed, temperature=0.0) + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, + reason="Need at least 2 GPUs to run the test.") +@pytest.mark.parametrize( + "common_llm_kwargs", + [[ + # Skip cuda graph recording for fast test. 
+ "--enforce-eager", + "--tensor_parallel_size", + "2", + + # precision + "--dtype", + "bfloat16", + ]]) +@pytest.mark.parametrize( + "per_test_common_llm_kwargs", + [["--enable-chunked-prefill", "False"], + [ + "--enable-chunked-prefill", "True", "--max-num-batched-tokens", "4", + "--max-num-seqs", "4" + ]]) +@pytest.mark.parametrize("baseline_llm_kwargs", [[]]) +@pytest.mark.parametrize("model, test_llm_kwargs", + [("JackFram/llama-68m", [ + "--speculative-model", + "JackFram/llama-68m", + "--num_speculative-tokens", + "3", + ]), + ("JackFram/llama-68m", [ + "--speculative-model", + "JackFram/llama-68m", + "--num_speculative-tokens", + "3", + "--speculative-draft-tensor-parallel-size", + "1", + ])]) +@pytest.mark.parametrize("batch_size", [2]) +@pytest.mark.parametrize("seed", [1]) +def test_spec_decode_chunked_prefill_tp2(model, common_llm_kwargs, + per_test_common_llm_kwargs, + baseline_llm_kwargs, test_llm_kwargs, + batch_size: int, seed: int): + """Verify spec decode works well with same and different TP size for + the draft model with chunked prefill. + """ + run_equality_correctness_test_tp(model, + common_llm_kwargs, + per_test_common_llm_kwargs, + baseline_llm_kwargs, + test_llm_kwargs, + batch_size, + max_output_len=32, + seed=seed, + temperature=0.0) diff --git a/tests/spec_decode/test_spec_decode_worker.py b/tests/spec_decode/test_spec_decode_worker.py index 8df143104c279..d7caf57147278 100644 --- a/tests/spec_decode/test_spec_decode_worker.py +++ b/tests/spec_decode/test_spec_decode_worker.py @@ -867,7 +867,8 @@ def test_chunked_prefill_flow(k: int, batch_size: int, batch_composition: str): target_group_metadata_list = prefill + decodes execute_model_req = ExecuteModelRequest( seq_group_metadata_list=target_group_metadata_list, - num_lookahead_slots=k) + # For prefill only batches we expect num_lookahead_slots = 0. + num_lookahead_slots=k if n_decodes > 0 else 0) target_token_ids = torch.randint(low=0, high=vocab_size, diff --git a/vllm/config.py b/vllm/config.py index c87feaec3e5f6..eae6f909e3933 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1409,16 +1409,6 @@ def maybe_create_spec_config( draft_hf_config ) - if (enable_chunked_prefill and \ - speculative_draft_tensor_parallel_size != 1): - # TODO - Investigate why the error reported in - # https://github.com/vllm-project/vllm/pull/9291#issuecomment-2463266258 - # is happening and re-enable it. - raise ValueError( - "Chunked prefill and speculative decoding can be enabled " - "simultaneously only for draft models with tensor " - "parallel size 1.") - draft_model_config.max_model_len = ( SpeculativeConfig._maybe_override_draft_max_model_len( speculative_max_model_len, diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index 530cbdc3a9190..d23009dae01ee 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -1201,15 +1201,25 @@ def _schedule_chunked_prefill(self) -> SchedulerOutputs: # Update swapped requests. self.swapped.extend(running_scheduled.swapped_out) # Put prefills first due to Attention backend ordering assumption. 
+ scheduled_seq_groups = (prefills.seq_groups + + running_scheduled.prefill_seq_groups + + swapped_in.prefill_seq_groups + + running_scheduled.decode_seq_groups + + swapped_in.decode_seq_groups) + num_prefill_groups = (len(prefills.seq_groups) + + len(swapped_in.prefill_seq_groups) + + len(running_scheduled.prefill_seq_groups)) + # If all prompts, then we set num_lookahead_slots to 0 + # this allows us to go through the `no_spec` path in + # `spec_decode_worker.py` + all_prefills = (len(scheduled_seq_groups) == num_prefill_groups) + num_lookahead_slots = (0 if + (all_prefills + and not self.scheduler_config.is_multi_step) + else running_scheduled.num_lookahead_slots) return SchedulerOutputs( - scheduled_seq_groups=(prefills.seq_groups + - running_scheduled.prefill_seq_groups + - swapped_in.prefill_seq_groups + - running_scheduled.decode_seq_groups + - swapped_in.decode_seq_groups), - num_prefill_groups=(len(prefills.seq_groups) + - len(swapped_in.prefill_seq_groups) + - len(running_scheduled.prefill_seq_groups)), + scheduled_seq_groups=scheduled_seq_groups, + num_prefill_groups=num_prefill_groups, num_batched_tokens=budget.num_batched_tokens + budget.num_cached_tokens, blocks_to_swap_in=swapped_in.blocks_to_swap_in, @@ -1218,7 +1228,7 @@ def _schedule_chunked_prefill(self) -> SchedulerOutputs: swapped_in.blocks_to_copy, ignored_seq_groups=prefills.ignored_seq_groups + swapped_in.infeasible_seq_groups, - num_lookahead_slots=running_scheduled.num_lookahead_slots, + num_lookahead_slots=num_lookahead_slots, running_queue_size=len(self.running), preempted=(len(running_scheduled.preempted) + len(running_scheduled.swapped_out)), diff --git a/vllm/spec_decode/spec_decode_worker.py b/vllm/spec_decode/spec_decode_worker.py index b57742c2ebfdd..b279931ca4b02 100644 --- a/vllm/spec_decode/spec_decode_worker.py +++ b/vllm/spec_decode/spec_decode_worker.py @@ -408,7 +408,20 @@ def execute_model( disable_all_speculation = self._should_disable_all_speculation( execute_model_req) num_lookahead_slots = execute_model_req.num_lookahead_slots - + all_prompt = True + atleast_one_prompt = False + all_zero_spec_tokens = True + for sgm in execute_model_req.seq_group_metadata_list: + all_prompt = all_prompt and sgm.is_prompt + atleast_one_prompt = atleast_one_prompt or sgm.is_prompt + all_zero_spec_tokens = all_zero_spec_tokens and ( + sgm.num_speculative_tokens == 0) + + if all_prompt and execute_model_req.seq_group_metadata_list: + assert num_lookahead_slots == 0, ( + "Prompt only runs should have num_lookahead_slots equal to 0. " + "This should never happen, please file a bug at " + "https://github.com/vllm-project/vllm/issues") # Speculative decoding is disabled in the following cases: # 1. Prefill phase: Speculative decoding is not # used during the prefill phase. @@ -419,11 +432,8 @@ def execute_model( # In any of these cases, the proposer and scorer workers # are called normally. # We expect `num_speculative_tokens` to be None for prefills. - no_spec = all( - sgm.is_prompt for sgm in execute_model_req.seq_group_metadata_list - ) or num_lookahead_slots == 0 or disable_all_speculation or all( - sgm.num_speculative_tokens == 0 - for sgm in execute_model_req.seq_group_metadata_list) + no_spec = (num_lookahead_slots == 0 or disable_all_speculation + or all_zero_spec_tokens) # Broadcast how many lookahead slots are scheduled for this step, and # whether all speculation is disabled, to all non-driver workers. 
@@ -442,6 +452,15 @@ def execute_model( num_lookahead_slots=num_lookahead_slots, no_spec=no_spec, disable_all_speculation=disable_all_speculation, + # When both chunked prefill and speculative decoding are enabled + # it is possible that the same batch contains both prefill + # and decodes. If that happens in the scorer we run the batch + # as one single forward pass. However, in the proposer we + # run them as 2 different batches - one for prefill and + # the other for decodes. The variable indicates to the non-driver + # worker that there are prefills as part of the speculative batch + # and hence it needs to run an extra prefill forward pass. + run_spec_proposer_for_prefill=atleast_one_prompt, ) broadcast_tensor_dict(broadcast_dict, src=self._driver_rank) @@ -653,6 +672,8 @@ def _run_non_driver_rank(self) -> bool: if not data["no_spec"]: self.scorer_worker.execute_model() + if data["run_spec_proposer_for_prefill"]: + self.proposer_worker.execute_model() return True From f5792c7c4a63ecdd2dcaa068ac7986dc4a22436b Mon Sep 17 00:00:00 2001 From: Conroy Cheers Date: Wed, 27 Nov 2024 05:26:28 +1100 Subject: [PATCH 154/397] [Hardware][NVIDIA] Add non-NVML CUDA mode for Jetson (#9735) Signed-off-by: Conroy Cheers --- CMakeLists.txt | 10 +- vllm/platforms/__init__.py | 10 +- vllm/platforms/cuda.py | 222 +++++++++++++++++++++++-------------- 3 files changed, 155 insertions(+), 87 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ff34225537cdd..882d4412632a5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,7 +34,7 @@ install(CODE "set(CMAKE_INSTALL_LOCAL_ONLY TRUE)" ALL_COMPONENTS) set(PYTHON_SUPPORTED_VERSIONS "3.9" "3.10" "3.11" "3.12") # Supported NVIDIA architectures. -set(CUDA_SUPPORTED_ARCHS "7.0;7.5;8.0;8.6;8.9;9.0") +set(CUDA_SUPPORTED_ARCHS "7.0;7.2;7.5;8.0;8.6;8.7;8.9;9.0") # Supported AMD GPU architectures. set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx940;gfx941;gfx942;gfx1030;gfx1100;gfx1101") @@ -249,7 +249,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # Only build Marlin kernels if we are building for at least some compatible archs. # Keep building Marlin for 9.0 as there are some group sizes and shapes that # are not supported by Machete yet. - cuda_archs_loose_intersection(MARLIN_ARCHS "8.0;8.6;8.9;9.0" ${CUDA_ARCHS}) + cuda_archs_loose_intersection(MARLIN_ARCHS "8.0;8.6;8.7;8.9;9.0" ${CUDA_ARCHS}) if (MARLIN_ARCHS) set(MARLIN_SRCS "csrc/quantization/fp8/fp8_marlin.cu" @@ -300,8 +300,8 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # # For the cutlass_scaled_mm kernels we want to build the c2x (CUTLASS 2.x) # kernels for the remaining archs that are not already built for 3x. 
- cuda_archs_loose_intersection(SCALED_MM_2X_ARCHS - "7.5;8.0;8.6;8.9;9.0" "${CUDA_ARCHS}") + cuda_archs_loose_intersection(SCALED_MM_2X_ARCHS + "7.5;8.0;8.6;8.7;8.9;9.0" "${CUDA_ARCHS}") # subtract out the archs that are already built for 3x list(REMOVE_ITEM SCALED_MM_2X_ARCHS ${SCALED_MM_3X_ARCHS}) if (SCALED_MM_2X_ARCHS) @@ -427,7 +427,7 @@ set_gencode_flags_for_srcs( CUDA_ARCHS "${CUDA_ARCHS}") if(VLLM_GPU_LANG STREQUAL "CUDA") - cuda_archs_loose_intersection(MARLIN_MOE_ARCHS "8.0;8.6;8.9;9.0" "${CUDA_ARCHS}") + cuda_archs_loose_intersection(MARLIN_MOE_ARCHS "8.0;8.6;8.7;8.9;9.0" "${CUDA_ARCHS}") if (MARLIN_MOE_ARCHS) set(MARLIN_MOE_SRC "csrc/moe/marlin_kernels/marlin_moe_kernel.h" diff --git a/vllm/platforms/__init__.py b/vllm/platforms/__init__.py index 1f68fc2e25df3..7cb8ac4b0a1e0 100644 --- a/vllm/platforms/__init__.py +++ b/vllm/platforms/__init__.py @@ -28,7 +28,15 @@ finally: pynvml.nvmlShutdown() except Exception: - pass + # CUDA is supported on Jetson, but NVML may not be. + import os + + def cuda_is_jetson() -> bool: + return os.path.isfile("/etc/nv_tegra_release") \ + or os.path.exists("/sys/class/tegra-firmware") + + if cuda_is_jetson(): + is_cuda = True is_rocm = False diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 70724b8be4c45..0d07050fd1b6a 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -4,7 +4,7 @@ import os from functools import lru_cache, wraps -from typing import TYPE_CHECKING, Callable, List, Tuple, TypeVar +from typing import TYPE_CHECKING, Callable, List, TypeVar import pynvml import torch @@ -38,10 +38,23 @@ # see https://github.com/huggingface/diffusers/issues/9704 for details torch.backends.cuda.enable_cudnn_sdp(False) -# NVML utils -# Note that NVML is not affected by `CUDA_VISIBLE_DEVICES`, -# all the related functions work on real physical device ids. -# the major benefit of using NVML is that it will not initialize CUDA + +def device_id_to_physical_device_id(device_id: int) -> int: + if "CUDA_VISIBLE_DEVICES" in os.environ: + device_ids = os.environ["CUDA_VISIBLE_DEVICES"].split(",") + if device_ids == [""]: + msg = ( + "CUDA_VISIBLE_DEVICES is set to empty string, which means" + " GPU support is disabled. If you are using ray, please unset" + " the environment variable `CUDA_VISIBLE_DEVICES` inside the" + " worker/actor. 
" + "Check https://github.com/vllm-project/vllm/issues/8402 for" + " more information.") + raise RuntimeError(msg) + physical_device_id = device_ids[device_id] + return int(physical_device_id) + else: + return device_id def with_nvml_context(fn: Callable[_P, _R]) -> Callable[_P, _R]: @@ -57,87 +70,75 @@ def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R: return wrapper -@lru_cache(maxsize=8) -@with_nvml_context -def get_physical_device_capability(device_id: int = 0) -> Tuple[int, int]: - handle = pynvml.nvmlDeviceGetHandleByIndex(device_id) - return pynvml.nvmlDeviceGetCudaComputeCapability(handle) - - -@lru_cache(maxsize=8) -@with_nvml_context -def get_physical_device_name(device_id: int = 0) -> str: - handle = pynvml.nvmlDeviceGetHandleByIndex(device_id) - return pynvml.nvmlDeviceGetName(handle) - - -@lru_cache(maxsize=8) -@with_nvml_context -def get_physical_device_total_memory(device_id: int = 0) -> int: - handle = pynvml.nvmlDeviceGetHandleByIndex(device_id) - return int(pynvml.nvmlDeviceGetMemoryInfo(handle).total) - +class CudaPlatformBase(Platform): + _enum = PlatformEnum.CUDA + device_type: str = "cuda" + dispatch_key: str = "CUDA" -@with_nvml_context -def warn_if_different_devices(): - device_ids: int = pynvml.nvmlDeviceGetCount() - if device_ids > 1: - device_names = [get_physical_device_name(i) for i in range(device_ids)] - if len(set(device_names)) > 1 and os.environ.get( - "CUDA_DEVICE_ORDER") != "PCI_BUS_ID": - logger.warning( - "Detected different devices in the system: \n%s\nPlease" - " make sure to set `CUDA_DEVICE_ORDER=PCI_BUS_ID` to " - "avoid unexpected behavior.", "\n".join(device_names)) + @classmethod + def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: + raise NotImplementedError + @classmethod + def get_device_name(cls, device_id: int = 0) -> str: + raise NotImplementedError -try: - from sphinx.ext.autodoc.mock import _MockModule + @classmethod + def get_device_total_memory(cls, device_id: int = 0) -> int: + raise NotImplementedError - if not isinstance(pynvml, _MockModule): - warn_if_different_devices() -except ModuleNotFoundError: - warn_if_different_devices() + @classmethod + def is_full_nvlink(cls, device_ids: List[int]) -> bool: + raise NotImplementedError + @classmethod + def log_warnings(cls): + pass -def device_id_to_physical_device_id(device_id: int) -> int: - if "CUDA_VISIBLE_DEVICES" in os.environ: - device_ids = os.environ["CUDA_VISIBLE_DEVICES"].split(",") - if device_ids == [""]: - msg = ( - "CUDA_VISIBLE_DEVICES is set to empty string, which means" - " GPU support is disabled. If you are using ray, please unset" - " the environment variable `CUDA_VISIBLE_DEVICES` inside the" - " worker/actor. 
" - "Check https://github.com/vllm-project/vllm/issues/8402 for" - " more information.") - raise RuntimeError(msg) - physical_device_id = device_ids[device_id] - return int(physical_device_id) - else: - return device_id + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + parallel_config = vllm_config.parallel_config + scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": + if scheduler_config.is_multi_step: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" + elif vllm_config.speculative_config: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + else: + parallel_config.worker_cls = "vllm.worker.worker.Worker" -class CudaPlatform(Platform): - _enum = PlatformEnum.CUDA - device_type: str = "cuda" - dispatch_key: str = "CUDA" +# NVML utils +# Note that NVML is not affected by `CUDA_VISIBLE_DEVICES`, +# all the related functions work on real physical device ids. +# the major benefit of using NVML is that it will not initialize CUDA +class NvmlCudaPlatform(CudaPlatformBase): @classmethod + @lru_cache(maxsize=8) + @with_nvml_context def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: physical_device_id = device_id_to_physical_device_id(device_id) - major, minor = get_physical_device_capability(physical_device_id) + handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) + major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle) return DeviceCapability(major=major, minor=minor) @classmethod + @lru_cache(maxsize=8) + @with_nvml_context def get_device_name(cls, device_id: int = 0) -> str: physical_device_id = device_id_to_physical_device_id(device_id) - return get_physical_device_name(physical_device_id) + return cls._get_physical_device_name(physical_device_id) @classmethod + @lru_cache(maxsize=8) + @with_nvml_context def get_device_total_memory(cls, device_id: int = 0) -> int: physical_device_id = device_id_to_physical_device_id(device_id) - return get_physical_device_total_memory(physical_device_id) + handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) + return int(pynvml.nvmlDeviceGetMemoryInfo(handle).total) @classmethod @with_nvml_context @@ -153,27 +154,86 @@ def is_full_nvlink(cls, physical_device_ids: List[int]) -> bool: if i < j: try: p2p_status = pynvml.nvmlDeviceGetP2PStatus( - handle, peer_handle, - pynvml.NVML_P2P_CAPS_INDEX_NVLINK) + handle, + peer_handle, + pynvml.NVML_P2P_CAPS_INDEX_NVLINK, + ) if p2p_status != pynvml.NVML_P2P_STATUS_OK: return False except pynvml.NVMLError: logger.exception( - "NVLink detection failed. This is normal if your" - " machine has no NVLink equipped.") + "NVLink detection failed. 
This is normal if" + " your machine has no NVLink equipped.") return False return True @classmethod - def check_and_update_config(cls, vllm_config: VllmConfig) -> None: - parallel_config = vllm_config.parallel_config - scheduler_config = vllm_config.scheduler_config - if parallel_config.worker_cls == "auto": - if scheduler_config.is_multi_step: - parallel_config.worker_cls = \ - "vllm.worker.multi_step_worker.MultiStepWorker" - elif vllm_config.speculative_config: - parallel_config.worker_cls = \ - "vllm.spec_decode.spec_decode_worker.create_spec_worker" - else: - parallel_config.worker_cls = "vllm.worker.worker.Worker" + def _get_physical_device_name(cls, device_id: int = 0) -> str: + handle = pynvml.nvmlDeviceGetHandleByIndex(device_id) + return pynvml.nvmlDeviceGetName(handle) + + @classmethod + @with_nvml_context + def log_warnings(cls): + device_ids: int = pynvml.nvmlDeviceGetCount() + if device_ids > 1: + device_names = [ + cls._get_physical_device_name(i) for i in range(device_ids) + ] + if (len(set(device_names)) > 1 + and os.environ.get("CUDA_DEVICE_ORDER") != "PCI_BUS_ID"): + logger.warning( + "Detected different devices in the system: \n%s\nPlease" + " make sure to set `CUDA_DEVICE_ORDER=PCI_BUS_ID` to " + "avoid unexpected behavior.", + "\n".join(device_names), + ) + + +class NonNvmlCudaPlatform(CudaPlatformBase): + + @classmethod + def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: + major, minor = torch.cuda.get_device_capability(device_id) + return DeviceCapability(major=major, minor=minor) + + @classmethod + def get_device_name(cls, device_id: int = 0) -> str: + return torch.cuda.get_device_name(device_id) + + @classmethod + def get_device_total_memory(cls, device_id: int = 0) -> int: + device_props = torch.cuda.get_device_properties(device_id) + return device_props.total_memory + + @classmethod + def is_full_nvlink(cls, physical_device_ids: List[int]) -> bool: + logger.exception( + "NVLink detection not possible, as context support was" + " not found. Assuming no NVLink available.") + return False + + +# Autodetect either NVML-enabled or non-NVML platform +# based on whether NVML is available. +nvml_available = False +try: + try: + pynvml.nvmlInit() + nvml_available = True + except Exception: + # On Jetson, NVML is not supported. 
+ nvml_available = False +finally: + if nvml_available: + pynvml.nvmlShutdown() + +CudaPlatform = NvmlCudaPlatform if nvml_available else NonNvmlCudaPlatform + +try: + from sphinx.ext.autodoc.mock import _MockModule + + if not isinstance(pynvml, _MockModule): + CudaPlatform.log_warnings() +except ModuleNotFoundError: + CudaPlatform.log_warnings() From 9a99273b482a3e90431069f37858d60827983e2f Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Tue, 26 Nov 2024 13:44:01 -0500 Subject: [PATCH 155/397] [Bugfix] Fix using `-O[0,3]` with LLM entrypoint (#10677) Signed-off-by: mgoin --- vllm/engine/arg_utils.py | 5 ++++- vllm/entrypoints/llm.py | 10 ++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 60ad5ee54a2f2..90b4798f17a13 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -206,7 +206,10 @@ def __post_init__(self): # support `EngineArgs(compilation_config={...})` # without having to manually construct a # CompilationConfig object - if isinstance(self.compilation_config, (int, dict)): + if isinstance(self.compilation_config, (int)): + self.compilation_config = CompilationConfig.from_cli( + str(self.compilation_config)) + elif isinstance(self.compilation_config, (dict)): self.compilation_config = CompilationConfig.from_cli( json.dumps(self.compilation_config)) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index e07f4c04abd84..1551a9a998160 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -185,8 +185,14 @@ def __init__( kwargs["disable_log_stats"] = True if compilation_config is not None: - compilation_config_instance = CompilationConfig.from_cli( - json.dumps(compilation_config)) + if isinstance(compilation_config, (int)): + compilation_config_instance = CompilationConfig.from_cli( + str(compilation_config)) + elif isinstance(compilation_config, (dict)): + compilation_config_instance = CompilationConfig.from_cli( + json.dumps(compilation_config)) + else: + compilation_config_instance = compilation_config else: compilation_config_instance = None From 7576cd38dfdf1672d04f4fe659f8260a9d319e8b Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Tue, 26 Nov 2024 15:29:00 -0500 Subject: [PATCH 156/397] [Bugfix] Check bnb_4bit_quant_storage for bitsandbytes (#10642) --- .../layers/quantization/bitsandbytes.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/vllm/model_executor/layers/quantization/bitsandbytes.py b/vllm/model_executor/layers/quantization/bitsandbytes.py index 39965ac9115c2..6a0de3034142a 100644 --- a/vllm/model_executor/layers/quantization/bitsandbytes.py +++ b/vllm/model_executor/layers/quantization/bitsandbytes.py @@ -20,6 +20,7 @@ def __init__( load_in_8bit: bool = False, load_in_4bit: bool = True, bnb_4bit_compute_dtype: str = "float32", + bnb_4bit_quant_storage: str = "uint8", bnb_4bit_quant_type: str = "fp4", bnb_4bit_use_double_quant: bool = False, llm_int8_enable_fp32_cpu_offload: bool = False, @@ -31,6 +32,7 @@ def __init__( self.load_in_8bit = load_in_8bit self.load_in_4bit = load_in_4bit self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype + self.bnb_4bit_quant_storage = bnb_4bit_quant_storage self.bnb_4bit_quant_type = bnb_4bit_quant_type self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload @@ -38,10 +40,15 @@ def __init__( self.llm_int8_skip_modules = llm_int8_skip_modules or [] self.llm_int8_threshold = llm_int8_threshold + if 
self.bnb_4bit_quant_storage not in ["uint8"]: + raise ValueError("Unsupported bnb_4bit_quant_storage: " + f"{self.bnb_4bit_quant_storage}") + def __repr__(self) -> str: return (f"BitsAndBytesConfig(load_in_8bit={self.load_in_8bit}, " f"load_in_4bit={self.load_in_4bit}, " f"bnb_4bit_compute_dtype={self.bnb_4bit_compute_dtype}, " + f"bnb_4bit_quant_storage={self.bnb_4bit_quant_storage}, " f"bnb_4bit_quant_type={self.bnb_4bit_quant_type}, " f"llm_int8_skip_modules={self.llm_int8_skip_modules})") @@ -80,6 +87,9 @@ def get_safe_value(config, keys, default_value=None): bnb_4bit_compute_dtype = get_safe_value(config, ["bnb_4bit_compute_dtype"], default_value="float32") + bnb_4bit_quant_storage = get_safe_value(config, + ["bnb_4bit_quant_storage"], + default_value="uint8") bnb_4bit_quant_type = get_safe_value(config, ["bnb_4bit_quant_type"], default_value="fp4") bnb_4bit_use_double_quant = get_safe_value( @@ -99,6 +109,7 @@ def get_safe_value(config, keys, default_value=None): load_in_8bit=load_in_8bit, load_in_4bit=load_in_4bit, bnb_4bit_compute_dtype=bnb_4bit_compute_dtype, + bnb_4bit_quant_storage=bnb_4bit_quant_storage, bnb_4bit_quant_type=bnb_4bit_quant_type, bnb_4bit_use_double_quant=bnb_4bit_use_double_quant, llm_int8_enable_fp32_cpu_offload=llm_int8_enable_fp32_cpu_offload, From 2f0a0a17a47436fe9709462dfee3bb9d2f91e0a0 Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Tue, 26 Nov 2024 12:46:11 -0800 Subject: [PATCH 157/397] [V1] Refactor model executable interface for multimodal models (#10570) Signed-off-by: Roger Wang --- vllm/model_executor/models/blip2.py | 61 ++++++----- vllm/model_executor/models/chameleon.py | 58 +++++++--- vllm/model_executor/models/chatglm.py | 54 ++++++---- vllm/model_executor/models/fuyu.py | 43 +++++--- vllm/model_executor/models/interfaces.py | 36 ++++++- vllm/model_executor/models/internvl.py | 54 +++++++--- vllm/model_executor/models/llava.py | 15 +-- vllm/model_executor/models/llava_next.py | 51 +++++---- .../model_executor/models/llava_next_video.py | 44 +++++--- vllm/model_executor/models/llava_onevision.py | 74 +++++++++---- vllm/model_executor/models/molmo.py | 88 +++++++-------- vllm/model_executor/models/paligemma.py | 52 +++++---- vllm/model_executor/models/phi3v.py | 16 +-- vllm/model_executor/models/qwen2_audio.py | 59 ++++++---- vllm/model_executor/models/qwen2_vl.py | 102 ++++++++++++------ vllm/model_executor/models/ultravox.py | 72 ++++++++----- vllm/model_executor/models/utils.py | 5 +- vllm/v1/worker/gpu_model_runner.py | 3 +- 18 files changed, 581 insertions(+), 306 deletions(-) diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py index 7d7639b4a92ce..d2592016aff34 100644 --- a/vllm/model_executor/models/blip2.py +++ b/vllm/model_executor/models/blip2.py @@ -16,6 +16,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import consecutive_placeholder_ranges from vllm.sequence import IntermediateTensors, SequenceData @@ -609,6 +610,25 @@ def _process_image_input(self, return self.language_projection(query_output) + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + vision_embeddings = self._process_image_input(image_input) + 
return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + BLIP2_IMAGE_TOKEN_ID) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -616,6 +636,7 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[SamplerOutput, IntermediateTensors]: """Run forward pass for BLIP-2. @@ -648,32 +669,24 @@ def forward( See also: :class:`Blip2ImageInputs` """ + if intermediate_tensors is not None: - input_ids = None inputs_embeds = None - else: - image_input = self._parse_and_validate_image_input(**kwargs) - - if image_input is not None: - vision_embeddings = self._process_image_input(image_input) - inputs_embeds = self.language_model.model.get_input_embeddings( - input_ids) - - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, - BLIP2_IMAGE_TOKEN_ID) - - input_ids = None - else: - inputs_embeds = None - - hidden_states = self.language_model.model( - input_ids, - positions, - kv_caches, - attn_metadata, - intermediate_tensors=intermediate_tensors, - inputs_embeds=inputs_embeds) + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. + elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None + + hidden_states = self.language_model.model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) return hidden_states diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index 5a6d6432112f0..a40c321ce0a58 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -29,6 +29,7 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.model_executor.utils import set_weight_attrs from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges, repeat_and_pad_placeholder_tokens) @@ -38,7 +39,7 @@ from .interfaces import SupportsMultiModal, SupportsPP from .utils import (is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, - maybe_prefix) + maybe_prefix, merge_multimodal_embeddings) # These configs are not part of the model config but the preprocessor # and processor files, so we hardcode them in the model file for now. 
@@ -987,6 +988,29 @@ def _parse_and_validate_image_input( data=self._validate_pixel_values(pixel_values), ) + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + assert self.model.vqmodel is not None + image_tokens = self.model.get_image_tokens(image_input["data"].to( + self.config.torch_dtype)) + vision_embeddings = self.model.get_input_embeddings(image_tokens) + return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + + inputs_embeds = self.model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + self.model.vocabulary_mapping.image_token_id) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -994,27 +1018,27 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs, ) -> Union[torch.Tensor, IntermediateTensors]: if intermediate_tensors is not None: + inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. + elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) input_ids = None - else: - image_input = self._parse_and_validate_image_input(**kwargs) - - if image_input is not None: - assert self.model.vqmodel is not None - image_tokens = self.model.get_image_tokens( - image_input["data"].to(self.config.torch_dtype)) - image_token_id = self.model.vocabulary_mapping.image_token_id - special_image_mask = input_ids == image_token_id - image_tokens = image_tokens.to(input_ids.device, - input_ids.dtype) - input_ids = input_ids.masked_scatter(special_image_mask, - image_tokens) - - hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors) + + hidden_states = self.model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 5bcbce7180ca4..6c50882d83c3b 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -33,7 +33,8 @@ from vllm.model_executor.models.module_mapping import MultiModelKeys from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY -from vllm.multimodal.inputs import MultiModalData, MultiModalKwargs +from vllm.multimodal.inputs import (MultiModalData, MultiModalKwargs, + NestedTensors) from vllm.multimodal.utils import cached_get_tokenizer from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, SequenceData) @@ -545,6 +546,30 @@ def _parse_and_validate_image_input( """) return GLMImagePixelInputs(pixel_values=pixel_values) + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input["pixel_values"] is None: + return None + pixel_values = image_input["pixel_values"].to( + dtype=self.config.torch_dtype) + vision_embeddings = self.vision(pixel_values) 
+ return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.embedding(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_glm_vision_embeddings( + input_ids=input_ids, + inputs_embeds=inputs_embeds, + vision_embeddings=multimodal_embeddings, + boi_token_id=self.config.boi_token_id, + eoi_token_id=self.config.eoi_token_id) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -552,26 +577,17 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> torch.Tensor: - if intermediate_tensors is None: - inputs_embeds = self.embedding(input_ids) - image_input = self._parse_and_validate_image_input(**kwargs) - - if image_input["pixel_values"] is not None: - pixel_values = image_input["pixel_values"].to( - dtype=inputs_embeds.dtype) - image_embeds = self.vision(pixel_values) - - boi_token_id = self.config.boi_token_id - eoi_token_id = self.config.eoi_token_id - - inputs_embeds = merge_glm_vision_embeddings( - input_ids=input_ids, - inputs_embeds=inputs_embeds, - vision_embeddings=image_embeds, - boi_token_id=boi_token_id, - eoi_token_id=eoi_token_id) + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. + if intermediate_tensors is None and inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None else: inputs_embeds = intermediate_tensors["hidden_states"] diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py index 7b46907ac83ab..6e86900326c4b 100644 --- a/vllm/model_executor/models/fuyu.py +++ b/vllm/model_executor/models/fuyu.py @@ -35,6 +35,7 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.image import cached_get_image_processor +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges) from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, @@ -302,6 +303,25 @@ def _process_image_input( vision_embeddings, _ = self.vision_embed_tokens(image_input["data"]) return vision_embeddings + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + vision_embeddings = self._process_image_input(image_input) + return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + _IMAGE_TOKEN_ID) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -309,24 +329,19 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ): if intermediate_tensors is not None: - input_ids = None 
inputs_embeds = None - else: - image_input = self._parse_and_validate_image_input(**kwargs) - - if image_input is not None: - vision_embeddings = self._process_image_input(image_input) - inputs_embeds = self.language_model.model.embed_tokens( - input_ids) - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, - self.image_token_id) - - else: - inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. + elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None hidden_states = self.language_model( input_ids=input_ids, diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index 9b4a97abf9b51..1545ce332309f 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -2,7 +2,7 @@ Protocol, Type, Union, overload, runtime_checkable) import torch -from typing_extensions import TypeIs +from typing_extensions import TypeIs, TypeVar from vllm.logger import init_logger from vllm.utils import supports_kw @@ -10,10 +10,14 @@ from .interfaces_base import is_embedding_model if TYPE_CHECKING: + from vllm.attention import AttentionMetadata + from vllm.multimodal.inputs import NestedTensors # noqa: F401 from vllm.sequence import IntermediateTensors logger = init_logger(__name__) +T = TypeVar("T", default="NestedTensors") + @runtime_checkable class SupportsMultiModal(Protocol): @@ -28,6 +32,36 @@ class SupportsMultiModal(Protocol): MRO of your model class. """ + def get_multimodal_embeddings(self, **kwargs) -> Optional[T]: + """ + Returns multimodal embeddings generated from multimodal kwargs + to be merged with text embeddings. + """ + ... + + # Only for models that support v0 chunked prefill + # TODO(ywang96): Remove this overload once v0 is deprecated + @overload + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[T] = None, + attn_metadata: Optional["AttentionMetadata"] = None, + ) -> torch.Tensor: + ... + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[T] = None, + ) -> torch.Tensor: + """ + Returns the input embeddings merged from the text embeddings from + input_ids and the multimodal embeddings generated from multimodal + kwargs. + """ + ... 
+ # We can't use runtime_checkable with ClassVar for issubclass checks # so we need to treat the class as an instance and use isinstance instead diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 47ac00b6afe9b..b1c0065afbf30 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -26,6 +26,7 @@ InternVisionPatchModel) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import cached_get_tokenizer from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of @@ -641,6 +642,26 @@ def _get_visual_token_mask(self, input_ids: torch.Tensor) -> torch.Tensor: visual_token_mask = None return visual_token_mask + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + vision_embeddings = self._process_image_input(image_input) + return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + assert self.img_context_token_id is not None + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + self.img_context_token_id) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -648,26 +669,22 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[SamplerOutput, IntermediateTensors]: + + visual_token_mask = None if intermediate_tensors is not None: input_ids = None inputs_embeds = None - visual_token_mask = None - else: - image_input = self._parse_and_validate_image_input(**kwargs) - if image_input is not None: - inputs_embeds = self.language_model.model.get_input_embeddings( - input_ids) - vision_embeddings = self._process_image_input(image_input) - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, - self.img_context_token_id) - visual_token_mask = self._get_visual_token_mask(input_ids) - input_ids = None - else: - inputs_embeds = None - visual_token_mask = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. 
+ elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None forward_kwargs = { "input_ids": input_ids, @@ -677,6 +694,13 @@ def forward( "intermediate_tensors": intermediate_tensors, "inputs_embeds": inputs_embeds, } + if self.img_context_token_id is not None: + visual_token_mask = self._get_visual_token_mask(input_ids) + + # We always overwrite it back to None after computing visual token + # mask so that this doesn't need to depend on encoder output + self.img_context_token_id = None + if self.is_mono: forward_kwargs.update({"visual_token_mask": visual_token_mask}) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 05c6cc62efcd7..e7757b3c7d405 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -478,7 +478,7 @@ def _process_image_input(self, image_features = self._process_image_pixels(image_input) return self.multi_modal_projector(image_features) - def process_mm_inputs(self, **kwargs): + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: image_input = self._parse_and_validate_image_input(**kwargs) if image_input is None: return None @@ -488,12 +488,12 @@ def process_mm_inputs(self, **kwargs): def get_input_embeddings( self, input_ids: torch.Tensor, - vision_embeddings: Optional[NestedTensors] = None, + multimodal_embeddings: Optional[NestedTensors] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if vision_embeddings is not None: + if multimodal_embeddings is not None: inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, + input_ids, inputs_embeds, multimodal_embeddings, self.config.image_token_index) return inputs_embeds @@ -544,10 +544,11 @@ def forward( """ if intermediate_tensors is not None: inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. 
elif inputs_embeds is None: - vision_embeddings = self.process_mm_inputs(**kwargs) - # always pass the input via `inputs_embeds` - # to make sure the computation graph is consistent + vision_embeddings = self.get_multimodal_embeddings(**kwargs) inputs_embeds = self.get_input_embeddings(input_ids, vision_embeddings) input_ids = None diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index abeebb45fc4a7..e113f5862830d 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -19,6 +19,7 @@ from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import NestedTensors from vllm.sequence import IntermediateTensors, PoolerOutput from vllm.utils import is_list_of @@ -565,6 +566,30 @@ def _process_image_input( for i, patch_features_batch in enumerate(patch_embeddings) ] + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + vision_embeddings = self._process_image_input(image_input) + return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + + if multimodal_embeddings is None: + return self.language_model.get_input_embeddings(input_ids) + + inputs_embeds = embed_multimodal( + input_ids, + self.config.image_token_index, + self.language_model.model.get_input_embeddings, + multimodal_embeddings, + ) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -572,6 +597,7 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[torch.Tensor, IntermediateTensors]: """Run forward pass for LlaVA-NeXT. @@ -620,24 +646,14 @@ def forward( """ if intermediate_tensors is not None: inputs_embeds = None - else: - image_input = self._parse_and_validate_image_input(**kwargs) - - if image_input is not None: - inputs_embeds = embed_multimodal( - input_ids, - self.config.image_token_index, - self.language_model.model.get_input_embeddings, - lambda _: self._process_image_input(image_input), - ) - else: - inputs_embeds = self.language_model.model.get_input_embeddings( - input_ids) - # always pass the input via `inputs_embeds` - # to make sure the computation graph is consistent - # for `torch.compile` integration - input_ids = None + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. 
+ elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None hidden_states = self.language_model.model(input_ids, positions, @@ -645,7 +661,6 @@ def forward( attn_metadata, intermediate_tensors, inputs_embeds=inputs_embeds) - return hidden_states def compute_logits( diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py index e2880c76cf43d..b130791808924 100644 --- a/vllm/model_executor/models/llava_next_video.py +++ b/vllm/model_executor/models/llava_next_video.py @@ -18,6 +18,7 @@ from vllm.model_executor.models.clip import CLIPVisionModel from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import (cached_get_tokenizer, repeat_and_pad_placeholder_tokens) from vllm.sequence import IntermediateTensors @@ -388,6 +389,25 @@ def _process_video_pixels(self, inputs: LlavaNextVideoPixelInputs): raise ValueError( f"Unsupported type of video input {type(video_pixels)}") + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + video_input = self._parse_and_validate_video_input(**kwargs) + if video_input is None: + return None + vision_embeddings = self._process_video_pixels(video_input) + return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + self.config.video_token_index) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -395,6 +415,7 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[torch.Tensor, IntermediateTensors]: """Run forward pass for LlaVA-NeXT-Video. @@ -404,22 +425,15 @@ def forward( pixel_values_videos: Pixels in each frames for each input videos. """ if intermediate_tensors is not None: - input_ids = None inputs_embeds = None - else: - video_input = self._parse_and_validate_video_input(**kwargs) - if video_input is not None: - video_embeddings = self._process_video_pixels(video_input) - inputs_embeds = self.language_model \ - .model.get_input_embeddings(input_ids) - - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, video_embeddings, - self.config.video_token_index) - - input_ids = None - else: - inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. 
+ elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None hidden_states = self.language_model.model(input_ids, positions, diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index 705ca1e4ab6e6..3166737d61582 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -21,6 +21,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import (cached_get_tokenizer, repeat_and_pad_placeholder_tokens) from vllm.sequence import IntermediateTensors @@ -824,6 +825,49 @@ def apply_pooling(self, image_features, stride=2): image_feature = image_feature.view(batch_frames, -1, dim) return image_feature + def get_multimodal_embeddings( + self, **kwargs) -> Optional[List[Tuple[NestedTensors, str]]]: + modalities = self._parse_and_validate_multimodal_inputs(**kwargs) + if not modalities: + return None + + # We make a tuple of each embedding with its modality string. This is a + # temporary workaround for models to handle mixed modalities when + # get_multimodal_embeddings and get_input_embeddings are called + # separately. + # TODO(ywang96): Add support for mixed-modality inference for v1. + multimodal_embeddings: List[Tuple[NestedTensors, str]] = [] + + if "images" in modalities: + image_input = modalities["images"] + vision_embeddings = self._process_image_input(image_input) + multimodal_embeddings.append((vision_embeddings, "image")) + if "videos" in modalities: + video_input = modalities["videos"] + video_embeddings = self._process_video_pixels(video_input) + multimodal_embeddings.append((video_embeddings, "video")) + + return multimodal_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[List[Tuple[NestedTensors, + str]]] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + for embeddings, modality in multimodal_embeddings: + if modality == "image": + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, embeddings, + self.config.image_token_index) + if modality == "video": + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, embeddings, + self.config.video_token_index) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -831,6 +875,7 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[torch.Tensor, IntermediateTensors]: """Run forward pass for LlaVA-Onevision. @@ -840,28 +885,15 @@ def forward( pixel_values_videos: Pixels in each frames for each input videos. 
""" if intermediate_tensors is not None: - input_ids = None inputs_embeds = None - else: - modalities = self._parse_and_validate_multimodal_inputs(**kwargs) - if modalities: - inputs_embeds = self.language_model.model.get_input_embeddings( - input_ids) - if "images" in modalities: - image_input = modalities["images"] - vision_embeddings = self._process_image_input(image_input) - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, - self.config.image_token_index) - if "videos" in modalities: - video_input = modalities["videos"] - video_embeddings = self._process_video_pixels(video_input) - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, video_embeddings, - self.config.video_token_index) - input_ids = None - else: - inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. + elif inputs_embeds is None: + multimodal_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + multimodal_embeddings) + input_ids = None hidden_states = self.language_model.model(input_ids, positions, diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index ee7b560fe1ee4..acedddd84d7cb 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -3,7 +3,7 @@ from array import array from dataclasses import dataclass from functools import lru_cache, partial -from typing import Iterable, List, Mapping, Optional, Tuple, TypedDict, Union +from typing import Iterable, List, Mapping, Optional, Tuple, TypedDict import torch from einops import rearrange @@ -36,6 +36,7 @@ ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import cached_get_tokenizer from vllm.platforms import _Backend from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, @@ -756,6 +757,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) + def get_input_embeddings( + self, + input_ids: torch.Tensor, + ) -> torch.Tensor: + return self.embed_tokens(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -1098,19 +1105,16 @@ def _process_image_input( return image_features - def _merge_multimodal_embeddings( - self, - inputs_embeds: torch.Tensor, - image_features: torch.Tensor, - image_input_idx: torch.Tensor, - seq_len: Union[torch.Tensor, List[torch.Tensor]], - ) -> torch.Tensor: + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + image_features = self._process_image_input(image_input) + image_input_idx = image_input["image_input_idx"] + seq_len = image_input["seq_len"] batch_size, num_image, num_patch = image_features.shape[:3] assert image_input_idx.shape == (batch_size, num_image, num_patch) - image_features = image_features.to(inputs_embeds.device) - seq_len = seq_len.to(inputs_embeds.device) - # insert the image feature into the embedding. 
image_features = image_features.view(batch_size, num_image * num_patch, -1) @@ -1130,12 +1134,24 @@ def _merge_multimodal_embeddings( image_input_idx = image_input_idx + offset.to(image_input_idx.dtype) image_input_idx = image_input_idx.flatten()[:, None] mat = image_input_idx == torch.arange( - seq_len.sum().item(), device=inputs_embeds.device)[None, :] + seq_len.sum().item(), device=image_features.device)[None, :] mat = mat.to(image_features.dtype) - inputs_embeds = inputs_embeds + torch.einsum('nd,nm->md', - image_features, mat) + # Note: In this original implementation from AI2, the final + # vision_embeddings will be always be the same length + # of input embedddings, which is not very efficient. + # TODO(ywang96): see if this can be optimized. + vision_embeddings = torch.einsum('nd,nm->md', image_features, mat) + return vision_embeddings + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = inputs_embeds + multimodal_embeddings return inputs_embeds def forward( @@ -1145,39 +1161,27 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> SamplerOutput: + if intermediate_tensors is not None: inputs_embeds = None - else: - image_input = self._parse_and_validate_image_input(**kwargs) - - if image_input is not None: - inputs_embeds = self.model.embed_tokens(input_ids) - image_features = self._process_image_input(image_input) - - inputs_embeds = self._merge_multimodal_embeddings( - inputs_embeds, - image_features, - image_input["image_input_idx"], - image_input["seq_len"], - ) - else: - inputs_embeds = self.model.embed_tokens(input_ids) - # always pass the input via `inputs_embeds` - # to make sure the computation graph is consistent - # for `torch.compile` integration - input_ids = None - - hidden_states = self.model( - input_ids=input_ids, - positions=positions, - kv_caches=kv_caches, - attn_metadata=attn_metadata, - intermediate_tensors=intermediate_tensors, - inputs_embeds=inputs_embeds, - ) + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. 
+ elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None + + hidden_states = self.model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) return hidden_states diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index dd5256eb87ab3..2e5b6bee784e7 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -13,6 +13,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import cached_get_tokenizer from vllm.sequence import IntermediateTensors @@ -240,36 +241,45 @@ def _process_image_input( return self.multi_modal_projector(image_features) + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + vision_embeddings = self._process_image_input(image_input) + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/paligemma/modeling_paligemma.py#L294 # noqa + vision_embeddings = vision_embeddings * (self.config.hidden_size**-0.5) + return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + self.config.image_token_index) + return inputs_embeds + def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object) -> Union[SamplerOutput, IntermediateTensors]: if intermediate_tensors is not None: - input_ids = None inputs_embeds = None - else: - parsed_image_input = self._parse_and_validate_image_input(**kwargs) - - if parsed_image_input is not None: - vision_embeddings = self._process_image_input( - parsed_image_input) - # https://github.com/huggingface/transformers/blob/main/src/transformers/models/paligemma/modeling_paligemma.py#L294 # noqa - vision_embeddings = vision_embeddings * ( - self.config.hidden_size**-0.5) - - inputs_embeds = self.language_model.model.get_input_embeddings( - input_ids) - - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, - self.config.image_token_index) - - input_ids = None - else: - inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. 
+ elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None hidden_states = self.language_model.model(input_ids, positions, diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 2e583bb08e87a..4cb874a13e0c1 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -676,7 +676,7 @@ def _process_image_input( return image_embeds - def process_mm_inputs(self, **kwargs): + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: image_input = self._parse_and_validate_image_input(**kwargs) if image_input is None: return None @@ -686,12 +686,12 @@ def process_mm_inputs(self, **kwargs): def get_input_embeddings( self, input_ids: torch.Tensor, - vision_embeddings: Optional[NestedTensors] = None, + multimodal_embeddings: Optional[NestedTensors] = None, ) -> torch.Tensor: inputs_embeds = self.embed_tokens(input_ids) - if vision_embeddings is not None: + if multimodal_embeddings is not None: inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, + input_ids, inputs_embeds, multimodal_embeddings, self.image_token_id) return inputs_embeds @@ -703,12 +703,14 @@ def forward(self, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object): + if intermediate_tensors is not None: inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility elif inputs_embeds is None: - vision_embeddings = self.process_mm_inputs(**kwargs) - # always pass the input via `inputs_embeds` - # to make sure the computation graph is consistent + vision_embeddings = self.get_multimodal_embeddings(**kwargs) inputs_embeds = self.get_input_embeddings(input_ids, vision_embeddings) input_ids = None diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index 0c2374c3c3fc9..a0605fee82aca 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -42,10 +42,12 @@ from vllm.model_executor.models.qwen2 import Qwen2Model from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs +from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import consecutive_placeholder_ranges from vllm.sequence import IntermediateTensors, SequenceData from .interfaces import SupportsMultiModal, SupportsPP +from .utils import merge_multimodal_embeddings logger = init_logger(__name__) @@ -371,6 +373,25 @@ def _process_audio_input(self, return masked_audio_features + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + audio_input = self._parse_and_validate_audio_input(**kwargs) + if audio_input is None: + return None + masked_audio_features = self._process_audio_input(audio_input) + return masked_audio_features + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + self.config.audio_token_index) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -378,33 +399,27 
@@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[torch.Tensor, IntermediateTensors]: + if intermediate_tensors is not None: - input_ids = None inputs_embeds = None - else: - audio_input = self._parse_and_validate_audio_input(**kwargs) - if audio_input is None: - inputs_embeds = None - else: - inputs_embeds = self.language_model.embed_tokens(input_ids) - masked_audio_features = self._process_audio_input(audio_input) - # merge llm embeddings and audio features - mask = (input_ids == self.config.audio_token_index) - inputs_embeds[mask, :] = masked_audio_features - - input_ids = None - - hidden_states = self.language_model( - input_ids=input_ids, - positions=positions, - kv_caches=kv_caches, - attn_metadata=attn_metadata, - intermediate_tensors=intermediate_tensors, - inputs_embeds=inputs_embeds, - ) + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. + elif inputs_embeds is None: + multimodal_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + multimodal_embeddings) + input_ids = None + + hidden_states = self.language_model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) return hidden_states def compute_logits(self, hidden_states: torch.Tensor, diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 531608a877f2f..7956a98b21569 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -63,7 +63,7 @@ from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.image import cached_get_image_processor from vllm.multimodal.inputs import (MultiModalData, MultiModalDataDict, - MultiModalKwargs) + MultiModalKwargs, NestedTensors) from vllm.multimodal.utils import cached_get_tokenizer from vllm.platforms import _Backend from vllm.sequence import IntermediateTensors, PoolerOutput, SequenceData @@ -1238,6 +1238,55 @@ def _merge_multimodal_embeddings( inputs_embeds[mask, :] = multimodal_embeddings return inputs_embeds + def get_multimodal_embeddings( + self, **kwargs) -> Optional[List[Tuple[NestedTensors, str]]]: + + image_input = self._parse_and_validate_image_input(**kwargs) + video_input = self._parse_and_validate_video_input(**kwargs) + if image_input is None and video_input is None: + return None + + # We make a tuple of each embedding with its modality string. This is a + # temporary workaround for models to handle mixed modalities when + # get_multimodal_embeddings and get_input_embeddings are called + # separately. + # TODO(ywang96): Add support for mixed-modality inference for v1. 
+ multimodal_embeddings: List[Tuple[NestedTensors, str]] = [] + + if image_input is not None: + image_embeds = self._process_image_input(image_input) + multimodal_embeddings.append((image_embeds, "image")) + if video_input is not None: + video_embeds = self._process_video_input(video_input) + multimodal_embeddings.append((video_embeds, "video")) + + return multimodal_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[List[Tuple[NestedTensors, + str]]] = None, + ) -> torch.Tensor: + inputs_embeds = self.model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + for embeddings, modality in multimodal_embeddings: + if modality == "image": + inputs_embeds = self._merge_multimodal_embeddings( + input_ids, + inputs_embeds, + embeddings, + placeholder_token_id=self.config.image_token_id, + ) + if modality == "video": + inputs_embeds = self._merge_multimodal_embeddings( + input_ids, + inputs_embeds, + embeddings, + placeholder_token_id=self.config.video_token_id, + ) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -1245,6 +1294,7 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[torch.Tensor, IntermediateTensors]: """Run forward pass for Qwen2-VL. @@ -1266,42 +1316,26 @@ def forward( video_grid_thw: Tensor `(n_videos, 3)` of video 3D grid in LLM. `None` if no videos are passed. """ + if intermediate_tensors is not None: - input_ids = None inputs_embeds = None - else: - image_input = self._parse_and_validate_image_input(**kwargs) - video_input = self._parse_and_validate_video_input(**kwargs) - - if image_input is None and video_input is None: - inputs_embeds = None - else: - if uses_mrope(self.config): - assert positions.ndim == 2 and positions.size(0) == 3, ( - "multimodal section rotary embedding requires " - f"(3, seq_len) positions, but got {positions.size()}") - - inputs_embeds = self.model.embed_tokens(input_ids) - - if image_input is not None: - image_embeds = self._process_image_input(image_input) - inputs_embeds = self._merge_multimodal_embeddings( - input_ids, - inputs_embeds, - image_embeds, - placeholder_token_id=self.config.image_token_id, - ) - - if video_input is not None: - video_embeds = self._process_video_input(video_input) - inputs_embeds = self._merge_multimodal_embeddings( - input_ids, - inputs_embeds, - video_embeds, - placeholder_token_id=self.config.video_token_id, - ) - input_ids = None + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. + elif inputs_embeds is None: + multimodal_embeddings = self.get_multimodal_embeddings(**kwargs) + + # We need to check for usage of mrope here in case there is + # multimodal data. + # TODO (ywang96): move this to model runner in V1. 
+ if multimodal_embeddings is not None and uses_mrope(self.config): + assert positions.ndim == 2 and positions.size(0) == 3, ( + "multimodal section rotary embedding requires " + f"(3, seq_len) positions, but got {positions.size()}") + + inputs_embeds = self.get_input_embeddings(input_ids, + multimodal_embeddings) + input_ids = None hidden_states = self.model( input_ids=input_ids, diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py index 512adbc7db35e..b61deccde45b7 100644 --- a/vllm/model_executor/models/ultravox.py +++ b/vllm/model_executor/models/ultravox.py @@ -449,10 +449,36 @@ def _process_audio_input( return result - def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + audio_input = self._parse_and_validate_audio_input(**kwargs) + if audio_input is None: + return None + audio_embeddings = self._process_audio_input(audio_input) + return audio_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + attn_metadata: Optional[AttentionMetadata] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + + # TODO(ywang96): use merge_multimodal_embeddings after + # v0 is deprecated + merge_multimodal_embeddings_from_map( + inputs_embeds, multimodal_embeddings, + attn_metadata.multi_modal_placeholder_index_maps["audio"]) + return inputs_embeds + + def forward(self, + input_ids: torch.Tensor, + positions: torch.Tensor, kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[torch.Tensor], + intermediate_tensors: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs) -> Union[torch.Tensor, IntermediateTensors]: """Run forward pass for Ultravox @@ -466,30 +492,28 @@ def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, Args: audio_features: A batch of audio inputs [B, N, 80, M]. """ + if intermediate_tensors is not None: - input_ids = None inputs_embeds = None - else: - audio_input = self._parse_and_validate_audio_input(**kwargs) - if audio_input is not None: - audio_embeddings = self._process_audio_input(audio_input) - inputs_embeds = self.language_model.model.get_input_embeddings( - input_ids) - - merge_multimodal_embeddings_from_map( - inputs_embeds, audio_embeddings, - attn_metadata.multi_modal_placeholder_index_maps["audio"]) - input_ids = None - else: - inputs_embeds = None - - hidden_states = self.language_model.model( - input_ids=input_ids, - positions=positions, - kv_caches=kv_caches, - attn_metadata=attn_metadata, - intermediate_tensors=intermediate_tensors, - inputs_embeds=inputs_embeds) + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. 
+ elif inputs_embeds is None: + multimodal_embeddings = self.get_multimodal_embeddings(**kwargs) + + # TODO(ywang96): remove attn_metadata from get_input_embeddings + # after v0 is deprecated + inputs_embeds = self.get_input_embeddings(input_ids, + multimodal_embeddings, + attn_metadata) + input_ids = None + + hidden_states = self.language_model.model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) return hidden_states def compute_logits(self, hidden_states: torch.Tensor, diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index dcfd2cb7d2622..4c13cbc953273 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -356,8 +356,7 @@ def embed_multimodal( input_ids: torch.Tensor, multimodal_token_id: int, get_text_embeds: Callable[[torch.Tensor], torch.Tensor], - get_multimodal_embeds: Callable[[torch.Tensor], Union[torch.Tensor, - List[torch.Tensor]]], + multimodal_embeds: Union[torch.Tensor, List[torch.Tensor]], ) -> torch.Tensor: """ Embed token IDs and multimodal inputs and combine their embeddings. @@ -374,8 +373,6 @@ def embed_multimodal( is_text = ~is_multimodal text_embeds = get_text_embeds(input_ids[is_text]) - multimodal_embeds = get_multimodal_embeds(input_ids[is_multimodal]) - merged_embeds = torch.empty( (input_ids.shape[0], text_embeds.shape[1]), dtype=text_embeds.dtype, diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 13cbc8fa39c03..1fa47f553dfd6 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -363,7 +363,8 @@ def _execute_encoder(self, scheduler_output: "SchedulerOutput"): # 2. A list (length: num_images) of tensors, each of shape # [feature_size, hidden_size] in case when the feature size is # dynamic depending on input images. - encoder_outputs = self.model.process_mm_inputs(**batched_mm_inputs) + encoder_outputs = self.model.get_multimodal_embeddings( + **batched_mm_inputs) # Cache the encoder outputs. 
for (req_id, input_id), output in zip(req_input_ids, encoder_outputs): From 0a71900bc92b4a18d5545e9d5dc0ca750add3c69 Mon Sep 17 00:00:00 2001 From: "Chendi.Xue" Date: Tue, 26 Nov 2024 19:57:11 -0600 Subject: [PATCH 158/397] Remove hard-dependencies of Speculative decode to CUDA workers (#10587) Signed-off-by: Chendi Xue --- tests/spec_decode/test_spec_decode_worker.py | 4 +- vllm/config.py | 1 + .../layers/spec_decode_base_sampler.py | 17 +++++++- vllm/platforms/cpu.py | 8 +++- vllm/platforms/cuda.py | 4 +- vllm/spec_decode/draft_model_runner.py | 24 ++++++------ vllm/spec_decode/interfaces.py | 8 ++-- vllm/spec_decode/medusa_worker.py | 9 +++-- vllm/spec_decode/metrics.py | 15 ++++++- vllm/spec_decode/multi_step_worker.py | 31 +++++++++++---- vllm/spec_decode/ngram_worker.py | 3 +- vllm/spec_decode/spec_decode_worker.py | 36 +++++++++++------ vllm/spec_decode/target_model_runner.py | 33 ++++++---------- vllm/spec_decode/util.py | 12 ++++-- vllm/worker/cpu_model_runner.py | 39 ++++++++++++++++++- vllm/worker/cpu_worker.py | 27 ++++++++++++- vllm/worker/model_runner_base.py | 15 +++++++ vllm/worker/worker.py | 7 ++-- vllm/worker/worker_base.py | 3 ++ 19 files changed, 219 insertions(+), 77 deletions(-) diff --git a/tests/spec_decode/test_spec_decode_worker.py b/tests/spec_decode/test_spec_decode_worker.py index d7caf57147278..caf7a7e625b46 100644 --- a/tests/spec_decode/test_spec_decode_worker.py +++ b/tests/spec_decode/test_spec_decode_worker.py @@ -595,8 +595,8 @@ def test_init_device(acceptance_sampler_method: str): target_worker.init_device.assert_called_once() - metrics_collector.init_gpu_tensors.assert_called_once() - spec_decode_sampler.init_gpu_tensors.assert_called_once() + metrics_collector.init_tensors.assert_called_once() + spec_decode_sampler.init_tensors.assert_called_once() @pytest.mark.parametrize("acceptance_sampler_method", diff --git a/vllm/config.py b/vllm/config.py index eae6f909e3933..68f73bf4b4dc9 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -990,6 +990,7 @@ class ParallelConfig: # the full name of the worker class to use. If "auto", the worker class # will be determined based on the platform. worker_cls: str = "auto" + sd_worker_cls: str = "auto" world_size: int = field(init=False) diff --git a/vllm/model_executor/layers/spec_decode_base_sampler.py b/vllm/model_executor/layers/spec_decode_base_sampler.py index 7e750a744e25f..6aa4b8bd34cde 100644 --- a/vllm/model_executor/layers/spec_decode_base_sampler.py +++ b/vllm/model_executor/layers/spec_decode_base_sampler.py @@ -43,6 +43,21 @@ def init_gpu_tensors(self, device: Union[int, str]) -> None: dtype=torch.long, device=device) + def init_tensors(self, + device: Union[int, str], + device_type: Union[torch.device, str] = 'cuda') -> None: + assert self.num_accepted_tokens is None + if isinstance(device_type, torch.device): + device_type = device_type.type + if isinstance(device, int): + device = f"{device_type}:{device}" + self.num_accepted_tokens = torch.tensor(0, + dtype=torch.long, + device=device) + self.num_emitted_tokens = torch.tensor(0, + dtype=torch.long, + device=device) + @property def probs_dtype(self): return torch.float32 @@ -77,7 +92,7 @@ def _create_output( tensor is [batch_size, k + num_bonus_tokens] """ batch_size, k = substitute_token_ids.shape - bonus_token_ids = bonus_token_ids.squeeze() + bonus_token_ids = bonus_token_ids.squeeze(-1) # Determine the index of the first False value for each row. 
limits = (accepted == 0).max(1).indices limits[~(accepted == 0).any(1)] = k diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index cbc982752c6b4..3e22c87f61fac 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -86,4 +86,10 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config.distributed_executor_backend) parallel_config.distributed_executor_backend = "mp" if parallel_config.worker_cls == "auto": - parallel_config.worker_cls = "vllm.worker.cpu_worker.CPUWorker" + if vllm_config.speculative_config: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + parallel_config.sd_worker_cls = \ + "vllm.worker.cpu_worker.CPUWorker" + else: + parallel_config.worker_cls = "vllm.worker.cpu_worker.CPUWorker" diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 0d07050fd1b6a..5e9ce551f2332 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -106,6 +106,8 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: elif vllm_config.speculative_config: parallel_config.worker_cls = \ "vllm.spec_decode.spec_decode_worker.create_spec_worker" + parallel_config.sd_worker_cls = \ + "vllm.worker.worker.Worker" else: parallel_config.worker_cls = "vllm.worker.worker.Worker" @@ -236,4 +238,4 @@ def is_full_nvlink(cls, physical_device_ids: List[int]) -> bool: if not isinstance(pynvml, _MockModule): CudaPlatform.log_warnings() except ModuleNotFoundError: - CudaPlatform.log_warnings() + CudaPlatform.log_warnings() \ No newline at end of file diff --git a/vllm/spec_decode/draft_model_runner.py b/vllm/spec_decode/draft_model_runner.py index cf166e3eb5bad..fe5fd39f42ac9 100644 --- a/vllm/spec_decode/draft_model_runner.py +++ b/vllm/spec_decode/draft_model_runner.py @@ -20,8 +20,9 @@ from vllm.logger import init_logger from vllm.multimodal import MultiModalKwargs from vllm.sequence import ExecuteModelRequest, IntermediateTensors -from vllm.worker.model_runner import (ModelInputForGPUWithSamplingMetadata, - ModelRunner) +from vllm.worker.model_runner_base import (ModelRunnerBase, + ModelRunnerInputBase, + ModelRunnerWrapperBase) logger = init_logger(__name__) @@ -33,7 +34,7 @@ allow_gpu_advance_step = True -class TP1DraftModelRunner(ModelRunner): +class TP1DraftModelRunner(ModelRunnerWrapperBase): """Specialized model runner for speculative decoding draft model. Since the draft model always execute k forward passes consecutively to generate k speculative tokens in a single speculative decoding step, @@ -46,13 +47,14 @@ class TP1DraftModelRunner(ModelRunner): any broadcasting inside execute_model). """ - def __init__(self, *args, **kwargs): - if kwargs.get("return_hidden_states"): + def __init__(self, model_runner: ModelRunnerBase): + if hasattr( + model_runner, + "return_hidden_states") and model_runner.return_hidden_states: raise ValueError( "return_hidden_states is not supported for TP1DraftModelRunner." 
) - - super().__init__(*args, **kwargs) + super().__init__(model_runner) self.indices_of_seq_with_bonus_tokens = None @@ -73,10 +75,8 @@ def _update_sampling_metadata(self, sampling_metadata, num_seqs, assert seq_group.prompt_logprob_indices == [] # No prompt assert seq_group.sample_indices == [i] # Simple - def _gpu_advance_step( - self, model_input: ModelInputForGPUWithSamplingMetadata, - last_output: SamplerOutput - ) -> ModelInputForGPUWithSamplingMetadata: + def _gpu_advance_step(self, model_input: ModelRunnerInputBase, + last_output: SamplerOutput) -> ModelRunnerInputBase: # Currently, we expect "decode mode" only assert not model_input.is_prompt @@ -168,7 +168,7 @@ def set_indices_of_seq_with_bonus_tokens(self, @torch.inference_mode() def execute_model( self, - model_input: ModelInputForGPUWithSamplingMetadata, + model_input: ModelRunnerInputBase, kv_caches: List[torch.Tensor], previous_hidden_states: Optional[torch.Tensor] = None, intermediate_tensors: Optional[IntermediateTensors] = None, diff --git a/vllm/spec_decode/interfaces.py b/vllm/spec_decode/interfaces.py index 029f56460f5c1..a4fe0f13c8db1 100644 --- a/vllm/spec_decode/interfaces.py +++ b/vllm/spec_decode/interfaces.py @@ -1,6 +1,6 @@ from abc import ABC, abstractmethod from dataclasses import dataclass -from typing import Optional, Set +from typing import Optional, Set, Union import torch @@ -75,9 +75,11 @@ def get_spec_proposals( class SpeculativeScorer(ABC): - def __init__(self, scorer_worker: WorkerBase, device: str, - vocab_size: int): + def __init__(self, scorer_worker: WorkerBase, + device: Union[torch.device, str], vocab_size: int): self._scorer_worker = scorer_worker + if isinstance(device, torch.device): + device = device.type self._device = device self._vocab_size = vocab_size diff --git a/vllm/spec_decode/medusa_worker.py b/vllm/spec_decode/medusa_worker.py index 0d233f393cb8c..1ab691a7ef047 100644 --- a/vllm/spec_decode/medusa_worker.py +++ b/vllm/spec_decode/medusa_worker.py @@ -9,21 +9,22 @@ from vllm.spec_decode.interfaces import SpeculativeProposals from vllm.spec_decode.proposer_worker_base import NonLLMProposerWorkerBase from vllm.spec_decode.top1_proposer import Top1Proposer -from vllm.worker.worker import Worker +from vllm.worker.worker_base import WorkerWrapperBase -class MedusaWorker(NonLLMProposerWorkerBase, Worker): +class MedusaWorker(NonLLMProposerWorkerBase, WorkerWrapperBase): """Worker for Medusa. """ def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) + super().__init__(kwargs.get("vllm_config")) + self.init_worker(*args, **kwargs) # Lazy initialization list. 
self._proposer: Top1Proposer def init_device(self): - super().init_device() + self.worker.init_device() self._proposer = Top1Proposer( weakref.proxy(self), # type: ignore[arg-type] diff --git a/vllm/spec_decode/metrics.py b/vllm/spec_decode/metrics.py index 89ccaba70e93c..03dc46600d8a9 100644 --- a/vllm/spec_decode/metrics.py +++ b/vllm/spec_decode/metrics.py @@ -1,11 +1,12 @@ import time -from typing import Callable, Optional +from typing import Callable, Optional, Union import msgspec import torch from vllm.model_executor.layers.spec_decode_base_sampler import ( SpecDecodeBaseSampler) +from vllm.platforms import current_platform from vllm.utils import is_pin_memory_available @@ -81,8 +82,20 @@ def init_gpu_tensors(self, rank: int) -> None: self._rank = rank self._copy_stream = torch.cuda.Stream() + def init_tensors(self, + rank: int, + device_type: Union[torch.device, str] = 'cuda') -> None: + self._rank = rank + if isinstance(device_type, torch.device): + device_type = device_type.type + if device_type == 'cuda': + self._copy_stream = torch.cuda.Stream() + def maybe_collect_rejsample_metrics( self, k: int) -> Optional[SpecDecodeWorkerMetrics]: + # currently using cuda.Event, skip for any non_cuda_alike platform + if not current_platform.is_cuda_alike(): + return None # If a copy was initiated in the previous call, collect and return. if self._in_flight_copy is not None: diff --git a/vllm/spec_decode/multi_step_worker.py b/vllm/spec_decode/multi_step_worker.py index f49b98f5c9528..d249b37c780e4 100644 --- a/vllm/spec_decode/multi_step_worker.py +++ b/vllm/spec_decode/multi_step_worker.py @@ -5,17 +5,21 @@ import torch from vllm.model_executor.layers.sampler import SamplerOutput +from vllm.platforms import current_platform from vllm.sequence import (ExecuteModelRequest, HiddenStates, SequenceData, SequenceGroupMetadata) -from vllm.spec_decode.draft_model_runner import TP1DraftModelRunner + +if current_platform.is_cuda_alike(): + from vllm.spec_decode.draft_model_runner import TP1DraftModelRunner + from vllm.spec_decode.interfaces import (SpeculativeProposals, SpeculativeProposer) from vllm.spec_decode.proposer_worker_base import ProposerWorkerBase from vllm.spec_decode.top1_proposer import Top1Proposer -from vllm.worker.worker import Worker +from vllm.worker.worker_base import WorkerWrapperBase -class MultiStepWorker(Worker, ProposerWorkerBase): +class MultiStepWorker(ProposerWorkerBase, WorkerWrapperBase): """The MultiStepWorker is equivalent to a Worker except that it allows multiple forward passes in a single call, assuming the scheduler has allocated enough space to store the additional KV. This reduces overhead @@ -28,13 +32,14 @@ class MultiStepWorker(Worker, ProposerWorkerBase): """ def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) + super().__init__(kwargs.get("vllm_config")) + self.init_worker(*args, **kwargs) # Lazy initialization list. 
self._proposer: SpeculativeProposer def init_device(self) -> None: - super().init_device() + self.worker.init_device() self._proposer = Top1Proposer( weakref.proxy(self), # type: ignore[arg-type] @@ -51,6 +56,18 @@ def set_should_modify_greedy_probs_inplace(self) -> None: self.model_runner.model.sampler.should_modify_greedy_probs_inplace = ( True) + def determine_num_available_blocks(self) -> Tuple[int, int]: + return self.worker.determine_num_available_blocks() + + def get_cache_block_size_bytes(self) -> int: + return self.worker.get_cache_block_size_bytes() + + def initialize_cache(self, *args, **kwargs) -> None: + self.worker.initialize_cache(*args, **kwargs) + + def execute_model(self, *args, **kwargs) -> List[SamplerOutput]: + return self.worker.execute_model(*args, **kwargs) + @torch.inference_mode() def sampler_output( self, @@ -75,7 +92,7 @@ def sampler_output( # Run model sample_len times. model_outputs: List[SamplerOutput] = [] - if isinstance( + if current_platform.is_cuda_alike() and isinstance( self.model_runner, TP1DraftModelRunner ) and self.model_runner.supports_gpu_multi_step(expanded_request): # Here we run the draft_model_runner with multi-step prepare @@ -92,7 +109,7 @@ def sampler_output( # and other restrictions that are part of DraftModelRunner's # supports_gpu_multi_step(..) for _ in range(sample_len): - model_output: List[SamplerOutput] = super().execute_model( + model_output: List[SamplerOutput] = self.worker.execute_model( execute_model_req=expanded_request) assert (len(model_output) == 1 ), "composing multistep workers not supported" diff --git a/vllm/spec_decode/ngram_worker.py b/vllm/spec_decode/ngram_worker.py index debb3b2d5ec30..bb6b99135580e 100644 --- a/vllm/spec_decode/ngram_worker.py +++ b/vllm/spec_decode/ngram_worker.py @@ -22,6 +22,7 @@ def __init__(self, *args, **kwargs): # Get local_rank/vocab_size from kwargs attribute self.local_rank = kwargs["local_rank"] self.vocab_size = kwargs["vllm_config"].model_config.get_vocab_size() + self.device_type = kwargs.get("device_type", "cuda") # Lazy initialization list. 
self._proposer: Top1Proposer @@ -34,7 +35,7 @@ def set_ngram_window_size(self, ngram_prompt_lookup_min: int, self.ngram_prompt_lookup_min = ngram_prompt_lookup_min def init_device(self): - self.device = torch.device(f"cuda:{self.local_rank}") + self.device = torch.device(f"{self.device_type}:{self.local_rank}") self.load_model = lambda *args, **kwargs: None # Current NGramWorker only supports Top1Proposer diff --git a/vllm/spec_decode/spec_decode_worker.py b/vllm/spec_decode/spec_decode_worker.py index b279931ca4b02..53634f7b0b366 100644 --- a/vllm/spec_decode/spec_decode_worker.py +++ b/vllm/spec_decode/spec_decode_worker.py @@ -14,12 +14,16 @@ SpecDecodeBaseSampler, SpecDecodeStochasticBaseSampler) from vllm.model_executor.layers.typical_acceptance_sampler import ( TypicalAcceptanceSampler) +from vllm.platforms import current_platform from vllm.sequence import (VLLM_INVALID_TOKEN_ID, CompletionSequenceGroupOutput, ExecuteModelRequest, HiddenStates, SequenceGroupMetadata, get_all_seq_ids_and_request_ids) from vllm.spec_decode.batch_expansion import BatchExpansionTop1Scorer -from vllm.spec_decode.draft_model_runner import TP1DraftModelRunner + +if current_platform.is_cuda_alike(): + from vllm.spec_decode.draft_model_runner import TP1DraftModelRunner + from vllm.spec_decode.interfaces import (SpeculativeProposals, SpeculativeScorer, SpeculativeScores) from vllm.spec_decode.medusa_worker import MedusaWorker @@ -36,8 +40,8 @@ get_all_num_logprobs, get_sampled_token_logprobs, nvtx_range, split_batch_by_proposal_len) -from vllm.worker.worker import Worker -from vllm.worker.worker_base import LoraNotSupportedWorkerBase, WorkerBase +from vllm.worker.worker_base import (LoraNotSupportedWorkerBase, WorkerBase, + WorkerWrapperBase) logger = init_logger(__name__) @@ -53,7 +57,11 @@ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker": draft_worker_kwargs = kwargs.copy() kwargs["model_runner_cls"] = TargetModelRunner - target_worker = Worker(*args, **kwargs) + target_worker_config = copy.deepcopy(vllm_config) + target_worker_config.parallel_config.worker_cls =\ + target_worker_config.parallel_config.sd_worker_cls + target_worker = WorkerWrapperBase(vllm_config=target_worker_config) + target_worker.init_worker(*args, **kwargs) # Set the disable_logprobs variable in the TargetModelRunner instance # as per its value specified in the SpeculativeConfig. target_worker.model_runner.disable_logprobs =\ @@ -65,6 +73,8 @@ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker": draft_worker_config.model_config, vllm_config.load_config, ) + speculative_config.draft_parallel_config.worker_cls =\ + draft_worker_config.parallel_config.sd_worker_cls draft_worker_config.parallel_config = speculative_config.draft_parallel_config # noqa # TODO allow draft-model specific load config. 
@@ -125,7 +135,7 @@ class SpecDecodeWorker(LoraNotSupportedWorkerBase): @classmethod def create_worker( cls, - scorer_worker: Worker, + scorer_worker: WorkerBase, draft_worker_kwargs: Dict[str, Any], disable_mqa_scorer: bool, disable_by_batch_size: Optional[int], @@ -145,6 +155,8 @@ def create_worker( draft_parallel_config: ParallelConfig = draft_worker_kwargs[ 'vllm_config'].parallel_config if ngram_prompt_lookup_max > 0: + draft_worker_kwargs[ + "device_type"] = scorer_worker.device_config.device.type proposer_worker = NGramWorker(**draft_worker_kwargs) proposer_worker.set_ngram_window_size(ngram_prompt_lookup_min, ngram_prompt_lookup_max) @@ -158,8 +170,9 @@ def create_worker( proposer_worker = MedusaWorker(**draft_worker_kwargs) else: if draft_tp == 1: - draft_worker_kwargs[ - "model_runner_cls"] = TP1DraftModelRunner + if current_platform.is_cuda_alike(): + draft_worker_kwargs[ + "model_runner_cls"] = TP1DraftModelRunner else: if draft_model_config.hf_config.model_type == "eagle": raise NotImplementedError( @@ -306,8 +319,9 @@ def init_device(self) -> None: self.scorer_worker.load_model() self.proposer_worker.load_model() - self._metrics.init_gpu_tensors(self.rank) - self.spec_decode_sampler.init_gpu_tensors(self.rank) + self._metrics.init_tensors(self.rank, device_type=self.device) + self.spec_decode_sampler.init_tensors(self.rank, + device_type=self.device) scorer_cls: Type[SpeculativeScorer] if self.disable_mqa_scorer: @@ -1111,11 +1125,11 @@ def get_cache_block_size_bytes(self): raise NotImplementedError def start_profile(self): - if isinstance(self.scorer_worker, Worker): + if isinstance(self.scorer_worker, WorkerBase): self.scorer_worker.start_profile() def stop_profile(self): - if isinstance(self.scorer_worker, Worker): + if isinstance(self.scorer_worker, WorkerBase): self.scorer_worker.stop_profile() diff --git a/vllm/spec_decode/target_model_runner.py b/vllm/spec_decode/target_model_runner.py index e61cde5b17f20..56540744b73a9 100644 --- a/vllm/spec_decode/target_model_runner.py +++ b/vllm/spec_decode/target_model_runner.py @@ -1,12 +1,12 @@ from typing import List, Optional -from vllm.config import VllmConfig from vllm.sequence import SequenceGroupMetadata -from vllm.worker.model_runner import (ModelInputForGPUWithSamplingMetadata, - ModelRunner) +from vllm.worker.model_runner_base import (ModelRunnerBase, + ModelRunnerInputBase, + ModelRunnerWrapperBase) -class TargetModelRunner(ModelRunner): +class TargetModelRunner(ModelRunnerWrapperBase): """Specialized model runner for speculative decoding target model. In speculative decoding, the log probabilities selected finally may not be the same ones as selected by the target model sampling. This means @@ -18,32 +18,21 @@ class TargetModelRunner(ModelRunner): requested or not. """ - def __init__( - self, - vllm_config: VllmConfig, - kv_cache_dtype: Optional[str] = "auto", - is_driver_worker: bool = False, - return_hidden_states: bool = False, - ): + def __init__(self, model_runner: ModelRunnerBase): # An internal boolean member variable to indicate if token log # probabilities are needed or not. 
+ super().__init__(model_runner) self.disable_logprobs = True - super().__init__( - vllm_config=vllm_config, - kv_cache_dtype=kv_cache_dtype, - is_driver_worker=is_driver_worker, - return_hidden_states=return_hidden_states, - ) def prepare_model_input( self, seq_group_metadata_list: List[SequenceGroupMetadata], virtual_engine: int = 0, - finished_requests_ids: Optional[List[str]] = None - ) -> ModelInputForGPUWithSamplingMetadata: - model_input: ModelInputForGPUWithSamplingMetadata = super( - ).prepare_model_input(seq_group_metadata_list, virtual_engine, - finished_requests_ids) + finished_requests_ids: Optional[List[str]] = None, + ) -> ModelRunnerInputBase: + model_input: ModelRunnerInputBase =\ + self.model_runner.prepare_model_input( + seq_group_metadata_list, virtual_engine, finished_requests_ids) # If token log probabilities is disabled then skip generating sampler # CPU output. We directly serialize the GPU sampled_token_id tensors # as needed. If log probabilities is enabled then synchronize all the diff --git a/vllm/spec_decode/util.py b/vllm/spec_decode/util.py index 193ef870dfceb..da8706658d09a 100644 --- a/vllm/spec_decode/util.py +++ b/vllm/spec_decode/util.py @@ -5,6 +5,7 @@ import torch from vllm.model_executor.layers.sampler import SamplerOutput +from vllm.platforms import current_platform from vllm.sequence import (CompletionSequenceGroupOutput, Logprob, PromptLogprobs, SequenceGroupMetadata, SequenceOutput) @@ -247,11 +248,14 @@ def nvtx_range(msg, *args, **kwargs): Arguments: msg (string): message to associate with the range """ - torch.cuda.nvtx.range_push(msg.format(*args, **kwargs)) - try: + if current_platform.is_cuda_alike(): + torch.cuda.nvtx.range_push(msg.format(*args, **kwargs)) + try: + yield + finally: + torch.cuda.nvtx.range_pop() + else: yield - finally: - torch.cuda.nvtx.range_pop() class Timer: diff --git a/vllm/worker/cpu_model_runner.py b/vllm/worker/cpu_model_runner.py index b08171d79f002..420aaf8a1b4cd 100644 --- a/vllm/worker/cpu_model_runner.py +++ b/vllm/worker/cpu_model_runner.py @@ -80,6 +80,7 @@ class ModelInputForCPUWithSamplingMetadata(ModelInputForCPU): Used by the ModelRunner. 
""" sampling_metadata: Optional["SamplingMetadata"] = None + is_prompt: Optional[bool] = None def as_broadcastable_tensor_dict(self) -> Dict[str, Any]: tensor_dict = { @@ -395,6 +396,7 @@ def __init__( vllm_config: VllmConfig, kv_cache_dtype: Optional[str] = "auto", is_driver_worker: bool = False, + return_hidden_states: bool = False, *args, **kwargs, ): @@ -403,19 +405,25 @@ def __init__( cache_config = self.cache_config self.is_driver_worker = is_driver_worker + self.return_hidden_states = return_hidden_states self.device = self.device_config.device + self.pin_memory = False self.kv_cache_dtype = kv_cache_dtype self.sliding_window = model_config.get_sliding_window() self.block_size = cache_config.block_size + num_attn_heads = self.model_config.get_num_attention_heads( + self.parallel_config) + needs_attn_backend = (num_attn_heads != 0 + or self.model_config.is_attention_free) self.attn_backend = get_attn_backend( self.model_config.get_head_size(), self.model_config.dtype, self.kv_cache_dtype, self.block_size, self.model_config.is_attention_free, - ) + ) if needs_attn_backend else None # Multi-modal data support self.mm_registry = MULTIMODAL_REGISTRY @@ -444,6 +452,15 @@ def _prepare_model_input_tensors( return builder.build() # type: ignore + # sampler property will be used by spec_decode_worker + @property + def sampler(self): + return self.model.sampler + + @property + def vocab_size(self) -> int: + return self.model_config.get_vocab_size() + class CPUModelRunner(CPUModelRunnerBase[ModelInputForCPUWithSamplingMetadata]): _model_input_cls: Type[ModelInputForCPUWithSamplingMetadata] = ( @@ -480,9 +497,12 @@ def prepare_model_input( pin_memory=False, generators=generators) + is_prompt = (seq_group_metadata_list[0].is_prompt + if seq_group_metadata_list else None) return dataclasses.replace(model_input, sampling_metadata=sampling_metadata, - virtual_engine=virtual_engine) + virtual_engine=virtual_engine, + is_prompt=is_prompt) @torch.no_grad() def execute_model( @@ -491,16 +511,22 @@ def execute_model( kv_caches: List[torch.Tensor], intermediate_tensors: Optional[IntermediateTensors] = None, num_steps: int = 1, + previous_hidden_states: Optional[torch.Tensor] = None, ) -> Optional[List[SamplerOutput]]: if num_steps > 1: raise ValueError( "CPU worker does not support multi-step execution.") model_executable = self.model + multimodal_kwargs = {} if model_input.multi_modal_kwargs is not None: multimodal_kwargs = MultiModalKwargs.as_kwargs( model_input.multi_modal_kwargs, device=self.device) + execute_model_kwargs = {} + if previous_hidden_states is not None: + execute_model_kwargs.update( + {"previous_hidden_states": previous_hidden_states}) with set_forward_context(model_input.attn_metadata, self.vllm_config): hidden_states = model_executable( @@ -509,6 +535,7 @@ def execute_model( kv_caches=kv_caches, attn_metadata=model_input.attn_metadata, intermediate_tensors=intermediate_tensors, + **execute_model_kwargs, **multimodal_kwargs, ) @@ -525,4 +552,12 @@ def execute_model( logits=logits, sampling_metadata=model_input.sampling_metadata, ) + if self.return_hidden_states: + # we only need to pass hidden states of most recent token + if model_input.is_prompt: + output.prefill_hidden_states = hidden_states + output.hidden_states = hidden_states return [output] + + def generate_proposals(self, *args, **kwargs): + return self.model.generate_proposals(*args, **kwargs) diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index bc9164bd9d5df..cf04808b73372 100644 --- 
a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -128,6 +128,7 @@ def __init__( distributed_init_method: str, kv_cache_dtype: Optional[str] = "auto", is_driver_worker: bool = False, + model_runner_cls: Optional[Type[CPUModelRunner]] = None, ) -> None: WorkerBase.__init__(self, vllm_config=vllm_config) @@ -151,6 +152,16 @@ def __init__( else: self.local_omp_cpuid = omp_cpuids.split("|")[rank] + # Return hidden states from target model if the draft model is an + # mlp_speculator + speculative_config = self.speculative_config + model_config = self.model_config + speculative_args = {} if speculative_config is None \ + or (speculative_config.draft_model_config.model == + model_config.model) \ + or (speculative_config.draft_model_config.hf_config.model_type + not in ["medusa", "mlp_speculator", "eagle"]) \ + else {"return_hidden_states": True} ModelRunnerClass: Type[CPUModelRunnerBase] = CPUModelRunner if self.model_config.task == "embedding": ModelRunnerClass = CPUEmbeddingModelRunner @@ -159,7 +170,11 @@ def __init__( self.model_runner: CPUModelRunnerBase = ModelRunnerClass( vllm_config=vllm_config, kv_cache_dtype=kv_cache_dtype, - is_driver_worker=is_driver_worker) + is_driver_worker=is_driver_worker, + **speculative_args, + ) + if model_runner_cls is not None: + self.model_runner = model_runner_cls(self.model_runner) # Uninitialized cache engine. Will be initialized by # initialize_cache. self.cache_engine: List[CPUCacheEngine] @@ -197,7 +212,7 @@ def init_device(self) -> None: ret = torch.ops._C_utils.init_cpu_threads_env(self.local_omp_cpuid) if ret: logger.info(ret) - + self.device = torch.device("cpu") self.init_distributed_environment() # Set random seed. set_random_seed(self.model_config.seed) @@ -297,6 +312,14 @@ def do_metadata_broadcast(self) -> bool: def kv_cache(self) -> Optional[List[List[torch.Tensor]]]: return self.cpu_cache + @property + def vocab_size(self) -> int: + return self.model_runner.vocab_size + + @property + def max_model_len(self) -> int: + return self.model_config.max_model_len + def execute_worker( self, worker_input: WorkerInput, diff --git a/vllm/worker/model_runner_base.py b/vllm/worker/model_runner_base.py index 9e529f86b46bb..cd4770202a186 100644 --- a/vllm/worker/model_runner_base.py +++ b/vllm/worker/model_runner_base.py @@ -289,3 +289,18 @@ def get_generators(self, finished_request_ids: Optional[List[str]] = None): self.generators.pop(request_id, None) return self.generators + + +class ModelRunnerWrapperBase: + """ + The whole point of this class is to lazily initialize the model_runner. 
+ """ + + def __init__( + self, + moderl_runner: ModelRunnerBase, + ) -> None: + self.model_runner: ModelRunnerBase = moderl_runner + + def __getattr__(self, attr): + return getattr(self.model_runner, attr) diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index 80fd7bc3b67cc..24e7bc760b0c0 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -74,9 +74,7 @@ def __init__( else {"return_hidden_states": True} ModelRunnerClass: Type[GPUModelRunnerBase] = ModelRunner - if model_runner_cls is not None: - ModelRunnerClass = model_runner_cls - elif model_config.task == "embedding": + if model_config.task == "embedding": ModelRunnerClass = EmbeddingModelRunner elif self.model_config.is_encoder_decoder: ModelRunnerClass = EncoderDecoderModelRunner @@ -86,6 +84,9 @@ def __init__( is_driver_worker=is_driver_worker, **speculative_args, ) + if model_runner_cls is not None: + self.model_runner = model_runner_cls(self.model_runner) + # Uninitialized cache engine. Will be initialized by # initialize_cache. self.cache_engine: List[CacheEngine] diff --git a/vllm/worker/worker_base.py b/vllm/worker/worker_base.py index e7fec6d17eecd..7aaa8b453cff1 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -466,6 +466,9 @@ def execute_method(self, method, *args, **kwargs): logger.exception(msg) raise e + def __getattr__(self, attr): + return getattr(self.worker, attr) + def extract_previous_hidden_states( data: Union[ExecuteModelRequest, Dict[str, torch.Tensor]]) -> \ From 0a4d96850013eb2c295b25df53177ad2302110ca Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Tue, 26 Nov 2024 18:04:01 -0800 Subject: [PATCH 159/397] [V1] Update interface for idefics3 (#10680) Signed-off-by: Roger Wang --- vllm/model_executor/models/idefics3.py | 73 ++++++++++++++++---------- 1 file changed, 46 insertions(+), 27 deletions(-) diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index 5d176b2a4e416..58f7635275c05 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -39,6 +39,7 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.image import cached_get_image_processor +from vllm.multimodal.inputs import NestedTensors from vllm.sequence import IntermediateTensors, SequenceData from vllm.transformers_utils.processor import cached_get_processor from vllm.utils import is_list_of @@ -597,6 +598,12 @@ def _process_image_input(self, image_input: ImageInputs) -> torch.Tensor: image_features = self._process_image_pixels(image_input) return self.connector(image_features) + def get_input_embeddings( + self, + input_ids: torch.Tensor, + ) -> torch.Tensor: + return self.text_model.get_input_embeddings(input_ids) + def forward( self, input_ids: torch.Tensor, @@ -604,26 +611,8 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, - **kwargs: object, + inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: - if intermediate_tensors is not None: - input_ids = None - inputs_embeds = None - else: - # always pass the input via `inputs_embeds` - # to make sure the computation graph is consistent - image_input = self._parse_and_validate_image_input(**kwargs) - - if image_input is not None: - vision_embeddings = self._process_image_input(image_input) - 
inputs_embeds = self.text_model.get_input_embeddings(input_ids) - - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, - self.image_token_id) - else: - inputs_embeds = self.text_model.get_input_embeddings(input_ids) - input_ids = None hidden_states = self.text_model( input_ids, @@ -718,6 +707,25 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(config.text_config.vocab_size) self.sampler = Sampler() + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self.model._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + vision_embeddings = self.model._process_image_input(image_input) + return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + self.config.image_token_id) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -725,16 +733,27 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[torch.Tensor, IntermediateTensors]: - hidden_states = self.model( - input_ids, - positions, - kv_caches, - attn_metadata, - intermediate_tensors, - **kwargs, - ) + if intermediate_tensors is not None: + inputs_embeds = None + + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. + elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None + + hidden_states = self.model.text_model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) + return hidden_states def compute_logits(self, hidden_states: torch.Tensor, From 1bf905ddaa969e6458fe0d15a1db80318f39fade Mon Sep 17 00:00:00 2001 From: jeongin601 <78595701+jeongin601@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:07:30 +0900 Subject: [PATCH 160/397] [Bugfix][SpecDecode] apply sampling parameters to target probabilities for consistency in rejection sampling. 
(#10198) Signed-off-by: jeongin601 <0200angela@gmail.com> Signed-off-by: jeong_in.bae --- tests/spec_decode/e2e/test_mlp_correctness.py | 2 +- tests/spec_decode/test_batch_expansion.py | 8 ++++++++ vllm/spec_decode/batch_expansion.py | 14 +------------- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/tests/spec_decode/e2e/test_mlp_correctness.py b/tests/spec_decode/e2e/test_mlp_correctness.py index 5ecc0d4e95719..183ff2f5db274 100644 --- a/tests/spec_decode/e2e/test_mlp_correctness.py +++ b/tests/spec_decode/e2e/test_mlp_correctness.py @@ -203,7 +203,7 @@ def test_mlp_e2e_acceptance_rate(vllm_runner, common_llm_kwargs, @pytest.mark.parametrize("test_llm_kwargs", [{"seed": 5}]) @pytest.mark.parametrize("output_len", [64]) @pytest.mark.parametrize("batch_size", [1, 32]) -@pytest.mark.parametrize("temperature", [0.1, 1.0]) +@pytest.mark.parametrize("temperature", [1.0]) @pytest.mark.parametrize("seed", [1]) def test_mlp_e2e_seeded_correctness(vllm_runner, common_llm_kwargs, per_test_common_llm_kwargs, diff --git a/tests/spec_decode/test_batch_expansion.py b/tests/spec_decode/test_batch_expansion.py index 0d6aaa449d856..3504fcf43e361 100644 --- a/tests/spec_decode/test_batch_expansion.py +++ b/tests/spec_decode/test_batch_expansion.py @@ -90,6 +90,14 @@ def test_create_single_target_seq_group_metadata(k: int): ) assert output.request_id == input_seq_group_metadata.request_id + assert output.sampling_params.repetition_penalty == \ + input_seq_group_metadata.sampling_params.repetition_penalty + assert output.sampling_params.temperature == \ + input_seq_group_metadata.sampling_params.temperature + assert output.sampling_params.top_p == \ + input_seq_group_metadata.sampling_params.top_p + assert output.sampling_params.top_k == \ + input_seq_group_metadata.sampling_params.top_k assert len(output.seq_data) == 1 assert output.seq_data[target_seq_id].get_prompt_token_ids() == tuple( prompt_tokens) diff --git a/vllm/spec_decode/batch_expansion.py b/vllm/spec_decode/batch_expansion.py index 25ef27b8378f0..01b9cdad963da 100644 --- a/vllm/spec_decode/batch_expansion.py +++ b/vllm/spec_decode/batch_expansion.py @@ -307,28 +307,16 @@ def _create_target_seq_group_metadata( token_ids_to_score = self._get_token_ids_to_score( proposal_token_ids[batch_index]) - # Use simpler sampling parameters apart from for final token - # (in particular don't do seeded sampling) since those sampled tokens - # aren't used. - # We don't replace the sampling_params in the greedy case because - # this also controls whether the probs get modified in the sampler - # (see use of _modify_greedy_probs_inplace there). 
sampling_params = input_seq_group_metadata.sampling_params - non_bonus_sampling_params = DEFAULT_SIMPLE_SAMPLING_PARAMS \ - if sampling_params.temperature else sampling_params - target_seq_group_metadata_list: List[SequenceGroupMetadata] = [] - last_index = len(token_ids_to_score) - 1 for i, token_ids in enumerate(token_ids_to_score): - target_sampling_params = sampling_params if i == last_index \ - else non_bonus_sampling_params target_seq_group_metadata_list.append( self._create_single_target_seq_group_metadata( input_seq_group_metadata, input_seq_id, next(target_seq_ids_iter), token_ids, - sampling_params=target_sampling_params, + sampling_params=sampling_params, )) return target_seq_group_metadata_list From cfb3bf25fb981494fa6c575fb0714388c9df99b0 Mon Sep 17 00:00:00 2001 From: yansh97 Date: Wed, 27 Nov 2024 13:55:23 +0800 Subject: [PATCH 161/397] [bugfix] fix the default value of llm_int8_threshold in BitsAndBytesConfig (#10657) --- vllm/model_executor/layers/quantization/bitsandbytes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/layers/quantization/bitsandbytes.py b/vllm/model_executor/layers/quantization/bitsandbytes.py index 6a0de3034142a..e01c713dd14db 100644 --- a/vllm/model_executor/layers/quantization/bitsandbytes.py +++ b/vllm/model_executor/layers/quantization/bitsandbytes.py @@ -26,7 +26,7 @@ def __init__( llm_int8_enable_fp32_cpu_offload: bool = False, llm_int8_has_fp16_weight: bool = False, llm_int8_skip_modules: Optional[List[str]] = None, - llm_int8_threshold: float = 0.0, + llm_int8_threshold: float = 6.0, ) -> None: self.load_in_8bit = load_in_8bit @@ -103,7 +103,7 @@ def get_safe_value(config, keys, default_value=None): ["llm_int8_skip_modules"], default_value=[]) llm_int8_threshold = get_safe_value(config, ["llm_int8_threshold"], - default_value=0.0) + default_value=6.0) return cls( load_in_8bit=load_in_8bit, From e85250b1d164c9975816fa7aaf591aa5abad577d Mon Sep 17 00:00:00 2001 From: Kunshang Ji Date: Wed, 27 Nov 2024 14:49:40 +0800 Subject: [PATCH 162/397] [Hardware][Gaudi]add get_name method for HPUAttentionBackend (#10667) Signed-off-by: Kunshang Ji --- vllm/attention/backends/hpu_attn.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/vllm/attention/backends/hpu_attn.py b/vllm/attention/backends/hpu_attn.py index 4a3ddd5db94e5..5359941d41fde 100644 --- a/vllm/attention/backends/hpu_attn.py +++ b/vllm/attention/backends/hpu_attn.py @@ -22,6 +22,10 @@ class HPUAttentionBackend(AttentionBackend): + @staticmethod + def get_name() -> str: + return "HPU_ATTN" + @staticmethod def get_impl_cls() -> Type["HPUAttentionImpl"]: return HPUAttentionImpl From 15cc2a9f1acb70b68366da0a6d2a4549da3d32f4 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Wed, 27 Nov 2024 14:54:12 +0800 Subject: [PATCH 163/397] [Misc]Further reduce BNB static variable (#10597) Signed-off-by: Jee Jee Li --- vllm/model_executor/model_loader/loader.py | 218 ++++++++++++--------- vllm/model_executor/models/baichuan.py | 8 - vllm/model_executor/models/falcon.py | 6 - vllm/model_executor/models/gemma.py | 9 - vllm/model_executor/models/gemma2.py | 9 - vllm/model_executor/models/idefics3.py | 15 -- vllm/model_executor/models/llama.py | 9 - vllm/model_executor/models/minicpmv.py | 34 ---- vllm/model_executor/models/mllama.py | 14 -- vllm/model_executor/models/opt.py | 3 - vllm/model_executor/models/phi.py | 3 - vllm/model_executor/models/phi3.py | 6 - vllm/model_executor/models/qwen.py | 7 +- vllm/model_executor/models/qwen2.py | 9 - 14 files changed, 131 
insertions(+), 219 deletions(-) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 441dd409b4f9d..37c2d789030b6 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -28,7 +28,8 @@ get_tensor_model_parallel_world_size) from vllm.envs import VLLM_USE_MODELSCOPE from vllm.logger import init_logger -from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, +from vllm.model_executor.layers.linear import (LinearBase, + MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, RowParallelLinear) @@ -78,12 +79,14 @@ def device_loading_context(module: torch.nn.Module, original_device: torch.device = original_device_states[name] if original_device.type == "cpu": # `torch.empty_like` does not support `pin_memory` argument - cpu_data = torch.empty_strided(size=p.data.size(), - stride=p.data.stride(), - dtype=p.data.dtype, - layout=p.data.layout, - device="cpu", - pin_memory=pin_memory) + cpu_data = torch.empty_strided( + size=p.data.size(), + stride=p.data.stride(), + dtype=p.data.dtype, + layout=p.data.layout, + device="cpu", + pin_memory=pin_memory, + ) cpu_data.copy_(p.data) p.data = cpu_data else: @@ -112,7 +115,8 @@ def _initialize_model(vllm_config: VllmConfig, prefix: str = "") -> nn.Module: logger.warning(msg) logger.warning( "Trying to guess the arguments for old-style model class %s", - model_class) + model_class, + ) # try to be compatible with old-style model class kwargs = {} if "prefix" in all_params: @@ -198,14 +202,17 @@ def _maybe_download_from_modelscope( return model_path return None - def _prepare_weights(self, model_name_or_path: str, - revision: Optional[str], - fall_back_to_pt: bool) -> Tuple[str, List[str], bool]: + def _prepare_weights( + self, + model_name_or_path: str, + revision: Optional[str], + fall_back_to_pt: bool, + ) -> Tuple[str, List[str], bool]: """Prepare weights for the model. If the model is not local, it will be downloaded.""" - model_name_or_path = self._maybe_download_from_modelscope( - model_name_or_path, revision) or model_name_or_path + model_name_or_path = (self._maybe_download_from_modelscope( + model_name_or_path, revision) or model_name_or_path) is_local = os.path.isdir(model_name_or_path) load_format = self.load_config.load_format @@ -258,8 +265,11 @@ def _prepare_weights(self, model_name_or_path: str, # any files not found in the index. 
if not is_local: download_safetensors_index_file_from_hf( - model_name_or_path, index_file, - self.load_config.download_dir, revision) + model_name_or_path, + index_file, + self.load_config.download_dir, + revision, + ) hf_weights_files = filter_duplicate_safetensors_files( hf_weights_files, hf_folder, index_file) else: @@ -282,8 +292,11 @@ def _get_weights_iterator( # Currently np_cache only support *.bin checkpoints assert use_safetensors is False weights_iterator = np_cache_weights_iterator( - source.model_or_path, self.load_config.download_dir, hf_folder, - hf_weights_files) + source.model_or_path, + self.load_config.download_dir, + hf_folder, + hf_weights_files, + ) elif use_safetensors: weights_iterator = safetensors_weights_iterator(hf_weights_files) else: @@ -310,17 +323,19 @@ def _get_all_weights( model_config: ModelConfig, model: nn.Module, ) -> Generator[Tuple[str, torch.Tensor], None, None]: - primary_weights = DefaultModelLoader.Source( model_config.model, model_config.revision, prefix="", fall_back_to_pt=getattr(model, "fall_back_to_pt_during_load", - True)) + True), + ) yield from self._get_weights_iterator(primary_weights) - secondary_weights = cast(Iterable[DefaultModelLoader.Source], - getattr(model, "secondary_weights", ())) + secondary_weights = cast( + Iterable[DefaultModelLoader.Source], + getattr(model, "secondary_weights", ()), + ) for source in secondary_weights: yield from self._get_weights_iterator(source) @@ -416,7 +431,7 @@ def _verify_config(self, model_config: ModelConfig, self.tensorizer_config.verify_with_parallel_config(parallel_config) def _get_weights_iterator( - self) -> Generator[Tuple[str, torch.Tensor], None, None]: + self, ) -> Generator[Tuple[str, torch.Tensor], None, None]: tensorizer_args = self.tensorizer_config._construct_tensorizer_args() return tensorizer_weights_iterator(tensorizer_args) @@ -479,9 +494,10 @@ def load_model(self, vllm_config: VllmConfig) -> nn.Module: if parallel_config.tensor_parallel_size > 1: from vllm.distributed import get_tensor_model_parallel_rank - self.tensorizer_config.tensorizer_uri = \ - self.tensorizer_config.tensorizer_uri \ - % get_tensor_model_parallel_rank() + + self.tensorizer_config.tensorizer_uri = ( + self.tensorizer_config.tensorizer_uri % + get_tensor_model_parallel_rank()) if is_vllm_tensorized(self.tensorizer_config): return self._load_model_serialized(vllm_config=vllm_config) @@ -520,13 +536,13 @@ def __init__(self, load_config: LoadConfig): @staticmethod def _filter_subtensors( - tensors: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: + tensors: Dict[str, torch.Tensor], ) -> Dict[str, torch.Tensor]: """ Filter out all tensors that share the same memory or a subset of the memory of another tensor. 
""" - same_storage_groups: Dict[Any, List[Tuple[ - str, torch.Tensor]]] = collections.defaultdict(list) + same_storage_groups: Dict[Any, List[Tuple[str, torch.Tensor]]] = ( + collections.defaultdict(list)) for key, tensor in tensors.items(): if tensor.numel(): ptr = tensor.untyped_storage().data_ptr() @@ -615,8 +631,11 @@ def load_model(self, vllm_config: VllmConfig) -> nn.Module: if tensor.shape != param_shape: logger.warning( "loading tensor of shape %s into " - "parameter '%s' of shape %s", tensor.shape, - key, param_shape) + "parameter '%s' of shape %s", + tensor.shape, + key, + param_shape, + ) param_data.copy_(tensor) state_dict.pop(key) if state_dict: @@ -634,6 +653,7 @@ def save_model( from safetensors.torch import save_file from vllm.distributed import get_tensor_model_parallel_rank + if pattern is None: pattern = ShardedStateLoader.DEFAULT_PATTERN rank = get_tensor_model_parallel_rank() @@ -667,24 +687,6 @@ class BitsAndBytesModelLoader(BaseModelLoader): possible_config_file_names = ["adapter_config.json"] - default_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - '.fc1.', - '.fc2.', - '.dense.', - '.query_key_value.', - '.qkv_proj.', - '.dense_h_to_4h.', - '.dense_4h_to_h.', - '.out_proj.', - ] - def __init__(self, load_config: LoadConfig): super().__init__(load_config) @@ -709,6 +711,11 @@ def __init__(self, load_config: LoadConfig): with open(config_file_path) as f: config = json.load(f) self.target_modules = config["target_modules"] + # TODO: target_modules could be either a list or a regex string. + # We need to handle both cases. + assert isinstance(self.target_modules, + list), "Unsupported target_modules: " + f"{self.target_modules}" def _get_config_file(self, qlora_adapter: str) -> str: is_local = os.path.isdir(qlora_adapter) @@ -734,12 +741,13 @@ def _get_config_file(self, qlora_adapter: str) -> str: return config_file_path def _get_weight_files( - self, - model_name_or_path: str, - allowed_patterns: List[str], - revision: Optional[str] = None) -> Tuple[List[str], str]: - """Retrieve weight files. Download the files if necessary. - + self, + model_name_or_path: str, + allowed_patterns: List[str], + revision: Optional[str] = None, + ) -> Tuple[List[str], str]: + """Retrieve weight files. Download the files if necessary. + Return the weight files and the file pattern.""" is_local = os.path.isdir(model_name_or_path) @@ -806,6 +814,7 @@ def _get_quantized_weights_iterator( # only load the bitsandbytes module when needed try: import bitsandbytes + if bitsandbytes.__version__ < "0.44.0": raise ImportError("bitsandbytes version is wrong. 
Please " "install bitsandbytes>=0.44.0.") @@ -839,8 +848,11 @@ def _is_8bit_weight_name(self, weight_name: str): def _is_4bit_weight_name(self, weight_name: str): quantized_suffix = { - "absmax", "quant_map", "nested_absmax", "nested_quant_map", - "bitsandbytes" + "absmax", + "quant_map", + "nested_absmax", + "nested_quant_map", + "bitsandbytes", } suffix = weight_name.split(".")[-1] return any(q_suffix in suffix for q_suffix in quantized_suffix) @@ -857,7 +869,6 @@ def _quantized_8bit_generator(self, hf_weights_files, use_safetensors, for weight_name, weight_tensor in self._hf_weight_iter( hf_weights_files, use_safetensors): - if self._is_8bit_weight_name(weight_name): continue @@ -899,14 +910,13 @@ def _parse_quant_state(param_name: str, # pre quantized weights would have a quant_state for weight_name, weight_tensor in self._hf_weight_iter( hf_weights_files, use_safetensors): - if self._is_4bit_weight_name(weight_name): continue - if (f"{weight_name}.quant_state.bitsandbytes__nf4" \ - in temp_state_dict) or \ - (f"{weight_name}.quant_state.bitsandbytes__fp4" \ - in temp_state_dict): + if (f"{weight_name}.quant_state.bitsandbytes__nf4" + in temp_state_dict) or ( + f"{weight_name}.quant_state.bitsandbytes__fp4" + in temp_state_dict): quant_state = _parse_quant_state(weight_name, temp_state_dict) quant_state_dict[weight_name] = quant_state yield weight_name, weight_tensor @@ -916,12 +926,12 @@ def _parse_quant_state(param_name: str, def _unquantized_generator(self, hf_weights_files, use_safetensors, quant_state_dict) -> Generator: from bitsandbytes.functional import quantize_4bit + tp_size = get_tensor_model_parallel_world_size() tp_rank = get_tensor_model_parallel_rank() for weight_name, weight_tensor in self._hf_weight_iter( hf_weights_files, use_safetensors): - if any(target_module in weight_name for target_module in self.target_modules) and weight_name.endswith(".weight"): # Without sharding @@ -954,12 +964,11 @@ def _unquantized_generator(self, hf_weights_files, use_safetensors, # get the start/end index of each shard weight tensor total_start_index = list( itertools.accumulate([0] + total_shard_sizes))[:-1] - shard_weights_index = [ - (idx + size // tp_size * tp_rank, - idx + size // tp_size * (tp_rank + 1)) - for idx, size in zip(total_start_index, - total_shard_sizes) - ] + shard_weights_index = [( + idx + size // tp_size * tp_rank, + idx + size // tp_size * (tp_rank + 1), + ) for idx, size in zip(total_start_index, + total_shard_sizes)] # slice and reorder the weight tensor weight_tensor = [ weight_tensor[start_index:end_index, ...] @@ -989,7 +998,8 @@ def _unquantized_generator(self, hf_weights_files, use_safetensors, processed_weight, quant_state = quantize_4bit( loaded_weight, compress_statistics=True, - quant_type="nf4") + quant_type="nf4", + ) quant_state_dict[weight_name] = quant_state else: @@ -997,28 +1007,58 @@ def _unquantized_generator(self, hf_weights_files, use_safetensors, yield weight_name, processed_weight + def _get_bnb_target_modules(self, model: nn.Module) -> None: + + # TODO: Maybe we can replace bitsandbytes_stacked_params_mapping with + # packed_modules_mapping. 
+ inverse_stacked_mapping: Dict[str, List[str]] = {} + for orig, ( + packed, + idx, + ) in model.bitsandbytes_stacked_params_mapping.items(): + if packed not in inverse_stacked_mapping: + inverse_stacked_mapping[packed] = [] + inverse_stacked_mapping[packed].insert(idx, orig) + + linear_module_lst = [] + for name, module in model.named_modules(): + if isinstance(module, (LinearBase, )): + last_name = name.split(".")[-1] + if sub_modules := inverse_stacked_mapping.get(last_name, []): + # Map vllm's names to transformers' names. + for sub_name in sub_modules: + linear_module_lst.append( + name.replace(last_name, sub_name)) + else: + linear_module_lst.append(name) + if self.target_modules: + # Update self.target_modules + self.target_modules = [ + qual_name for qual_name in linear_module_lst + if any(t in qual_name for t in self.target_modules) + ] + else: + self.target_modules = linear_module_lst + assert (self.target_modules + ), "vllm currently does not support BNB quantization for" + f" {type(model).__name__}" + def _load_weights(self, model_config: ModelConfig, model: nn.Module) -> None: - if not hasattr(model, 'load_weights'): + if not hasattr(model, "load_weights"): raise AttributeError( "The required method 'load_weights' is not defined in class" f" {type(model).__name__}.") - if not hasattr(model, 'bitsandbytes_stacked_params_mapping'): + if not hasattr(model, "bitsandbytes_stacked_params_mapping"): raise AttributeError( f"Model {type(model).__name__} does not support BitsAndBytes " "quantization yet.") - if len(self.target_modules) == 0: - if hasattr(model, 'default_bitsandbytes_target_modules'): - self.target_modules = model.default_bitsandbytes_target_modules - else: - self.target_modules = self.default_target_modules - # Modules whose weights might have fused on disk # we need their output_sizes to make shard in flight correctly with TP self.maybe_fused_weights_modules: Dict[str, List[int]] = {} - + self._get_bnb_target_modules(model) for name, module in model.named_modules(): # Some modules like `ReplicatedLinear` should not have their weights # sharded. 
The reason for implementing it this way is to avoid new @@ -1046,7 +1086,7 @@ def _load_weights(self, model_config: ModelConfig, pre_quant = False if quant_config is not None: - quant_method = quant_config.get('quant_method') + quant_method = quant_config.get("quant_method") if quant_method == "bitsandbytes": pre_quant = True else: @@ -1063,11 +1103,12 @@ def _load_weights(self, model_config: ModelConfig, load_8bit = False if pre_quant: - load_8bit = quant_config.get('load_in_8bit', False) + load_8bit = quant_config.get("load_in_8bit", False) - qweight_iterator, quant_state_dict = \ - self._get_quantized_weights_iterator( - model_config.model, model_config.revision, pre_quant, load_8bit) + qweight_iterator, quant_state_dict = ( + self._get_quantized_weights_iterator(model_config.model, + model_config.revision, + pre_quant, load_8bit)) model.load_weights(qweight_iterator) @@ -1078,6 +1119,7 @@ def _load_weights(self, model_config: ModelConfig, # TODO: Change this lazy import to normal import # after the checks are updated to run on a new version from vllm.model_executor.models.utils import is_pp_missing_parameter + for quant_param_name in quant_state_dict: if is_pp_missing_parameter(quant_param_name, model): continue @@ -1086,9 +1128,9 @@ def _load_weights(self, model_config: ModelConfig, shard_index = 0 for shard_name, ( - weight_name, index + weight_name, + index, ) in model.bitsandbytes_stacked_params_mapping.items(): - shard_pos = quant_param_name.find(shard_name) # Some models, such as MiniCPM V2.5/2.6, contain both # module names 'kv_proj' and 'qkv_proj'. To prevent 'kv_proj' @@ -1123,8 +1165,8 @@ def _load_weights(self, model_config: ModelConfig, num_elements = [0] * len(quant_states) for seq, quant_state in quant_states.items(): - num_elements[seq] = math.prod( - quant_state.shape) // pack_ratio + num_elements[seq] = (math.prod(quant_state.shape) // + pack_ratio) offsets = np.concatenate(([0], np.cumsum(num_elements))) set_weight_attrs(param, {"bnb_shard_offsets": offsets}) diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index 39cb5a8b2cbbe..5e68b7f165bf4 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -351,14 +351,6 @@ class BaiChuanBaseForCausalLM(nn.Module, SupportsLoRA, SupportsPP): embedding_padding_modules = [] # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".W_pack.", - ".o_proj.", - ".down_proj.", - ".up_proj.", - ".gate_proj.", - ".up_proj.", - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "gate_proj": ("gate_up_proj", 0), diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py index 096ad32b38e86..8660cf79b9cdb 100644 --- a/vllm/model_executor/models/falcon.py +++ b/vllm/model_executor/models/falcon.py @@ -412,12 +412,6 @@ class FalconForCausalLM(nn.Module, SupportsPP): # BitandBytes specific attributes bitsandbytes_stacked_params_mapping = {} - default_bitsandbytes_target_modules = [ - ".query_key_value.", - ".dense.", - ".dense_h_to_4h.", - ".dense_4h_to_h.", - ] def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index 131e9af139c2a..b28715c48adfb 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -350,15 +350,6 @@ class GemmaForCausalLM(nn.Module, SupportsLoRA, SupportsPP): "down_proj", ] # BitandBytes specific attributes 
- default_bitsandbytes_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "q_proj": ("qkv_proj", 0), diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index d229eb74669ee..c93223c740272 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -386,15 +386,6 @@ class Gemma2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP): embedding_padding_modules = [] # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "q_proj": ("qkv_proj", 0), diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index 58f7635275c05..014e27bc869d4 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -656,21 +656,6 @@ class Idefics3ForConditionalGeneration(nn.Module, SupportsMultiModal, ] # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - # vision_model - ".fc1.", - ".fc2.", - ".out_proj.", - # connector - ".proj.", - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "q_proj": ("qkv_proj", 0), diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 355b2f3ef8b28..7cc5547b4a4d5 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -463,15 +463,6 @@ class LlamaForCausalLM(nn.Module, SupportsLoRA, SupportsPP): embedding_padding_modules = ["lm_head"] # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "q_proj": ("qkv_proj", 0), diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index 99bf1d42d0355..aacce477e0460 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -822,25 +822,6 @@ class MiniCPMV2_5(MiniCPMVBaseModel, SupportsLoRA): ] # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - # vision encoder - ".fc1.", - ".fc2.", - # Currently, vllm does not support BNB quantization for the `out_proj` - # of the resampler, so it's necessary to distinguish between the - # vision encoder and the resampler's out_proj. The same applies to - # MiniCPMV2_6. 
- ".self_attn.out_proj.", # vision encoder out_proj - # resampler - ".kv_proj.", - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "q_proj": ("qkv_proj", 0), @@ -964,21 +945,6 @@ class MiniCPMV2_6(MiniCPMVBaseModel, SupportsLoRA): ] # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - # vision encoder - ".fc1.", - ".fc2.", - ".self_attn.out_proj.", - # resampler - ".kv_proj.", - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "q_proj": ("qkv_proj", 0), diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py index 9e6634a9a7579..6536f9807730c 100644 --- a/vllm/model_executor/models/mllama.py +++ b/vllm/model_executor/models/mllama.py @@ -1104,20 +1104,6 @@ def forward( @INPUT_REGISTRY.register_input_processor(input_processor_for_mllama) class MllamaForConditionalGeneration(nn.Module, SupportsMultiModal): # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - ".fc1.", - ".fc2.", - # The `multi_modal_projector` is at the top level of the model, - # so we can't add a dot in front of it. - "multi_modal_projector." - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "q_proj": ("qkv_proj", 0), diff --git a/vllm/model_executor/models/opt.py b/vllm/model_executor/models/opt.py index db85a494980a7..7edafcd20b5db 100644 --- a/vllm/model_executor/models/opt.py +++ b/vllm/model_executor/models/opt.py @@ -337,9 +337,6 @@ class OPTForCausalLM(nn.Module, SupportsPP): "k_proj": ("qkv_proj", 1), "v_proj": ("qkv_proj", 2), } - default_bitsandbytes_target_modules = [ - ".q_proj.", ".k_proj.", ".v_proj.", ".out_proj.", ".fc1.", ".fc2." - ] def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py index 998d3723a0d7d..f9e972688ddd1 100644 --- a/vllm/model_executor/models/phi.py +++ b/vllm/model_executor/models/phi.py @@ -286,9 +286,6 @@ class PhiForCausalLM(nn.Module, SupportsLoRA, SupportsPP): "k_proj": ("qkv_proj", 1), "v_proj": ("qkv_proj", 2), } - default_bitsandbytes_target_modules = [ - ".q_proj.", ".k_proj.", ".v_proj.", ".fc1.", ".fc2.", ".dense." - ] embedding_modules = {} embedding_padding_modules = [] diff --git a/vllm/model_executor/models/phi3.py b/vllm/model_executor/models/phi3.py index 54158bc141235..937858ee3b8c2 100644 --- a/vllm/model_executor/models/phi3.py +++ b/vllm/model_executor/models/phi3.py @@ -16,11 +16,5 @@ class Phi3ForCausalLM(LlamaForCausalLM): } # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".gate_up_proj.", - ".down_proj.", - ".qkv_proj.", - ".o_proj.", - ] # Initialize an empty dict when there is no stacked parameter mapping. 
bitsandbytes_stacked_params_mapping = {} diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py index 8f001200308fe..63d1374ab4092 100644 --- a/vllm/model_executor/models/qwen.py +++ b/vllm/model_executor/models/qwen.py @@ -1028,12 +1028,7 @@ class QWenLLM(QWenBaseModel): embedding_modules = {} embedding_padding_modules = [] - default_bitsandbytes_target_modules = [ - ".c_attn.", - ".c_proj.", - ".w1.", - ".w2.", - ] + # BitandBytes specific attributes bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "w2": ("gate_up_proj", 0), diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 46640226d4cf8..9f706610a129a 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -419,15 +419,6 @@ class Qwen2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP): embedding_padding_modules = [] # BitandBytes specific attributes - default_bitsandbytes_target_modules = [ - ".gate_proj.", - ".down_proj.", - ".up_proj.", - ".q_proj.", - ".k_proj.", - ".v_proj.", - ".o_proj.", - ] bitsandbytes_stacked_params_mapping = { # shard_name, weight_name, index "q_proj": ("qkv_proj", 0), From e2251109c746f0d08ab9b37b5abcf44ca105d426 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Wed, 27 Nov 2024 01:55:32 -0500 Subject: [PATCH 164/397] [Kernel] Remove if-else with identical branches in marlin 2:4 (#10687) Signed-off-by: Tyler Michael Smith --- .../marlin/sparse/marlin_24_cuda_kernel.cu | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu b/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu index 8fce76eb52f9b..17837351324be 100644 --- a/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu +++ b/csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu @@ -296,13 +296,9 @@ __global__ void Marlin_24( // We use a different scale layout for grouped and column-wise quantization as // we scale a `half2` tile in column-major layout in the former and in // row-major in the latter case. 
- if (group_blocks != -1) { - s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + - (threadIdx.x % 32) / 4; - } else { - s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + - (threadIdx.x % 32) / 4; - } + s_sh_rd = 8 * ((threadIdx.x / 32) % (thread_n_blocks / 4)) + + (threadIdx.x % 32) / 4; // Note that in the original Marlin kernel + // this is (threadIdx.x % 32) / 4 // Precompute which thread should not read memory in which iterations; this is // needed if there are more threads than required for a certain tilesize or From 1209261e937f7cc5a933da48d625d17e6ee8eea9 Mon Sep 17 00:00:00 2001 From: shunxing12345 <168084185+shunxing12345@users.noreply.github.com> Date: Wed, 27 Nov 2024 19:32:35 +0800 Subject: [PATCH 165/397] [Model] Support telechat2 (#10311) Signed-off-by: Isotr0py <2037008807@qq.com> Co-authored-by: xiangw2 Co-authored-by: Isotr0py <2037008807@qq.com> --- docs/source/models/supported_models.rst | 5 + tests/models/registry.py | 2 + vllm/model_executor/models/llama.py | 6 +- vllm/model_executor/models/registry.py | 2 + vllm/model_executor/models/telechat2.py | 131 +++++++++++++++++++ vllm/transformers_utils/config.py | 4 +- vllm/transformers_utils/configs/__init__.py | 2 + vllm/transformers_utils/configs/telechat2.py | 61 +++++++++ 8 files changed, 210 insertions(+), 3 deletions(-) create mode 100644 vllm/model_executor/models/telechat2.py create mode 100644 vllm/transformers_utils/configs/telechat2.py diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index b5cbe6915d581..c5fbb30b24e28 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -309,6 +309,11 @@ Text Generation - :code:`upstage/solar-pro-preview-instruct`, etc. - ✅︎ - ✅︎ + * - :code:`TeleChat2ForCausalLM` + - TeleChat2 + - :code:`TeleAI/TeleChat2-3B`, :code:`TeleAI/TeleChat2-7B`, :code:`TeleAI/TeleChat2-35B`, etc. + - ✅︎ + - ✅︎ * - :code:`XverseForCausalLM` - XVERSE - :code:`xverse/XVERSE-7B-Chat`, :code:`xverse/XVERSE-13B-Chat`, :code:`xverse/XVERSE-65B-Chat`, etc. 
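
This patch wires TeleChat2 up by reusing the existing Llama implementation: the Telechat2Config added further down exposes TeleChat2's native field names (n_layer, n_head, ffn_hidden_size) under the attribute names the shared code expects via attribute_map. A minimal, self-contained sketch of that mechanism follows; the toy config class is purely illustrative and is not the class added in this patch.

from transformers import PretrainedConfig

class TinyTelechatConfig(PretrainedConfig):
    # Map the generic HF attribute names onto TeleChat2's native field names.
    model_type = "telechat"
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(self, n_layer=30, n_head=32, **kwargs):
        self.n_layer = n_layer
        self.n_head = n_head
        super().__init__(**kwargs)

cfg = TinyTelechatConfig()
# Code written against the generic names keeps working unchanged:
assert cfg.num_hidden_layers == cfg.n_layer == 30
assert cfg.num_attention_heads == cfg.n_head == 32
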
diff --git a/tests/models/registry.py b/tests/models/registry.py index 865e90b3f8b0e..a93bfe907e0d7 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -115,6 +115,8 @@ class _HfExamplesInfo: "StableLmForCausalLM": _HfExamplesInfo("stabilityai/stablelm-3b-4e1t"), "Starcoder2ForCausalLM": _HfExamplesInfo("bigcode/starcoder2-3b"), "SolarForCausalLM": _HfExamplesInfo("upstage/solar-pro-preview-instruct"), + "TeleChat2ForCausalLM": _HfExamplesInfo("Tele-AI/TeleChat2-3B", + trust_remote_code=True), "XverseForCausalLM": _HfExamplesInfo("xverse/XVERSE-7B-Chat", is_available_online=False, trust_remote_code=True), diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 7cc5547b4a4d5..fffb3fe53b94c 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -501,8 +501,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.config = config self.lora_config = lora_config - self.model = LlamaModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + self.model = self._init_model(vllm_config=vllm_config, prefix=prefix) if get_pp_group().is_last_rank: self.unpadded_vocab_size = config.vocab_size if lora_config: @@ -539,6 +538,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): normalize=False, softmax=False) + def _init_model(self, vllm_config: VllmConfig, prefix: str = ""): + return LlamaModel(vllm_config=vllm_config, prefix=prefix) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index f5a02a5b25ca2..4462f6ed55a9c 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -91,6 +91,7 @@ "StableLmForCausalLM": ("stablelm", "StablelmForCausalLM"), "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"), "SolarForCausalLM": ("solar", "SolarForCausalLM"), + "TeleChat2ForCausalLM": ("telechat2", "TeleChat2ForCausalLM"), "XverseForCausalLM": ("xverse", "XverseForCausalLM"), # [Encoder-decoder] "BartModel": ("bart", "BartForConditionalGeneration"), @@ -118,6 +119,7 @@ "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"), "Qwen2ForRewardModel": ("qwen2_rm", "Qwen2ForRewardModel"), "Qwen2ForSequenceClassification": ("qwen2_cls", "Qwen2ForSequenceClassification"), # noqa: E501 + "TeleChat2ForCausalLM": ("telechat2", "TeleChat2ForCausalLM"), # [Multimodal] "LlavaNextForConditionalGeneration": ("llava_next", "LlavaNextForConditionalGeneration"), # noqa: E501 "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"), diff --git a/vllm/model_executor/models/telechat2.py b/vllm/model_executor/models/telechat2.py new file mode 100644 index 0000000000000..39c9103527f01 --- /dev/null +++ b/vllm/model_executor/models/telechat2.py @@ -0,0 +1,131 @@ +# Copyright 2023 The vLLM team. +# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Iterable, Set, Tuple + +import torch + +from vllm.config import VllmConfig +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.models.llama import LlamaForCausalLM, LlamaModel + +from .utils import (AutoWeightsLoader, PPMissingLayer, WeightsMapper, + is_pp_missing_parameter) + + +class TeleChat2Model(LlamaModel): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + # 1. Initialize the LlamaModel with bias + vllm_config.model_config.hf_config.bias = True + vllm_config.model_config.hf_config.mlp_bias = True + super().__init__(vllm_config=vllm_config, prefix=prefix) + # 2. Remove the bias from the qkv_proj and gate_up_proj based on config + # Telechat2's gate_up_proj and qkv_proj don't have bias + # see: https://github.com/vllm-project/vllm/pull/10311#issuecomment-2490297566 + for layer in self.layers: + if not isinstance(layer, PPMissingLayer): + layer.self_attn.qkv_proj.bias = None + layer.self_attn.qkv_proj.skip_bias_add = True + layer.mlp.gate_up_proj.bias = None + layer.mlp.gate_up_proj.skip_bias_add = True + + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + stacked_params_mapping = [ + ('gate_up_proj', 'gate_proj', 0), + ('gate_up_proj', 'up_proj', 1), + ] + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + total_num_heads = self.config.n_head + head_dim = self.config.hidden_size // total_num_heads + for name, loaded_weight in weights: + if "self_attn.key_value" in name: + k_weight = [] + v_weight = [] + for i in range(total_num_heads): + start = i * head_dim * 2 + k_weight.append(loaded_weight[start:start + head_dim, :]) + v_weight.append(loaded_weight[start + head_dim:start + + 2 * head_dim:]) + k_weight = torch.cat(k_weight, dim=0) + v_weight = torch.cat(v_weight, dim=0) + name = name.replace("key_value", "qkv_proj") + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, k_weight, "k") + weight_loader(param, v_weight, "v") + elif "query" in name: + name = name.replace("query", "qkv_proj") + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, "q") + else: + for param_name, weight_name, shard_id in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + + +class TeleChat2ForCausalLM(LlamaForCausalLM): + + def _init_model(self, vllm_config: VllmConfig, prefix: str = ""): + return TeleChat2Model(vllm_config=vllm_config, prefix=prefix) + + def load_weights(self, 
weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + "transformer.": "model.", + }, + orig_to_new_substr={ + ".h.": ".layers.", + ".self_attention.": ".self_attn.", + ".word_embeddings.": ".embed_tokens.", + ".dense.": ".o_proj.", + ".ln_f.": ".norm.", + }, + ) + loader = AutoWeightsLoader( + self, + skip_prefixes=(["lm_head."] + if self.config.tie_word_embeddings else None), + ) + return loader.load_weights(weights, mapper=hf_to_vllm_mapper) diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 4c096acdf2035..3da99bcbee9ae 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -29,7 +29,8 @@ MLPSpeculatorConfig, MPTConfig, NemotronConfig, NVLM_D_Config, Olmo2Config, RWConfig, - SolarConfig, UltravoxConfig) + SolarConfig, Telechat2Config, + UltravoxConfig) # yapf: enable from vllm.transformers_utils.utils import check_gguf_file from vllm.utils import resolve_obj_by_qualname @@ -64,6 +65,7 @@ "NVLM_D": NVLM_D_Config, "olmo2": Olmo2Config, "solar": SolarConfig, + "telechat": Telechat2Config, "ultravox": UltravoxConfig, **_CONFIG_REGISTRY_OVERRIDE_HF } diff --git a/vllm/transformers_utils/configs/__init__.py b/vllm/transformers_utils/configs/__init__.py index 4c721001d8434..c24433cd436b4 100644 --- a/vllm/transformers_utils/configs/__init__.py +++ b/vllm/transformers_utils/configs/__init__.py @@ -17,6 +17,7 @@ from vllm.transformers_utils.configs.nvlm_d import NVLM_D_Config from vllm.transformers_utils.configs.olmo2 import Olmo2Config from vllm.transformers_utils.configs.solar import SolarConfig +from vllm.transformers_utils.configs.telechat2 import Telechat2Config from vllm.transformers_utils.configs.ultravox import UltravoxConfig __all__ = [ @@ -36,5 +37,6 @@ "NVLM_D_Config", "Olmo2Config", "SolarConfig", + "Telechat2Config", "UltravoxConfig", ] \ No newline at end of file diff --git a/vllm/transformers_utils/configs/telechat2.py b/vllm/transformers_utils/configs/telechat2.py new file mode 100644 index 0000000000000..eb6f5a059169f --- /dev/null +++ b/vllm/transformers_utils/configs/telechat2.py @@ -0,0 +1,61 @@ +# adapted from https://www.modelscope.cn/models/TeleAI/TeleChat2-3B/resolve/master/configuration_telechat2.py +""" Telechat configuration compatible with LlamaConfig. 
""" + +from transformers.configuration_utils import PretrainedConfig + + +class Telechat2Config(PretrainedConfig): + + model_type = "telechat" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = { + "num_hidden_layers": "n_layer", + "num_attention_heads": "n_head", + "intermediate_size": "ffn_hidden_size", + "rms_norm_eps": "layer_norm_epsilon" + } + + def __init__( + self, + vocab_size=160256, + hidden_size=4096, + n_layer=30, + n_head=32, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + use_cache=True, + bos_token_id=1, + eos_token_id=2, + apply_residual_connection_post_layernorm=False, + hidden_dropout=0.0, + attention_dropout=0.0, + ffn_hidden_size=12288, + training_seqlen=8192, + logn=True, + embed_layernorm=False, + hidden_act="silu", + **kwargs, + ): + self.vocab_size = vocab_size + n_embed = kwargs.pop("n_embed", None) + self.hidden_size = hidden_size if n_embed is None else n_embed + self.n_layer = n_layer + self.n_head = n_head + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_range = initializer_range + self.use_cache = use_cache + self.apply_residual_connection_post_layernorm = ( + apply_residual_connection_post_layernorm) + self.hidden_dropout = hidden_dropout + self.attention_dropout = attention_dropout + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + self.logn = logn + self.training_seqlen = training_seqlen + self.embed_layernorm = embed_layernorm + self.num_key_value_heads = kwargs.pop("num_key_value_heads", None) + self.ffn_hidden_size = ffn_hidden_size + self.hidden_act = hidden_act + super().__init__(bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + **kwargs) From 418cb3b93fbf85f0735b5c0ed3f62d4b36808968 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Wed, 27 Nov 2024 19:55:38 +0800 Subject: [PATCH 166/397] [Bugfix][Hardware][CPU] Fix intel-omp version to avoid segfault (#10700) Signed-off-by: jiang1.li --- Dockerfile.cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.cpu b/Dockerfile.cpu index d2f72ea975a3d..ebe226cf6d148 100644 --- a/Dockerfile.cpu +++ b/Dockerfile.cpu @@ -16,7 +16,7 @@ RUN --mount=type=cache,target=/var/cache/apt \ # intel-openmp provides additional performance improvement vs. openmp # tcmalloc provides better memory allocation efficiency, e.g, holding memory in caches to speed up access of commonly-used objects. 
RUN --mount=type=cache,target=/root/.cache/pip \ - pip install intel-openmp + pip install intel-openmp==2025.0.1 ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so" From 9e0a147d502758ed31b35df1361e37ea6bacd4a0 Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Wed, 27 Nov 2024 04:26:27 -0800 Subject: [PATCH 167/397] [V1] Update interface for mistral-format Pixtral (#10703) Signed-off-by: Roger Wang --- vllm/model_executor/models/pixtral.py | 47 ++++++++++++++++----------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 6711cbf5694b9..45171c1a04b17 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -31,7 +31,7 @@ from vllm.model_executor.models.utils import merge_multimodal_embeddings from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs -from vllm.multimodal.inputs import PlaceholderRange +from vllm.multimodal.inputs import NestedTensors, PlaceholderRange from vllm.multimodal.utils import (cached_get_tokenizer, consecutive_placeholder_ranges, resolve_visual_encoder_outputs) @@ -190,6 +190,25 @@ def sampler(self): return get_sampler() + def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + vision_embeddings = self._process_image_input(image_input) + return vision_embeddings + + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[NestedTensors] = None, + ) -> torch.Tensor: + inputs_embeds = self.language_model.get_input_embeddings(input_ids) + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, + self.vision_args.image_token_id) + return inputs_embeds + def forward( self, input_ids: torch.Tensor, @@ -197,31 +216,21 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, **kwargs: object, ) -> Union[torch.Tensor, IntermediateTensors]: """Run forward pass for pixtral. - - TODO - """ if intermediate_tensors is not None: - input_ids = None inputs_embeds = None - else: - image_input = self._parse_and_validate_image_input(**kwargs) - - if image_input is not None: - vision_embeddings = self._process_image_input(image_input) - inputs_embeds = self.language_model.model.get_input_embeddings( - input_ids) - inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, vision_embeddings, - self.vision_args.image_token_id) - - input_ids = None - else: - inputs_embeds = None + # NOTE: In v1, inputs_embeds is always generated at model runner, this + # condition is for v0 compatibility. 
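
The v0 path above merges vision features into the text embeddings with the same placeholder-scatter idea as merge_multimodal_embeddings: positions of the image token in input_ids are overwritten by the vision encoder outputs. A rough, self-contained sketch of that operation; the token id, shapes, and values below are invented for illustration.

import torch

IMAGE_TOKEN_ID = 10                        # assumed placeholder token id
input_ids = torch.tensor([1, 10, 10, 2])   # prompt with two image placeholders
text_embeds = torch.zeros(4, 8)            # [seq_len, hidden_size]
vision_embeds = torch.ones(2, 8)           # one embedding per placeholder token

merged = text_embeds.clone()
# Scatter the vision features into the placeholder positions.
merged[input_ids == IMAGE_TOKEN_ID] = vision_embeds
assert merged[1].sum() == 8 and merged[0].sum() == 0
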
+ elif inputs_embeds is None: + vision_embeddings = self.get_multimodal_embeddings(**kwargs) + inputs_embeds = self.get_input_embeddings(input_ids, + vision_embeddings) + input_ids = None hidden_states = self.language_model.model(input_ids, positions, From 308cc5e21e12fb0eea0a960d147dca7efc59d92f Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 27 Nov 2024 09:26:14 -0800 Subject: [PATCH 168/397] [ci] fix slow tests (#10698) Signed-off-by: youkaichao --- tests/entrypoints/llm/test_lazy_outlines.py | 22 ++++++++++++++----- tests/test_lazy_torch_compile.py | 22 ++++++++++++++----- .../vllm_test_utils/vllm_test_utils/blame.py | 10 ++++----- 3 files changed, 39 insertions(+), 15 deletions(-) diff --git a/tests/entrypoints/llm/test_lazy_outlines.py b/tests/entrypoints/llm/test_lazy_outlines.py index 81fb000d8ac56..2c53676c5f5dd 100644 --- a/tests/entrypoints/llm/test_lazy_outlines.py +++ b/tests/entrypoints/llm/test_lazy_outlines.py @@ -1,6 +1,7 @@ import sys +from contextlib import nullcontext -from vllm_test_utils import blame +from vllm_test_utils import BlameResult, blame from vllm import LLM, SamplingParams from vllm.distributed import cleanup_dist_env_and_memory @@ -56,9 +57,20 @@ def test_lazy_outlines(sample_regex): """ # make sure outlines is not imported module_name = "outlines" - with blame(lambda: module_name in sys.modules) as result: + # In CI, we only check finally if the module is imported. + # If it is indeed imported, we can rerun the test with `use_blame=True`, + # which will trace every function call to find the first import location, + # and help find the root cause. + # We don't run it in CI by default because it is slow. + use_blame = False + context = blame( + lambda: module_name in sys.modules) if use_blame else nullcontext() + with context as result: run_normal() run_lmfe(sample_regex) - assert not result.found, ( - f"Module {module_name} is already imported, the" - f" first import location is:\n{result.trace_stack}") + if use_blame: + assert isinstance(result, BlameResult) + print(f"the first import location is:\n{result.trace_stack}") + assert module_name not in sys.modules, ( + f"Module {module_name} is imported. To see the first" + f" import location, run the test with `use_blame=True`.") diff --git a/tests/test_lazy_torch_compile.py b/tests/test_lazy_torch_compile.py index 4756fac8e2a8d..b950877a4337b 100644 --- a/tests/test_lazy_torch_compile.py +++ b/tests/test_lazy_torch_compile.py @@ -2,15 +2,27 @@ # The utility function cannot be placed in `vllm.utils` # this needs to be a standalone script import sys +from contextlib import nullcontext -from vllm_test_utils import blame +from vllm_test_utils import BlameResult, blame module_name = "torch._inductor.async_compile" -with blame(lambda: module_name in sys.modules) as result: +# In CI, we only check finally if the module is imported. +# If it is indeed imported, we can rerun the test with `use_blame=True`, +# which will trace every function call to find the first import location, +# and help find the root cause. +# We don't run it in CI by default because it is slow. 
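
The blame helper used in this test installs a tracer with sys.settrace and records the call stack the first time a predicate becomes true (for example, "module X showed up in sys.modules"). A stripped-down sketch of that idea, not the real vllm_test_utils implementation, including the try/finally that this commit also adds to the real helper so tracing is always unhooked:

import sys
import traceback
from contextlib import contextmanager

@contextmanager
def tiny_blame(predicate):
    result = {"trace": None}

    def tracer(frame, event, arg):
        # Record the stack the first time the predicate holds.
        if result["trace"] is None and predicate():
            result["trace"] = "".join(traceback.format_stack(frame))
        return tracer

    try:
        sys.settrace(tracer)
        yield result
    finally:
        sys.settrace(None)  # always unhook, even if the body raises

# Usage: find roughly where `json` first gets imported.
with tiny_blame(lambda: "json" in sys.modules) as res:
    import json  # noqa: F401
print(res["trace"] is not None)
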
+use_blame = False +context = blame( + lambda: module_name in sys.modules) if use_blame else nullcontext() +with context as result: import vllm # noqa -assert not result.found, (f"Module {module_name} is already imported, the" - f" first import location is:\n{result.trace_stack}") +if use_blame: + assert isinstance(result, BlameResult) + print(f"the first import location is:\n{result.trace_stack}") -print(f"Module {module_name} is not imported yet") +assert module_name not in sys.modules, ( + f"Module {module_name} is imported. To see the first" + f" import location, run the test with `use_blame=True`.") diff --git a/tests/vllm_test_utils/vllm_test_utils/blame.py b/tests/vllm_test_utils/vllm_test_utils/blame.py index ad23ab83c2d81..1ddd3471d357b 100644 --- a/tests/vllm_test_utils/vllm_test_utils/blame.py +++ b/tests/vllm_test_utils/vllm_test_utils/blame.py @@ -46,8 +46,8 @@ def _trace_calls(frame, event, arg=None): pass return _trace_calls - sys.settrace(_trace_calls) - - yield result - - sys.settrace(None) + try: + sys.settrace(_trace_calls) + yield result + finally: + sys.settrace(None) From c411def234b0e85a349c8d95b5f32eade4aa1ed6 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 27 Nov 2024 10:16:10 -0800 Subject: [PATCH 169/397] [torch.compile] fix shape specialization (#10722) Signed-off-by: youkaichao --- vllm/config.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 68f73bf4b4dc9..cd24e9ffdf598 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2151,7 +2151,7 @@ class CompilationConfig(BaseModel): use_inductor: bool = True inductor_specialize_for_cudagraph_no_more_than: Optional[int] = None - inductor_compile_sizes: Optional[List[int]] = Field(default_factory=dict) + inductor_compile_sizes: Optional[List[int]] = Field(default=None) inductor_compile_config: Dict = Field(default_factory=dict) inductor_passes: Dict[str, str] = Field(default_factory=dict) @@ -2290,9 +2290,8 @@ def init_during_runtime(self): if x <= self.inductor_specialize_for_cudagraph_no_more_than ] else: - assert self.inductor_compile_sizes is not None, ( - "inductor_compile_sizes should not be None when " - "inductor_specialize_for_cudagraph_no_more_than is None") + if self.inductor_compile_sizes is None: + self.inductor_compile_sizes = [] self.compile_sizes = self.inductor_compile_sizes From b98c62ba4947b93673c522b13464854acf8090a4 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Thu, 28 Nov 2024 02:43:17 +0800 Subject: [PATCH 170/397] [Bugfix] Fix GGUF inference with FP16 unquantized checkpoint (#10675) Signed-off-by: Isotr0py <2037008807@qq.com> --- .../layers/quantization/gguf.py | 69 ++++++++++++++++--- 1 file changed, 60 insertions(+), 9 deletions(-) diff --git a/vllm/model_executor/layers/quantization/gguf.py b/vllm/model_executor/layers/quantization/gguf.py index 24138662eb25c..f0943efa0039d 100644 --- a/vllm/model_executor/layers/quantization/gguf.py +++ b/vllm/model_executor/layers/quantization/gguf.py @@ -2,6 +2,7 @@ import gguf import torch +from gguf import GGMLQuantizationType as WeightType from torch.nn.parameter import Parameter, UninitializedParameter from vllm import _custom_ops as ops @@ -49,19 +50,65 @@ def get_quant_method(self, layer: torch.nn.Module, return None +UNQUANTIZED_TYPES = {WeightType.F32, WeightType.F16, WeightType.BF16} +STANDARD_QUANT_TYPES = { + WeightType.Q4_0, + WeightType.Q4_1, + WeightType.Q5_0, + WeightType.Q5_1, + WeightType.Q8_0, + WeightType.Q8_1, +} +KQUANT_TYPES = { + WeightType.Q2_K, + WeightType.Q3_K, + 
WeightType.Q4_K, + WeightType.Q5_K, + WeightType.Q6_K, +} +IMATRIX_QUANT_TYPES = { + WeightType.IQ1_M, + WeightType.IQ1_S, + WeightType.IQ2_XXS, + WeightType.IQ2_XS, + WeightType.IQ2_S, + WeightType.IQ3_XXS, + WeightType.IQ3_S, + WeightType.IQ4_XS, + WeightType.IQ4_NL, +} +# TODO(Isotr0py): Currently, we don't have MMQ kernel for I-Matrix quantization. +# Consolidate DEQUANT_TYPES, MMVQ_QUANT_TYPES and MMQ_QUANT_TYPES after we add +# MMQ kernel for I-Matrix quantization. +DEQUANT_TYPES = STANDARD_QUANT_TYPES | KQUANT_TYPES | IMATRIX_QUANT_TYPES +MMVQ_QUANT_TYPES = STANDARD_QUANT_TYPES | KQUANT_TYPES | IMATRIX_QUANT_TYPES +MMQ_QUANT_TYPES = STANDARD_QUANT_TYPES | KQUANT_TYPES + + def _fuse_mul_mat(x: torch.Tensor, qweight: torch.Tensor, qweight_type: int) -> torch.Tensor: - # use dequantize mulmat for IQmatrix, mmq for k-quants - if x.shape[0] == 1: - # enable mmvq in contiguous batching + # there is no need to call any kernel for fp16/bf16 + if qweight_type in UNQUANTIZED_TYPES: + return x @ qweight.T + # enable MMVQ in contiguous batching with batch_size=1 + if x.shape[0] == 1 and qweight_type in MMVQ_QUANT_TYPES: y = ops.ggml_mul_mat_vec_a8(qweight, x, qweight_type, qweight.shape[0]) - elif qweight_type >= 16: + # Use MMQ Kernel if it's available (standard + k-quants) + elif qweight_type in MMQ_QUANT_TYPES: + y = ops.ggml_mul_mat_a8(qweight, x, qweight_type, qweight.shape[0]) + # If there is no available MMQ kernel, fallback to dequantize + elif qweight_type in DEQUANT_TYPES: block_size, type_size = gguf.GGML_QUANT_SIZES[qweight_type] shape = (qweight.shape[0], qweight.shape[1] // type_size * block_size) weight = ops.ggml_dequantize(qweight, qweight_type, *shape) y = x @ weight.T else: - y = ops.ggml_mul_mat_a8(qweight, x, qweight_type, qweight.shape[0]) + # Raise an error if the quantization type is not supported. + # Might be useful if llama.cpp adds a new quantization type. + # Wrap to GGMLQuantizationType IntEnum to make sure it's a valid type. 
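
When no matmul kernel is available, the fallback above dequantizes the packed GGUF tensor first, recovering the logical weight shape from the fact that each quantization type stores block_size weights in type_size bytes. A small sketch of that arithmetic using the gguf package's lookup table; the packed row width below is an assumed example value.

import gguf
from gguf import GGMLQuantizationType

qtype = GGMLQuantizationType.Q4_K
block_size, type_size = gguf.GGML_QUANT_SIZES[qtype]
# For Q4_K this is 256 weights packed into 144 bytes per block.
packed_cols = 1152                                  # bytes per packed row (assumed)
logical_cols = packed_cols // type_size * block_size
print(qtype.name, block_size, type_size, logical_cols)
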
+ qweight_type = WeightType(qweight_type) + raise NotImplementedError( + f"Unsupported GGUF quantization type: {qweight_type}") return y @@ -121,9 +168,9 @@ def apply(self, shard_id = ["q", "k", "v"] if "q" in shard_id else shard_id qweight = layer.qweight.unbind(0) result = [] - for id in shard_id: - q_idx = layer.qweight.shard_id_map[id] - qweight_type = layer.qweight_type.shard_weight_type[id] + for idx in shard_id: + q_idx = layer.qweight.shard_id_map[idx] + qweight_type = layer.qweight_type.shard_weight_type[idx] result.append(_fuse_mul_mat(x, qweight[q_idx], qweight_type)) out = torch.cat(result, axis=1) else: @@ -163,9 +210,13 @@ class GGUFUninitializedParameter(UninitializedParameter): data_container: List[torch.Tensor] def materialize_nested(self) -> Parameter: + dtype = {data.dtype for data in self.data_container} + assert len(dtype) == 1, ValueError( + f"Data container has mixed dtypes: {dtype}") + dtype = next(iter(dtype)) nested_data = torch.nested.nested_tensor(self.data_container, device=self.device, - dtype=torch.uint8) + dtype=dtype) self.data_container.clear() param = torch.Tensor._make_subclass(self.cls_to_become, nested_data, From 197b4484a3fba4a98921f903d6242677f97c63db Mon Sep 17 00:00:00 2001 From: Mor Zusman Date: Wed, 27 Nov 2024 21:02:27 +0200 Subject: [PATCH 171/397] [Bugfix][Mamba] Fix Multistep on Mamba-like models (#10705) Signed-off-by: mzusman --- .../decoder_only/language/test_jamba.py | 38 +++++++++++++++++++ .../decoder_only/language/test_mamba.py | 36 ++++++++++++++++++ vllm/engine/async_llm_engine.py | 7 +++- vllm/engine/llm_engine.py | 7 +++- 4 files changed, 84 insertions(+), 4 deletions(-) diff --git a/tests/models/decoder_only/language/test_jamba.py b/tests/models/decoder_only/language/test_jamba.py index 6542689c3f277..87a05b3011393 100644 --- a/tests/models/decoder_only/language/test_jamba.py +++ b/tests/models/decoder_only/language/test_jamba.py @@ -275,6 +275,44 @@ def test_state_cleanup( "could be related to finished_requests_ids") +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +def test_multistep( + vllm_runner, + model: str, + dtype: str, + example_prompts, +) -> None: + # This test is verifying that multistep works correctly + #on mamba-like models + with vllm_runner(model, num_scheduler_steps=8, + max_num_seqs=2) as vllm_model: + vllm_model.generate_greedy([example_prompts[0]] * 10, 1) + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [64]) +def test_multistep_correctness(vllm_runner, model: str, dtype: str, + max_tokens: int, example_prompts) -> None: + with vllm_runner(model, num_scheduler_steps=8, + max_num_seqs=2) as vllm_model: + vllm_outputs_multistep = vllm_model.generate_greedy( + example_prompts, max_tokens) + + with vllm_runner(model, num_scheduler_steps=1, + max_num_seqs=2) as vllm_model: + vllm_outputs_single_step = vllm_model.generate_greedy( + example_prompts, max_tokens) + + check_outputs_equal( + outputs_0_lst=vllm_outputs_multistep, + outputs_1_lst=vllm_outputs_single_step, + name_0="vllm_outputs_multistep", + name_1="vllm_outputs_single_step", + ) + + @multi_gpu_test(num_gpus=2) @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("dtype", ["float"]) diff --git a/tests/models/decoder_only/language/test_mamba.py b/tests/models/decoder_only/language/test_mamba.py index 78eab8d5354fd..01e208347bff4 100644 --- a/tests/models/decoder_only/language/test_mamba.py +++ 
b/tests/models/decoder_only/language/test_mamba.py @@ -283,3 +283,39 @@ def test_state_cleanup( except ValueError: pytest.fail("Mamba inner state wasn't cleaned up between states, " "could be related to finished_requests_ids") + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +def test_multistep( + vllm_runner, + model: str, + dtype: str, + example_prompts, +) -> None: + with vllm_runner(model, num_scheduler_steps=8, + max_num_seqs=2) as vllm_model: + vllm_model.generate_greedy([example_prompts[0]] * 10, 1) + + +@pytest.mark.parametrize("model", MODELS) +@pytest.mark.parametrize("dtype", ["float"]) +@pytest.mark.parametrize("max_tokens", [64]) +def test_multistep_correctness(vllm_runner, model: str, dtype: str, + max_tokens: int, example_prompts) -> None: + with vllm_runner(model, num_scheduler_steps=8, + max_num_seqs=2) as vllm_model: + vllm_outputs_multistep = vllm_model.generate_greedy( + example_prompts, max_tokens) + + with vllm_runner(model, num_scheduler_steps=1, + max_num_seqs=2) as vllm_model: + vllm_outputs_single_step = vllm_model.generate_greedy( + example_prompts, max_tokens) + + check_outputs_equal( + outputs_0_lst=vllm_outputs_multistep, + outputs_1_lst=vllm_outputs_single_step, + name_0="vllm_outputs_multistep", + name_1="vllm_outputs_single_step", + ) diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 3224577c567f8..31a15b04314d5 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -300,6 +300,9 @@ async def step_async( ctx.seq_group_metadata_list = seq_group_metadata_list ctx.scheduler_outputs = scheduler_outputs + finished_requests_ids = self.scheduler[ + virtual_engine].get_and_reset_finished_requests_ids() + # Maybe switch from async mode to sync mode if not allow_async_output_proc and len(ctx.output_queue) > 0: self._process_model_outputs(ctx=ctx) @@ -311,13 +314,13 @@ async def step_async( self._cache_scheduler_outputs_for_multi_step( virtual_engine, seq_group_metadata_list, scheduler_outputs, allow_async_output_proc) + else: + finished_requests_ids = list() assert seq_group_metadata_list is not None assert scheduler_outputs is not None if not scheduler_outputs.is_empty(): - finished_requests_ids = self.scheduler[ - virtual_engine].get_and_reset_finished_requests_ids() # Check if we have a cached last_output from the previous iteration. 
# For supporting PP this is probably the best way to pass the diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index a4975cece9a81..ecc222f692c41 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -1398,6 +1398,9 @@ def step(self) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: ctx.seq_group_metadata_list = seq_group_metadata_list ctx.scheduler_outputs = scheduler_outputs + finished_requests_ids = self.scheduler[ + virtual_engine].get_and_reset_finished_requests_ids() + # Maybe switch from async mode to sync mode if not allow_async_output_proc and len(ctx.output_queue) > 0: self._process_model_outputs(ctx=ctx) @@ -1409,13 +1412,13 @@ def step(self) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: self._cache_scheduler_outputs_for_multi_step( virtual_engine, seq_group_metadata_list, scheduler_outputs, allow_async_output_proc) + else: + finished_requests_ids = list() assert seq_group_metadata_list is not None assert scheduler_outputs is not None if not scheduler_outputs.is_empty(): - finished_requests_ids = self.scheduler[ - virtual_engine].get_and_reset_finished_requests_ids() # Check if we have a cached last_output from the previous iteration. # For supporting PP this is probably the best way to pass the From 9b4b150395d509a35031e58fb6e0f3331b532055 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 28 Nov 2024 03:05:29 +0800 Subject: [PATCH 172/397] [Bugfix] Ignore `lm_head` when loading embedding models (#10719) Signed-off-by: DarkLight1337 --- vllm/model_executor/models/bert.py | 2 ++ vllm/model_executor/models/gemma2.py | 2 ++ vllm/model_executor/models/llama.py | 2 ++ vllm/model_executor/models/qwen2.py | 2 ++ 4 files changed, 8 insertions(+) diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index 1fff72b3490e9..053d838432885 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -443,6 +443,8 @@ def pooler( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) weights = hf_to_vllm_mapper.apply(weights) + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) self.model.load_weights(weights) def _build_model(self, diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index c93223c740272..d35fcb012e166 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -504,4 +504,6 @@ def pooler( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) weights = hf_to_vllm_mapper.apply(weights) + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) self.model.load_weights(weights) diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index fffb3fe53b94c..fe94bb352961b 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -689,6 +689,8 @@ def pooler( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) weights = hf_to_vllm_mapper.apply(weights) + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) self.model.load_weights(weights) def load_kv_cache_scales(self, quantization_param_path: str) -> None: diff --git a/vllm/model_executor/models/qwen2.py 
b/vllm/model_executor/models/qwen2.py index 9f706610a129a..87943e53d861c 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -580,4 +580,6 @@ def pooler( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) weights = hf_to_vllm_mapper.apply(weights) + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) self.model.load_weights(weights) From 395b1c74543053ebf25d4ab3af828cd145506caa Mon Sep 17 00:00:00 2001 From: tomeras91 <57313761+tomeras91@users.noreply.github.com> Date: Wed, 27 Nov 2024 23:21:10 +0200 Subject: [PATCH 173/397] [Frontend] don't block event loop in tokenization (preprocess) in OpenAI compatible server (#10635) Signed-off-by: Tomer Asida --- .../openai/test_async_tokenization.py | 137 ++++++++++++++++++ vllm/entrypoints/openai/serving_completion.py | 2 +- vllm/entrypoints/openai/serving_embedding.py | 15 +- vllm/entrypoints/openai/serving_engine.py | 75 +++++----- vllm/entrypoints/openai/serving_score.py | 10 +- .../openai/serving_tokenization.py | 15 +- vllm/utils.py | 8 +- 7 files changed, 206 insertions(+), 56 deletions(-) create mode 100644 tests/entrypoints/openai/test_async_tokenization.py diff --git a/tests/entrypoints/openai/test_async_tokenization.py b/tests/entrypoints/openai/test_async_tokenization.py new file mode 100644 index 0000000000000..fcce8b46c4344 --- /dev/null +++ b/tests/entrypoints/openai/test_async_tokenization.py @@ -0,0 +1,137 @@ +import asyncio +import contextlib +import random +import time +from typing import Callable + +import openai +import pytest +import pytest_asyncio +import requests + +from tests.utils import RemoteOpenAIServer + +MODEL_NAME = "Qwen/Qwen2.5-1.5B-Instruct" + + +@pytest.fixture(scope="module") +def server(): # noqa: F811 + args = [ + # use half precision for speed and memory savings in CI environment + "--dtype", + "bfloat16", + "--max-model-len", + "8192", + "--enforce-eager", + "--max-num-seqs", + "128", + "--load-format", + "dummy", + ] + + with RemoteOpenAIServer(MODEL_NAME, args) as remote_server: + yield remote_server + + +@pytest_asyncio.fixture +async def client(server): + async with server.get_async_client() as async_client: + yield async_client + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + ids=["completion", "chat"], + argnames=["create_func_gen", "content_body"], + argvalues=[ + (lambda x: x.completions.create, { + "prompt": " ".join(['A'] * 10_000) + }), + (lambda x: x.chat.completions.create, { + "messages": [{ + "role": "user", + "content": " ".join(['A'] * 10_000) + }] + }), + ], +) +async def test_with_and_without_truncate( + server: RemoteOpenAIServer, + client: openai.AsyncOpenAI, + create_func_gen: Callable, + content_body: dict, +): + create_func = create_func_gen(client) + body = {"model": MODEL_NAME, **content_body, "max_tokens": 10} + + num_requests = 10 + truncate_prompt_tokens = ([1000] * (num_requests // 2) + [None] * + (num_requests - num_requests // 2)) + random.shuffle(truncate_prompt_tokens) + + bodies = [{ + **body, "extra_body": { + 'truncate_prompt_tokens': t + } + } for t in truncate_prompt_tokens] + + async def get_status_code(**kwargs): + try: + await create_func(**kwargs) + return 200 + except openai.APIStatusError as e: + return e.status_code + + responses = await asyncio.gather(*[get_status_code(**b) for b in bodies]) + assert 500 not in responses + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + ids=["single 
completion", "multiple completions", "chat"], + argnames=["create_func_gen", "content_body"], + argvalues=[ + (lambda x: x.completions.create, { + "prompt": " ".join(['A'] * 300_000) + }), + (lambda x: x.completions.create, { + "prompt": [" ".join(['A'] * 300_000)] * 2 + }), + (lambda x: x.chat.completions.create, { + "messages": [{ + "role": "user", + "content": " ".join(['A'] * 300_000) + }] + }), + ], +) +async def test_healthcheck_response_time( + server: RemoteOpenAIServer, + client: openai.AsyncOpenAI, + create_func_gen: Callable, + content_body: dict, +): + num_requests = 50 + + create_func = create_func_gen(client) + body = {"model": MODEL_NAME, **content_body, "max_tokens": 10} + + def get_response_time(url): + start_time = time.monotonic() + res = requests.get(url) + end_time = time.monotonic() + assert res.status_code == 200 + return end_time - start_time + + no_load_response_time = get_response_time(server.url_for("health")) + tasks = [ + asyncio.create_task(create_func(**body)) for _ in range(num_requests) + ] + await asyncio.sleep(1) # give the tasks a chance to start running + load_response_time = get_response_time(server.url_for("health")) + + with contextlib.suppress(openai.APIStatusError): + await asyncio.gather(*tasks) + + assert load_response_time < 100 * no_load_response_time + assert load_response_time < 0.1 diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index 936aae8f1c267..fc1c4908d6650 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -101,7 +101,7 @@ async def create_completion( tokenizer = await self.engine_client.get_tokenizer(lora_request) - request_prompts, engine_prompts = self._preprocess_completion( + request_prompts, engine_prompts = await self._preprocess_completion( request, tokenizer, request.prompt, diff --git a/vllm/entrypoints/openai/serving_embedding.py b/vllm/entrypoints/openai/serving_embedding.py index c84a7d2d8e13e..78e2416d9d4da 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -156,13 +156,14 @@ async def create_embedding( add_special_tokens=request.add_special_tokens, ) else: - request_prompts, engine_prompts = self._preprocess_completion( - request, - tokenizer, - request.input, - truncate_prompt_tokens=truncate_prompt_tokens, - add_special_tokens=request.add_special_tokens, - ) + (request_prompts, + engine_prompts) = await self._preprocess_completion( + request, + tokenizer, + request.input, + truncate_prompt_tokens=truncate_prompt_tokens, + add_special_tokens=request.add_special_tokens, + ) except ValueError as e: logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(str(e)) diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index cae2877ea7e99..8232c6116c1bd 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -1,5 +1,6 @@ import json import pathlib +from concurrent.futures.thread import ThreadPoolExecutor from dataclasses import dataclass from http import HTTPStatus from typing import (Any, Callable, Dict, Iterable, Iterator, List, Mapping, @@ -46,7 +47,7 @@ from vllm.tracing import (contains_trace_headers, extract_trace_headers, log_tracing_disabled_warning) from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import AtomicCounter, is_list_of +from vllm.utils import 
AtomicCounter, is_list_of, make_async logger = init_logger(__name__) @@ -140,6 +141,14 @@ def __init__( self.request_logger = request_logger self.return_tokens_as_token_ids = return_tokens_as_token_ids + self._tokenizer_executor = ThreadPoolExecutor(max_workers=1) + + self._tokenize_prompt_input_async = make_async( + self._tokenize_prompt_input, executor=self._tokenizer_executor) + self._tokenize_prompt_input_or_inputs_async = make_async( + self._tokenize_prompt_input_or_inputs, + executor=self._tokenizer_executor) + async def show_available_models(self) -> ModelList: """Show available models. Right now we only have one model.""" model_cards = [ @@ -368,7 +377,7 @@ def _tokenize_prompt_input_or_inputs( input_or_inputs: Union[str, List[str], List[int], List[List[int]]], truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None, add_special_tokens: bool = True, - ) -> Iterator[TextTokensPrompt]: + ) -> List[TextTokensPrompt]: """ Tokenize/detokenize depending on the input format. @@ -376,45 +385,41 @@ def _tokenize_prompt_input_or_inputs( , each input can be a string or array of tokens. Note that each request can pass one or more inputs. """ - for prompt_input in parse_and_batch_prompt(input_or_inputs): - # Although our type checking is based on mypy, - # VSCode Pyright extension should still work properly - # "is True" is required for Pyright to perform type narrowing - # See: https://github.com/microsoft/pyright/issues/7672 - if prompt_input["is_tokens"] is False: - yield self._normalize_prompt_text_to_input( - request, - tokenizer, - prompt=prompt_input["content"], - truncate_prompt_tokens=truncate_prompt_tokens, - add_special_tokens=add_special_tokens, - ) - else: - yield self._normalize_prompt_tokens_to_input( - request, - tokenizer, - prompt_ids=prompt_input["content"], - truncate_prompt_tokens=truncate_prompt_tokens, - ) + # Although our type checking is based on mypy, + # VSCode Pyright extension should still work properly + # "is True" is required for Pyright to perform type narrowing + # See: https://github.com/microsoft/pyright/issues/7672 + return [ + self._normalize_prompt_text_to_input( + request, + tokenizer, + prompt=prompt_input["content"], + truncate_prompt_tokens=truncate_prompt_tokens, + add_special_tokens=add_special_tokens) + if prompt_input["is_tokens"] is False else + self._normalize_prompt_tokens_to_input( + request, + tokenizer, + prompt_ids=prompt_input["content"], + truncate_prompt_tokens=truncate_prompt_tokens) + for prompt_input in parse_and_batch_prompt(input_or_inputs) + ] - def _preprocess_completion( + async def _preprocess_completion( self, request: CompletionLikeRequest, tokenizer: AnyTokenizer, input_or_inputs: Union[str, List[str], List[int], List[List[int]]], truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None, add_special_tokens: bool = True, - ) -> Tuple[Sequence[TextTokensPrompt], List[TokensPrompt]]: - request_prompts = [ - request_prompt - for request_prompt in self._tokenize_prompt_input_or_inputs( - request, - tokenizer, - input_or_inputs, - truncate_prompt_tokens=truncate_prompt_tokens, - add_special_tokens=add_special_tokens, - ) - ] + ) -> Tuple[List[TextTokensPrompt], List[TokensPrompt]]: + request_prompts = await self._tokenize_prompt_input_or_inputs_async( + request, + tokenizer, + input_or_inputs, + truncate_prompt_tokens=truncate_prompt_tokens, + add_special_tokens=add_special_tokens, + ) engine_prompts = [ TokensPrompt(prompt_token_ids=request_prompt["prompt_token_ids"]) @@ -493,7 +498,7 @@ async def 
_preprocess_chat( request=request) if isinstance(request_prompt, str): - prompt_inputs = self._tokenize_prompt_input( + prompt_inputs = await self._tokenize_prompt_input_async( request, tokenizer, request_prompt, diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py index 156fea6f47982..7cd8ff08b5608 100644 --- a/vllm/entrypoints/openai/serving_score.py +++ b/vllm/entrypoints/openai/serving_score.py @@ -15,7 +15,7 @@ from vllm.logger import init_logger from vllm.outputs import EmbeddingRequestOutput from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer -from vllm.utils import merge_async_iterators, random_uuid +from vllm.utils import make_async, merge_async_iterators, random_uuid logger = init_logger(__name__) @@ -145,9 +145,11 @@ async def create_score( tokenization_kwargs["truncation"] = True tokenization_kwargs["max_length"] = truncate_prompt_tokens - prompt_inputs = tokenizer(text=q, - text_pair=t, - **tokenization_kwargs) + tokenize_async = make_async(tokenizer.__call__, + executor=self._tokenizer_executor) + prompt_inputs = await tokenize_async(text=q, + text_pair=t, + **tokenization_kwargs) engine_prompt = TokensPrompt( prompt_token_ids=prompt_inputs["input_ids"], token_type_ids=prompt_inputs.get("token_type_ids")) diff --git a/vllm/entrypoints/openai/serving_tokenization.py b/vllm/entrypoints/openai/serving_tokenization.py index 59b3b1311f881..9c3dc2c98b2dd 100644 --- a/vllm/entrypoints/openai/serving_tokenization.py +++ b/vllm/entrypoints/openai/serving_tokenization.py @@ -81,12 +81,13 @@ async def create_tokenize( add_special_tokens=request.add_special_tokens, ) else: - request_prompts, engine_prompts = self._preprocess_completion( - request, - tokenizer, - request.prompt, - add_special_tokens=request.add_special_tokens, - ) + (request_prompts, + engine_prompts) = await self._preprocess_completion( + request, + tokenizer, + request.prompt, + add_special_tokens=request.add_special_tokens, + ) except ValueError as e: logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(str(e)) @@ -134,7 +135,7 @@ async def create_detokenize( # Silently ignore prompt adapter since it does not affect tokenization # (Unlike in Embeddings API where an error is raised) - prompt_input = self._tokenize_prompt_input( + prompt_input = await self._tokenize_prompt_input_async( request, tokenizer, request.tokens, diff --git a/vllm/utils.py b/vllm/utils.py index bec876d983701..6f7a6f8c54e47 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1,5 +1,6 @@ import argparse import asyncio +import concurrent import contextlib import datetime import enum @@ -351,7 +352,10 @@ def in_wsl() -> bool: return "microsoft" in " ".join(uname()).lower() -def make_async(func: Callable[P, T]) -> Callable[P, Awaitable[T]]: +def make_async( + func: Callable[P, T], + executor: Optional[concurrent.futures.Executor] = None +) -> Callable[P, Awaitable[T]]: """Take a blocking function, and run it on in an executor thread. 
This function prevents the blocking function from blocking the @@ -362,7 +366,7 @@ def make_async(func: Callable[P, T]) -> Callable[P, Awaitable[T]]: def _async_wrapper(*args: P.args, **kwargs: P.kwargs) -> asyncio.Future: loop = asyncio.get_event_loop() p_func = partial(func, *args, **kwargs) - return loop.run_in_executor(executor=None, func=p_func) + return loop.run_in_executor(executor=executor, func=p_func) return _async_wrapper From cb4e1c3f3aee507130b64c9bacf5778ed265785d Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 27 Nov 2024 19:54:58 -0800 Subject: [PATCH 174/397] [misc] upgrade filelock version (#10731) Signed-off-by: youkaichao --- requirements-common.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-common.txt b/requirements-common.txt index f62ad66a1ecc4..02e3d65fb774c 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -20,7 +20,7 @@ tiktoken >= 0.6.0 # Required for DBRX tokenizer lm-format-enforcer >= 0.10.9, < 0.11 outlines >= 0.0.43, < 0.1 typing_extensions >= 4.10 -filelock >= 3.10.4 # filelock starts to support `mode` argument from 3.10.4 +filelock >= 3.16.1 # need to contain https://github.com/tox-dev/filelock/pull/317 partial-json-parser # used for parsing partial JSON outputs pyzmq msgspec From 70dc14fbd09d054ff75850036b81212ca67e5275 Mon Sep 17 00:00:00 2001 From: zixuanzhang226 Date: Wed, 27 Nov 2024 23:58:02 -0800 Subject: [PATCH 175/397] [Model] support bitsandbytes quantization with minicpm3 model (#10682) Signed-off-by: Ubuntu --- vllm/model_executor/models/minicpm3.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vllm/model_executor/models/minicpm3.py b/vllm/model_executor/models/minicpm3.py index c38c31a0d4953..c66be2d9c2d07 100644 --- a/vllm/model_executor/models/minicpm3.py +++ b/vllm/model_executor/models/minicpm3.py @@ -241,6 +241,12 @@ class MiniCPM3ForCausalLM(MiniCPMForCausalLM): # `embedding_modules` and `embedding_padding_modules` # are inherited from MiniCPMForCausalLM + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "gate_proj": ("gate_up_proj", 0), + "up_proj": ("gate_up_proj", 1), + } + def _init_model(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model = MiniCPM3Model(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) From 278be671a355ea89843141928a426a303bfd8036 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=BD=97=E6=B3=BD=E8=BD=A9?= Date: Thu, 28 Nov 2024 15:58:39 +0800 Subject: [PATCH 176/397] [Doc] Update model in arch_overview.rst to match comment (#10701) Signed-off-by: spacewander --- docs/source/design/arch_overview.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/design/arch_overview.rst b/docs/source/design/arch_overview.rst index a9e7b4bd69bc7..bc3f509f0a66e 100644 --- a/docs/source/design/arch_overview.rst +++ b/docs/source/design/arch_overview.rst @@ -42,7 +42,7 @@ Here is a sample of `LLM` class usage: sampling_params = SamplingParams(temperature=0.8, top_p=0.95) # Initialize the LLM engine with the OPT-125M model - llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct") + llm = LLM(model="facebook/opt-125m") # Generate outputs for the input prompts outputs = llm.generate(prompts, sampling_params) From d9b4b3f069a9f602b067a5bb3efe57b106d39c09 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Wed, 27 Nov 2024 23:59:28 -0800 Subject: [PATCH 177/397] [Bug][CLI] Allow users to disable prefix caching explicitly (#10724) Signed-off-by: rickyx --- tests/engine/test_arg_utils.py | 19 +++++++++++++++++++ 
tests/v1/engine/test_engine_args.py | 19 +++++++++++++++++++ vllm/engine/arg_utils.py | 10 +++++++--- 3 files changed, 45 insertions(+), 3 deletions(-) diff --git a/tests/engine/test_arg_utils.py b/tests/engine/test_arg_utils.py index 5b0e76fe53685..de78d41ad12eb 100644 --- a/tests/engine/test_arg_utils.py +++ b/tests/engine/test_arg_utils.py @@ -59,6 +59,25 @@ def test_compilation_config(): assert args.compilation_config.level == 3 +def test_prefix_cache_default(): + parser = EngineArgs.add_cli_args(FlexibleArgumentParser()) + args = parser.parse_args([]) + + engine_args = EngineArgs.from_cli_args(args=args) + assert (not engine_args.enable_prefix_caching + ), "prefix caching defaults to off." + + # with flag to turn it on. + args = parser.parse_args(["--enable-prefix-caching"]) + engine_args = EngineArgs.from_cli_args(args=args) + assert engine_args.enable_prefix_caching + + # with disable flag to turn it off. + args = parser.parse_args(["--no-enable-prefix-caching"]) + engine_args = EngineArgs.from_cli_args(args=args) + assert not engine_args.enable_prefix_caching + + def test_valid_pooling_config(): parser = EngineArgs.add_cli_args(FlexibleArgumentParser()) args = parser.parse_args([ diff --git a/tests/v1/engine/test_engine_args.py b/tests/v1/engine/test_engine_args.py index 69cfdf5a395c1..ac5e7dde525a7 100644 --- a/tests/v1/engine/test_engine_args.py +++ b/tests/v1/engine/test_engine_args.py @@ -4,6 +4,7 @@ from vllm.config import VllmConfig from vllm.engine.arg_utils import EngineArgs from vllm.usage.usage_lib import UsageContext +from vllm.utils import FlexibleArgumentParser if not envs.VLLM_USE_V1: pytest.skip( @@ -12,6 +13,24 @@ ) +def test_prefix_caching_from_cli(): + parser = EngineArgs.add_cli_args(FlexibleArgumentParser()) + args = parser.parse_args([]) + engine_args = EngineArgs.from_cli_args(args=args) + assert (engine_args.enable_prefix_caching + ), "V1 turns on prefix caching by default." + + # Turn it off possible with flag. + args = parser.parse_args(["--no-enable-prefix-caching"]) + engine_args = EngineArgs.from_cli_args(args=args) + assert not engine_args.enable_prefix_caching + + # Turn it on with flag. + args = parser.parse_args(["--enable-prefix-caching"]) + engine_args = EngineArgs.from_cli_args(args=args) + assert engine_args.enable_prefix_caching + + def test_defaults(): engine_args = EngineArgs(model="facebook/opt-125m") diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 90b4798f17a13..f0020562c3c3a 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -416,9 +416,13 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'tokens. This is ignored on neuron devices and ' 'set to max-model-len') - parser.add_argument('--enable-prefix-caching', - action='store_true', - help='Enables automatic prefix caching.') + parser.add_argument( + "--enable-prefix-caching", + action=argparse.BooleanOptionalAction, + default=EngineArgs.enable_prefix_caching, + help="Enables automatic prefix caching. 
" + "Use --no-enable-prefix-caching to disable explicitly.", + ) parser.add_argument('--disable-sliding-window', action='store_true', help='Disables sliding window, ' From a79b1224005836bdf0ab6d3bab807d2f5d8a5ef1 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 28 Nov 2024 00:13:15 -0800 Subject: [PATCH 178/397] [V1] Do not allocate beyond the max_model_len (#10730) Signed-off-by: Woosuk Kwon --- tests/v1/core/test_prefix_caching.py | 24 ++++++++++++++++-------- vllm/v1/core/kv_cache_manager.py | 17 +++++++++++++++++ vllm/v1/core/scheduler.py | 15 ++++++++------- 3 files changed, 41 insertions(+), 15 deletions(-) diff --git a/tests/v1/core/test_prefix_caching.py b/tests/v1/core/test_prefix_caching.py index 83bfbb6ade8d7..b44d3e5cb0678 100644 --- a/tests/v1/core/test_prefix_caching.py +++ b/tests/v1/core/test_prefix_caching.py @@ -23,7 +23,8 @@ def test_prefill(): manager = KVCacheManager( block_size=16, num_gpu_blocks=10, - sliding_window=False, + max_model_len=8192, + sliding_window=None, enable_caching=True, num_preallocate_tokens=16, ) @@ -121,7 +122,8 @@ def test_decode(): manager = KVCacheManager( block_size=16, num_gpu_blocks=10, - sliding_window=False, + max_model_len=8192, + sliding_window=None, enable_caching=True, num_preallocate_tokens=16, ) @@ -172,7 +174,8 @@ def test_evict(): manager = KVCacheManager( block_size=16, num_gpu_blocks=10, - sliding_window=False, + max_model_len=8192, + sliding_window=None, enable_caching=True, num_preallocate_tokens=16, ) @@ -220,7 +223,8 @@ def test_hash_block_correct_reuse(): manager = KVCacheManager( block_size=block_size, num_gpu_blocks=1, - sliding_window=False, + max_model_len=8192, + sliding_window=None, enable_caching=True, num_preallocate_tokens=0, ) @@ -256,7 +260,8 @@ def test_computed_blocks_not_evicted(): manager = KVCacheManager( block_size=block_size, num_gpu_blocks=2, - sliding_window=False, + max_model_len=8192, + sliding_window=None, enable_caching=True, num_preallocate_tokens=0, ) @@ -303,7 +308,8 @@ def test_basic_prefix_caching_disabled(): manager = KVCacheManager( block_size=block_size, num_gpu_blocks=4, - sliding_window=False, + max_model_len=8192, + sliding_window=None, enable_caching=False, num_preallocate_tokens=0, ) @@ -342,7 +348,8 @@ def test_preallocate_blocks(num_preallocate_tokens: int, block_size: int): manager = KVCacheManager( block_size=block_size, num_gpu_blocks=10, - sliding_window=False, + max_model_len=8192, + sliding_window=None, enable_caching=True, num_preallocate_tokens=num_preallocate_tokens, ) @@ -370,7 +377,8 @@ def test_cache_blocks(): manager = KVCacheManager( block_size=block_size, num_gpu_blocks=5, - sliding_window=False, + max_model_len=8192, + sliding_window=None, enable_caching=True, num_preallocate_tokens=0, ) diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 8eb3fb976eb87..b492a755e6dd5 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -17,12 +17,15 @@ def __init__( self, block_size: int, num_gpu_blocks: int, + max_model_len: int, sliding_window: Optional[int] = None, enable_caching: bool = True, num_preallocate_tokens: int = 64, ) -> None: self.block_size = block_size self.num_gpu_blocks = num_gpu_blocks + self.max_model_len = max_model_len + self.max_num_blocks_per_req = cdiv(max_model_len, block_size) self.sliding_window = sliding_window self.enable_caching = enable_caching # NOTE(woosuk): To avoid frequent block allocation, we preallocate some @@ -132,7 +135,14 @@ def append_slots( num_new_blocks = min( 
num_new_blocks + self.num_preallocate_blocks, self.free_block_queue.num_free_blocks, + # Should not exceed the maximum number of blocks per request. + # This is especially because the block table has the shape + # [..., max_num_blocks_per_req]. + # TODO(woosuk): Check and reject requests if + # num_prompt_tokens + max_tokens > max_model_len. + self.max_num_blocks_per_req - len(req_blocks), ) + assert num_new_blocks > 0 new_blocks = self._get_new_blocks(num_new_blocks) req_blocks.extend(new_blocks) @@ -212,7 +222,14 @@ def allocate_slots( num_required_blocks + self.num_preallocate_blocks, self.free_block_queue.num_free_blocks - num_evictable_computed_blocks, + # Should not exceed the maximum number of blocks per request. + # This is especially because the block table has the shape + # [..., max_num_blocks_per_req]. + # TODO(woosuk): Check and reject requests if + # num_prompt_tokens + max_tokens > max_model_len. + self.max_num_blocks_per_req - len(computed_blocks), ) + assert num_new_blocks > 0 # Concatenate the computed block IDs and the new block IDs. new_blocks = self._get_new_blocks(num_new_blocks) diff --git a/vllm/v1/core/scheduler.py b/vllm/v1/core/scheduler.py index ba50a9786d805..f1f26f4e8d443 100644 --- a/vllm/v1/core/scheduler.py +++ b/vllm/v1/core/scheduler.py @@ -33,22 +33,23 @@ def __init__( # TODO: Support LoRA. assert lora_config is None, "V1 does not support LoRA yet." + # Scheduling constraints. + self.max_num_running_reqs = self.scheduler_config.max_num_seqs + self.max_num_scheduled_tokens = \ + self.scheduler_config.max_num_batched_tokens + self.max_model_len = self.scheduler_config.max_model_len + num_gpu_blocks = cache_config.num_gpu_blocks assert isinstance(num_gpu_blocks, int) and num_gpu_blocks > 0 - # Create the block space manager. + # Create the KV cache manager. self.kv_cache_manager = KVCacheManager( block_size=self.cache_config.block_size, num_gpu_blocks=num_gpu_blocks, + max_model_len=self.max_model_len, sliding_window=self.cache_config.sliding_window, enable_caching=self.cache_config.enable_prefix_caching) self.block_size = self.cache_config.block_size - # Scheduling constraints. - self.max_num_running_reqs = self.scheduler_config.max_num_seqs - self.max_num_scheduled_tokens = \ - self.scheduler_config.max_num_batched_tokens - self.max_model_len = self.scheduler_config.max_model_len - # req_id -> Request self.requests: Dict[str, Request] = {} # Priority queues for requests. 
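A minimal sketch of the capping rule this patch introduces, assuming invented helper names and example numbers that are not part of the vLLM API: new block allocation is clamped so a request never holds more than ceil(max_model_len / block_size) blocks, which keeps the block table inside its [..., max_num_blocks_per_req] shape.

    # Illustrative sketch only; ceil_div, plan_new_blocks and the literal sizes
    # below are assumptions for this example, not vLLM code.
    def ceil_div(a: int, b: int) -> int:
        return -(-a // b)

    def plan_new_blocks(num_required_blocks: int,
                        num_preallocate_blocks: int,
                        num_free_blocks: int,
                        max_model_len: int,
                        block_size: int,
                        num_allocated_blocks: int) -> int:
        # Same idea as allocate_slots/append_slots above: preallocate a few extra
        # blocks, but never exceed the free pool and never exceed the per-request
        # block-table width derived from max_model_len.
        max_num_blocks_per_req = ceil_div(max_model_len, block_size)
        num_new_blocks = min(
            num_required_blocks + num_preallocate_blocks,
            num_free_blocks,
            max_num_blocks_per_req - num_allocated_blocks,
        )
        assert num_new_blocks > 0
        return num_new_blocks

    # block_size=16, max_model_len=8192 -> at most 512 blocks per request;
    # with 508 already allocated, only 4 more can be granted even though
    # 7 were requested and 10 are free.
    print(plan_new_blocks(num_required_blocks=3, num_preallocate_blocks=4,
                          num_free_blocks=10, max_model_len=8192,
                          block_size=16, num_allocated_blocks=508))  # prints 4
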
From 9a8bff028595d1c5c52bc225013908ca7a7b66d8 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 28 Nov 2024 02:25:59 -0800 Subject: [PATCH 179/397] [Kernel] Update vllm-flash-attn version (#10736) Signed-off-by: Woosuk Kwon --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 882d4412632a5..45a3b484e0360 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -522,7 +522,7 @@ else() FetchContent_Declare( vllm-flash-attn GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git - GIT_TAG 5259c586c403a4e4d8bf69973c159b40cc346fb9 + GIT_TAG d886f88165702b3c7e7744502772cd98b06be9e1 GIT_PROGRESS TRUE # Don't share the vllm-flash-attn build between build types BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn From 3ed5e7314667f0a9c0c47e6d635ac82fd93296a2 Mon Sep 17 00:00:00 2001 From: Richard Liu <39319471+richardsliu@users.noreply.github.com> Date: Thu, 28 Nov 2024 02:30:48 -0800 Subject: [PATCH 180/397] [TPU] Update requirements-tpu (#10726) Signed-off-by: Richard Liu --- requirements-tpu.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements-tpu.txt b/requirements-tpu.txt index 3d1e80f6be620..b8f0b15469e77 100644 --- a/requirements-tpu.txt +++ b/requirements-tpu.txt @@ -16,8 +16,8 @@ ray[default] --find-links https://storage.googleapis.com/libtpu-releases/index.html --find-links https://storage.googleapis.com/jax-releases/jax_nightly_releases.html --find-links https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html -torch==2.6.0.dev20241114+cpu -torchvision==0.20.0.dev20241114+cpu -torch_xla[tpu] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.6.0.dev20241114-cp310-cp310-linux_x86_64.whl -jaxlib==0.4.32.dev20240829 -jax==0.4.32.dev20240829 +torch==2.6.0.dev20241126+cpu +torchvision==0.20.0.dev20241126+cpu +torch_xla[tpu] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.6.0.dev20241126-cp310-cp310-linux_x86_64.whl +jaxlib==0.4.36.dev20241122 +jax==0.4.36.dev20241122 From 5fc5ce0fe45f974fc8840175e8321652238400f0 Mon Sep 17 00:00:00 2001 From: sixgod Date: Thu, 28 Nov 2024 22:53:31 +0800 Subject: [PATCH 181/397] [Model] Added GLM-4 series hf format model support vllm==0.6.4 (#10561) Signed-off-by: Isotr0py <2037008807@qq.com> Co-authored-by: Isotr0py <2037008807@qq.com> Co-authored-by: Cyrus Leung --- docs/source/models/supported_models.rst | 5 +++++ tests/models/registry.py | 1 + tests/models/test_initialization.py | 2 +- vllm/model_executor/models/glm.py | 21 +++++++++++++++++++++ vllm/model_executor/models/registry.py | 2 ++ 5 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 vllm/model_executor/models/glm.py diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index c5fbb30b24e28..fd0671beacee7 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -139,6 +139,11 @@ Text Generation - :code:`google/gemma-2-9b`, :code:`google/gemma-2-27b`, etc. - ✅︎ - ✅︎ + * - :code:`GlmForCausalLM` + - GLM-4 + - :code:`THUDM/glm-4-9b-chat-hf`, etc. + - ✅︎ + - ✅︎ * - :code:`GPT2LMHeadModel` - GPT-2 - :code:`gpt2`, :code:`gpt2-xl`, etc. 
diff --git a/tests/models/registry.py b/tests/models/registry.py index a93bfe907e0d7..461f453d8b1c3 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -63,6 +63,7 @@ class _HfExamplesInfo: "FalconForCausalLM": _HfExamplesInfo("tiiuae/falcon-7b"), "GemmaForCausalLM": _HfExamplesInfo("google/gemma-2b"), "Gemma2ForCausalLM": _HfExamplesInfo("google/gemma-2-9b"), + "GlmForCausalLM": _HfExamplesInfo("THUDM/glm-4-9b-chat-hf"), "GPT2LMHeadModel": _HfExamplesInfo("gpt2"), "GPTBigCodeForCausalLM": _HfExamplesInfo("bigcode/starcoder"), "GPTJForCausalLM": _HfExamplesInfo("EleutherAI/gpt-j-6b"), diff --git a/tests/models/test_initialization.py b/tests/models/test_initialization.py index b8312c2d9b7cc..2a072737db043 100644 --- a/tests/models/test_initialization.py +++ b/tests/models/test_initialization.py @@ -11,7 +11,7 @@ @pytest.mark.parametrize("model_arch", HF_EXAMPLE_MODELS.get_supported_archs()) def test_can_initialize(model_arch): - if (model_arch == "Idefics3ForConditionalGeneration" + if (model_arch in {"Idefics3ForConditionalGeneration", "GlmForCausalLM"} and transformers.__version__ < "4.46.0"): pytest.skip(reason="Model introduced in HF >= 4.46.0") diff --git a/vllm/model_executor/models/glm.py b/vllm/model_executor/models/glm.py new file mode 100644 index 0000000000000..942d1e14baed1 --- /dev/null +++ b/vllm/model_executor/models/glm.py @@ -0,0 +1,21 @@ +"""Inference-only HF format GLM-4 model compatible with THUDM weights.""" +from vllm.config import VllmConfig +from vllm.model_executor.models.llama import LlamaForCausalLM + +from .utils import PPMissingLayer + + +class GlmForCausalLM(LlamaForCausalLM): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__(vllm_config=vllm_config, prefix=prefix) + # Hack Llama model to fit HF format GLM implementation + # Attention difference between GLM and Llama: + # 1. Half partial rotary_dim and no Neox style. + # 2. 
There is no bias for o_proj in attention + for layer in self.model.layers: + if not isinstance(layer, PPMissingLayer): + layer.self_attn.rotary_emb.rotary_dim //= 2 + layer.self_attn.rotary_emb.is_neox_style = False + layer.self_attn.o_proj.bias = None + layer.self_attn.o_proj.skip_bias_add = True diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 4462f6ed55a9c..c400c7d59828c 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -48,6 +48,7 @@ "FalconForCausalLM": ("falcon", "FalconForCausalLM"), "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"), "Gemma2ForCausalLM": ("gemma2", "Gemma2ForCausalLM"), + "GlmForCausalLM": ("glm", "GlmForCausalLM"), "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"), "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"), "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"), @@ -107,6 +108,7 @@ "XLMRobertaModel": ("roberta", "RobertaEmbeddingModel"), "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), "Gemma2Model": ("gemma2", "Gemma2EmbeddingModel"), + "GlmForCausalLM": ("glm", "GlmForCausalLM"), "LlamaModel": ("llama", "LlamaEmbeddingModel"), **{ # Multiple models share the same architecture, so we include them all From 8c1e77fb585c4f42783a3d88c1efc7c9e15fd89f Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 28 Nov 2024 08:31:28 -0800 Subject: [PATCH 182/397] [Kernel] Update vllm-flash-attn version to reduce CPU overheads (#10742) Signed-off-by: Woosuk Kwon --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 45a3b484e0360..f43bf8143458b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -522,7 +522,7 @@ else() FetchContent_Declare( vllm-flash-attn GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git - GIT_TAG d886f88165702b3c7e7744502772cd98b06be9e1 + GIT_TAG fdf6d72b48aea41f4ae6a89139a453dae554abc8 GIT_PROGRESS TRUE # Don't share the vllm-flash-attn build between build types BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn From 98f47f2a4032f8c395268de80858c64ffcfc60fa Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Thu, 28 Nov 2024 09:01:02 -0800 Subject: [PATCH 183/397] [V1] Optimize the CPU overheads in FlashAttention custom op (#10733) Signed-off-by: Woosuk Kwon --- vllm/v1/attention/backends/flash_attn.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index 5f8535eaa303f..e618edf7d35bf 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -135,6 +135,13 @@ def forward( assert k_scale == 1.0 and v_scale == 1.0, ( "key/v_scale is not supported in FlashAttention.") + # Reshape the query, key, and value tensors. + # NOTE(woosuk): We do this outside the custom op to minimize the CPU + # overheads from the non-CUDA-graph regions. 
+ query = query.view(-1, self.num_heads, self.head_size) + key = key.view(-1, self.num_kv_heads, self.head_size) + value = value.view(-1, self.num_kv_heads, self.head_size) + output = torch.empty_like(query) torch.ops.vllm.unified_v1_flash_attention( output, @@ -153,7 +160,7 @@ def forward( self.alibi_slopes, self.logits_soft_cap, ) - return output + return output.view(-1, self.num_heads * self.head_size) def unified_v1_flash_attention( @@ -184,11 +191,6 @@ def unified_v1_flash_attention( attn_metadata: FlashAttentionMetadata = current_metadata num_actual_tokens = attn_metadata.num_actual_tokens - # Reshape the query, key, and value tensors. - query = query.view(-1, num_heads, head_size) - key = key.view(-1, num_kv_heads, head_size) - value = value.view(-1, num_kv_heads, head_size) - # Reshape the input keys and values and store them in the cache. key_cache = kv_cache[0] value_cache = kv_cache[1] @@ -218,8 +220,7 @@ def unified_v1_flash_attention( block_table=attn_metadata.block_table, softcap=logits_soft_cap, ) - attn_output = attn_output.view(num_actual_tokens, -1) - # TODO(woosuk): Optimize this. + # TODO(woosuk): Remove this unnecessary copy. output[:num_actual_tokens].copy_(attn_output) From c83919c7a6bd47bb452321f08017ef5a5cdd553a Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Fri, 29 Nov 2024 01:29:04 +0800 Subject: [PATCH 184/397] [Model] Add Internlm2 LoRA support (#5064) Signed-off-by: Isotr0py <2037008807@qq.com> --- docs/source/models/supported_models.rst | 2 +- vllm/model_executor/models/internlm2.py | 22 ++++++++++++++++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index fd0671beacee7..7b7a83f20871b 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -182,7 +182,7 @@ Text Generation * - :code:`InternLM2ForCausalLM` - InternLM2 - :code:`internlm/internlm2-7b`, :code:`internlm/internlm2-chat-7b`, etc. 
- - + - ✅︎ - ✅︎ * - :code:`JAISLMHeadModel` - Jais diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index 906128940ff76..41b9f110d771f 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -27,7 +27,7 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -from .interfaces import SupportsPP +from .interfaces import SupportsLoRA, SupportsPP from .utils import (is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -319,7 +319,21 @@ def forward( return hidden_states -class InternLM2ForCausalLM(nn.Module, SupportsPP): +class InternLM2ForCausalLM(nn.Module, SupportsPP, SupportsLoRA): + packed_modules_mapping = { + "wqkv": ["wqkv"], + "gate_up_proj": ["w1", "w3"], + } + + # LoRA specific attributes + supported_lora_modules = [ + "wqkv", + "wo", + "gate_up_proj", + "w2", + ] + embedding_modules = {} + embedding_padding_modules = [] def __init__(self, *, @@ -329,8 +343,12 @@ def __init__(self, super().__init__() config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config + lora_config = vllm_config.lora_config + self.config = config self.quant_config = quant_config + self.lora_config = lora_config + self.model = model_type(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) self.output = ParallelLMHead(config.vocab_size, From fa6ecb9aa7a55a99f87fdec7a75011f87af2176c Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 29 Nov 2024 12:47:06 +0800 Subject: [PATCH 185/397] [Model] Clean up MiniCPMV (#10751) Signed-off-by: DarkLight1337 --- .../vision_language/test_models.py | 19 ++- .../vision_language/vlm_utils/model_utils.py | 13 +- vllm/model_executor/layers/fused_moe/layer.py | 10 +- vllm/model_executor/models/minicpm.py | 153 +++++++++--------- vllm/model_executor/models/minicpm3.py | 5 +- vllm/model_executor/models/minicpmv.py | 136 ++++------------ vllm/model_executor/models/utils.py | 28 +--- 7 files changed, 149 insertions(+), 215 deletions(-) diff --git a/tests/models/decoder_only/vision_language/test_models.py b/tests/models/decoder_only/vision_language/test_models.py index 3f6d8ef42cd5f..3457ec6b8e73b 100644 --- a/tests/models/decoder_only/vision_language/test_models.py +++ b/tests/models/decoder_only/vision_language/test_models.py @@ -295,16 +295,29 @@ ) ], ), - "minicpmv": VLMTestInfo( + "minicpmv_25": VLMTestInfo( models=["openbmb/MiniCPM-Llama3-V-2_5"], - test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE), + test_type=VLMTestType.IMAGE, prompt_formatter=lambda img_prompt: f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{img_prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", # noqa: E501 img_idx_to_prompt=lambda idx: "(./)\n", max_model_len=4096, max_num_seqs=2, get_stop_token_ids=lambda tok: [tok.eos_id, tok.eot_id], postprocess_inputs=model_utils.wrap_inputs_post_processor, - hf_output_post_proc=model_utils.minicmpv_trunc_hf_output, + hf_output_post_proc=model_utils.minicpmv_trunc_hf_output, + ), + "minicpmv_26": VLMTestInfo( + models=["openbmb/MiniCPM-V-2_6"], + test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE), + prompt_formatter=lambda img_prompt: f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{img_prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", # noqa: E501 + img_idx_to_prompt=lambda idx: "(./)\n", + max_model_len=4096, + max_num_seqs=2, + get_stop_token_ids=lambda tok: 
tok.convert_tokens_to_ids(['<|im_end|>', '<|endoftext|>']), # noqa: E501 + postprocess_inputs=model_utils.ignore_inputs_post_processor( + "image_sizes" + ), + hf_output_post_proc=model_utils.minicpmv_trunc_hf_output, ), # Tests for phi3v currently live in another file because of a bug in # transformers. Once this issue is fixed, we can enable them here instead. diff --git a/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py b/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py index 849857b4232e7..15f15dd7d8030 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py +++ b/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py @@ -170,7 +170,7 @@ def paligemma_vllm_to_hf_output(vllm_output: RunnerOutput, ####### Post-processors for HF outputs -def minicmpv_trunc_hf_output(hf_output: RunnerOutput, +def minicpmv_trunc_hf_output(hf_output: RunnerOutput, model: str) -> RunnerOutput: output_ids, output_str, out_logprobs = hf_output if output_str.endswith("<|eot_id|>"): @@ -197,6 +197,17 @@ def process(hf_inputs: BatchEncoding, dtype: str): return process +def ignore_inputs_post_processor( + hf_inp_key: str) -> Callable[[BatchEncoding, str], BatchEncoding]: + """Gets a handle to a post processor which ignores a given key.""" + + def process(hf_inputs: BatchEncoding, dtype: str): + del hf_inputs[hf_inp_key] + return hf_inputs + + return process + + def wrap_inputs_post_processor(hf_inputs: BatchEncoding, dtype: str): return {"model_inputs": hf_inputs} diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 5570771ac917b..8c6f7c6e06515 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -242,7 +242,7 @@ def _load_per_tensor_weight_scale(self, shard_id: str, def _load_model_weight_or_group_weight_scale(self, shard_dim: int, expert_data: torch.Tensor, shard_id: str, - loaded_weight: torch.tensor, + loaded_weight: torch.Tensor, tp_rank: int): # Load grouped weight scales for group quantization # or model weights @@ -261,7 +261,7 @@ def _load_model_weight_or_group_weight_scale(self, shard_dim: int, def _load_per_channel_weight_scale(self, expert_data: torch.Tensor, shard_dim: int, shard_id: str, - loaded_weight: torch.tensor, + loaded_weight: torch.Tensor, tp_rank: int): # for per channel weight quantization if shard_id == "w2": @@ -274,7 +274,7 @@ def _load_per_channel_weight_scale(self, expert_data: torch.Tensor, tp_rank=tp_rank) def _load_w13(self, expert_data: torch.Tensor, shard_dim: int, - shard_id: str, loaded_weight: torch.tensor, tp_rank: int): + shard_id: str, loaded_weight: torch.Tensor, tp_rank: int): # Index the loaded weight for tp sharding. # gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim @@ -292,7 +292,7 @@ def _load_w13(self, expert_data: torch.Tensor, shard_dim: int, expert_data.copy_(loaded_weight) def _load_w2(self, expert_data: torch.Tensor, shard_dim: int, - shard_id: str, loaded_weight: torch.tensor, tp_rank: int): + shard_id: str, loaded_weight: torch.Tensor, tp_rank: int): # Index the loaded weight for tp sharding. 
# down_proj: "RowParallel" so tp sharding on input_dim @@ -311,7 +311,7 @@ def _load_single_value(self, param: torch.nn.Parameter, param_data[expert_id] = loaded_weight def _load_g_idx(self, shard_id: str, expert_data: torch.Tensor, - shard_dim: int, loaded_weight: torch.tensor, tp_rank: int): + shard_dim: int, loaded_weight: torch.Tensor, tp_rank: int): if shard_id == "w2": self._load_w2(shard_id=shard_id, diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index c9a573278a136..6254d26c7060d 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -52,7 +52,7 @@ from vllm.sequence import IntermediateTensors from .interfaces import SupportsLoRA, SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (AutoWeightsLoader, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -378,6 +378,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size, org_num_embeddings=config.vocab_size, ) + self.num_experts = getattr(self.config, "num_experts", 0) self._init_layers(prefix, config, cache_config, quant_config) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( @@ -437,6 +438,73 @@ def forward( hidden_states = self.norm(hidden_states) return hidden_states + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + expert_params_mapping = [ + # (param_name, weight_name, expert_id) + ("ws" if weight_name in ["w1", "w3"] else "w2s", + f"experts.{expert_id}.{weight_name}.weight", expert_id) + for expert_id in range(self.num_experts) + for weight_name in ["w1", "w2", "w3"] + ] + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + if ("rotary_emb.cos_cached" in name + or "rotary_emb.sin_cached" in name): + # Models trained using ColossalAI may include these tensors in + # the checkpoint. Skip them. + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + for param_name, weight_name, expert_id in expert_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, + loaded_weight, + weight_name, + expert_id=expert_id) + break + else: + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + class MiniCPMForCausalLM(nn.Module, SupportsLoRA, SupportsPP): packed_modules_mapping = { @@ -480,8 +548,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.cache_config = cache_config self.quant_config = quant_config - self.num_experts = getattr(self.config, "num_experts", 0) - self._init_model(vllm_config=vllm_config, prefix=prefix) + self.model = self._init_model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + unpadded_vocab_size = config.vocab_size if lora_config: unpadded_vocab_size += lora_config.lora_extra_vocab_size @@ -506,8 +575,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model.make_empty_intermediate_tensors) def _init_model(self, *, vllm_config: VllmConfig, prefix: str = ""): - self.model = MiniCPMModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + return MiniCPMModel(vllm_config=vllm_config, prefix=prefix) def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) @@ -546,72 +614,9 @@ def sample( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - expert_params_mapping = [ - # (param_name, weight_name, expert_id) - ("ws" if weight_name in ["w1", "w3"] else "w2s", - f"experts.{expert_id}.{weight_name}.weight", expert_id) - for expert_id in range(self.num_experts) - for weight_name in ["w1", "w2", "w3"] - ] - params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - if ("rotary_emb.cos_cached" in name - or "rotary_emb.sin_cached" in name): - # Models trained using ColossalAI may include these tensors in - # the checkpoint. Skip them. - continue - # With tie_word_embeddings, we can skip lm_head.weight - # The weight might appear unnecessarily in the files if the model is - # processed with quantization, LoRA, fine-tuning, etc. - if self.config.tie_word_embeddings and "lm_head.weight" in name: - continue - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - for param_name, weight_name, expert_id in expert_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, - loaded_weight, - weight_name, - expert_id=expert_id) - break - else: - # Skip loading extra bias for GPTQ models. 
- if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params + loader = AutoWeightsLoader( + self, + skip_prefixes=(["lm_head."] + if self.config.tie_word_embeddings else None), + ) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/minicpm3.py b/vllm/model_executor/models/minicpm3.py index c66be2d9c2d07..e9d7eada1d16c 100644 --- a/vllm/model_executor/models/minicpm3.py +++ b/vllm/model_executor/models/minicpm3.py @@ -40,7 +40,7 @@ MiniCPMForCausalLM, MiniCPMModel) -from .utils import make_layers, maybe_prefix +from .utils import make_layers class MiniCPM3Attention(nn.Module): @@ -248,5 +248,4 @@ class MiniCPM3ForCausalLM(MiniCPMForCausalLM): } def _init_model(self, *, vllm_config: VllmConfig, prefix: str = ""): - self.model = MiniCPM3Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + return MiniCPM3Model(vllm_config=vllm_config, prefix=prefix) diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index aacce477e0460..1e8f9bd4cf418 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -22,7 +22,7 @@ """Inference-only MiniCPM-V model compatible with HuggingFace weights.""" import math import re -from functools import partial +from functools import cached_property, partial from typing import (Any, Callable, Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) @@ -37,19 +37,15 @@ from vllm.config import VllmConfig from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) -from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.resampler import (BaseResampler, Resampler2, get_2d_sincos_pos_embed) from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead from vllm.model_executor.model_loader.utils import set_default_torch_dtype -from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.models.llama import LlamaModel -from vllm.model_executor.models.minicpm import MiniCPMModel +from vllm.model_executor.models.llama import LlamaForCausalLM +from vllm.model_executor.models.minicpm import MiniCPMForCausalLM from vllm.model_executor.models.module_mapping import MultiModelKeys -from vllm.model_executor.models.qwen2 import Qwen2Model -from vllm.model_executor.models.utils import LLMWrapper +from vllm.model_executor.models.qwen2 import Qwen2ForCausalLM from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.image import cached_get_image_processor @@ -58,11 +54,7 @@ from .idefics2_vision_model import Idefics2VisionTransformer from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsPP -from .utils import is_pp_missing_parameter, maybe_prefix - -_KEYS_TO_MODIFY_MAPPING = { - "llm.lm_head": "lm_head", -} +from .utils import AutoWeightsLoader, maybe_prefix RawImageType = Union[Image.Image, torch.Tensor] @@ -297,10 +289,9 @@ def input_processor_for_minicpmv(ctx: InputContext, inputs: DecoderOnlyInputs): def 
get_placeholder(image_size: Tuple[int, int], num_image: int): if version == (2, 0) or version == (2, 5): - return image_processor. \ - get_slice_image_placeholder(image_size) - return image_processor. \ - get_slice_image_placeholder(image_size, num_image) + return image_processor.get_slice_image_placeholder(image_size) + return image_processor.get_slice_image_placeholder( + image_size, num_image) prompt = inputs.get("prompt") token_ids = inputs.get("prompt_token_ids") @@ -400,37 +391,32 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.vpm = self.init_vision_module(config, quant_config, prefix=maybe_prefix(prefix, "vpm")) - param_dtype = torch.get_default_dtype() - self.vpm.to(dtype=param_dtype) self.vision_dim = (self.vpm.embed_dim if self.version == (2, 0) else self.vpm.embeddings.embed_dim) self.embed_dim = self.config.hidden_size + self.resampler = self.init_resampler(self.embed_dim, self.vision_dim, quant_config=quant_config, prefix=maybe_prefix( prefix, "resampler")) - self.resampler.to(device="cuda", dtype=param_dtype) - # TODO: why is there _KEYS_TO_MODIFY_MAPPING? lm_head should be in llm - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config, - prefix=maybe_prefix( - prefix, "llm.lm_head")) - self.logits_processor = LogitsProcessor(config.vocab_size) - self.sampler = get_sampler() self.make_empty_intermediate_tensors = ( self.llm.make_empty_intermediate_tensors) + @cached_property + def sampler(self): + if hasattr(self.llm, "sampler"): + return self.llm.sampler + + return get_sampler() + def get_embedding( self, input_ids: torch.Tensor, image_inputs: Optional[MiniCPMVImageInputs], ) -> Tuple[torch.Tensor, torch.Tensor]: - vlm_embedding: torch.Tensor = self.llm.embed_tokens(input_ids) - if hasattr(self.config, "scale_emb"): - vlm_embedding *= self.config.scale_emb + vlm_embedding: torch.Tensor = self.llm.get_input_embeddings(input_ids) if image_inputs is None: # No image vision_hidden_states = torch.tensor([], device=input_ids.device) @@ -575,7 +561,7 @@ def forward( # for `torch.compile` integration input_ids = None - output = self.llm( + output = self.llm.model( input_ids=input_ids, positions=positions, kv_caches=kv_caches, @@ -590,9 +576,7 @@ def compute_logits( hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[torch.Tensor]: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits + return self.llm.compute_logits(hidden_states, sampling_metadata) def sample( self, @@ -604,52 +588,8 @@ def sample( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - for key_to_modify, new_key in _KEYS_TO_MODIFY_MAPPING.items(): - if key_to_modify in name: - name = name.replace(key_to_modify, new_key) - if "rotary_emb.inv_freq" in name: - continue - if ("rotary_emb.cos_cached" in name - or "rotary_emb.sin_cached" in name): - # Models trained using ColossalAI may include these tensors in - # the checkpoint. Skip them. 
- continue - use_default_weight_loading = False - if self.is_default_weight_loading(name): - use_default_weight_loading = True - else: - for param_name, weight_name, shard_id in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - use_default_weight_loading = True - if use_default_weight_loading: - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params + loader = AutoWeightsLoader(self) + return loader.load_weights(weights) def get_mm_mapping(self) -> MultiModelKeys: """ @@ -693,9 +633,6 @@ def get_vision_hidden_states(self, data: MiniCPMVImageInputs) -> torch.Tensor: raise NotImplementedError - def is_default_weight_loading(self, name: str) -> bool: - raise NotImplementedError - class MiniCPMV2_0(MiniCPMVBaseModel): @@ -708,8 +645,7 @@ def init_llm( vllm_config: VllmConfig, prefix: str = "", ) -> nn.Module: - return LLMWrapper(MiniCPMModel(vllm_config=vllm_config, prefix=prefix), - name="model") + return MiniCPMForCausalLM(vllm_config=vllm_config, prefix=prefix) def init_vision_module( self, @@ -717,11 +653,12 @@ def init_vision_module( quant_config: Optional[QuantizationConfig], prefix: str = "", ) -> nn.Module: - # TODO :refactor this vision model + # TODO: refactor this vision model try: import timm except ImportError: raise ImportError("Please install timm==0.9.10") from ImportError + with set_default_torch_dtype(torch.float16): model = timm.create_model( "vit_so400m_patch14_siglip_384.webli", @@ -731,6 +668,8 @@ def init_vision_module( dynamic_img_pad=True, ) + model = model.to(dtype=torch.get_default_dtype()) + if (isinstance(model, timm.models.VisionTransformer) and model.attn_pool is not None): model.attn_pool = torch.nn.Identity() @@ -759,7 +698,7 @@ def init_resampler(self, quant_config=quant_config, prefix=prefix) - return resampler + return resampler.to(device="cuda", dtype=torch.get_default_dtype()) def get_vision_embedding( self, @@ -790,9 +729,6 @@ def get_vision_hidden_states(self, return self.get_vision_embedding(pixel_values) - def is_default_weight_loading(self, name: str) -> bool: - return "resampler" in name or "vpm" in name - class MiniCPMV2_5(MiniCPMVBaseModel, SupportsLoRA): packed_modules_mapping = { @@ -843,8 +779,7 @@ def init_llm( vllm_config: VllmConfig, prefix: str = "", ) -> nn.Module: - return LLMWrapper(LlamaModel(vllm_config=vllm_config, prefix=prefix), - name="model") + return LlamaForCausalLM(vllm_config=vllm_config, prefix=prefix) def init_vision_module( self, @@ -871,7 +806,8 @@ def init_resampler(self, kv_dim=vision_dim, quant_config=quant_config, prefix=prefix) - return resampler + + return resampler.to(device="cuda", dtype=torch.get_default_dtype()) def get_vision_embedding( self, @@ -913,9 +849,6 @@ def get_vision_hidden_states(self, return self.get_vision_embedding(all_pixel_values.type(dtype), patch_attn_mask, tgt_sizes) - def is_default_weight_loading(self, name: str) -> bool: - return "resampler" in name - class MiniCPMV2_6(MiniCPMVBaseModel, SupportsLoRA): packed_modules_mapping = { @@ -966,8 +899,7 @@ def init_llm( vllm_config: VllmConfig, prefix: str = "", ) -> nn.Module: - return 
LLMWrapper(Qwen2Model(vllm_config=vllm_config, prefix=prefix), - name="model") + return Qwen2ForCausalLM(vllm_config=vllm_config, prefix=prefix) def init_vision_module( self, @@ -995,7 +927,8 @@ def init_resampler(self, kv_dim=vision_dim, quant_config=quant_config, prefix=prefix) - return resampler + + return resampler.to(device="cuda", dtype=torch.get_default_dtype()) def get_vision_embedding( self, @@ -1043,9 +976,6 @@ def get_vision_hidden_states(self, return self.resampler(vision_embedding, tgt_sizes) - def is_default_weight_loading(self, name: str) -> bool: - return "resampler" in name - _SUPPORT_VERSION = { (2, 0): MiniCPMV2_0, diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 4c13cbc953273..a6b40a233439b 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -1,7 +1,7 @@ import itertools from dataclasses import dataclass, field -from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, - Optional, Protocol, Set, Tuple, Union, overload) +from typing import (Callable, Dict, Iterable, List, Literal, Mapping, Optional, + Protocol, Set, Tuple, Union, overload) import torch import torch.nn as nn @@ -560,30 +560,6 @@ def make_empty_intermediate_tensors( return make_empty_intermediate_tensors -class LLMWrapper(nn.Module): - """ - To align with the key names of LoRA trained with PEFT, we need to add an - additional layer to the llm's implementation. - """ - - def __init__(self, llm: nn.Module, name: str) -> None: - super().__init__() - self.model_name = name - setattr(self, name, llm) - - def __getattr__(self, key: str): - llm = super().__getattr__(self.model_name) - if key == self.model_name: - return llm - - return getattr(llm, key) - - # We need to explicitly override this - def __call__(self, *args: Any, **kwargs: Any) -> Any: - llm = super().__getattr__(self.model_name) - return llm(*args, **kwargs) - - def get_vit_attn_backend(support_fa: bool = False) -> _Backend: """ Get the available attention backend for Vision Transformer. 
From c82b432d4a40fd6376a35fd38cb5fc37e9c53798 Mon Sep 17 00:00:00 2001 From: "wang.yuqi" Date: Fri, 29 Nov 2024 13:17:57 +0800 Subject: [PATCH 186/397] [Misc] typo find in sampling_metadata.py (#10740) --- vllm/model_executor/sampling_metadata.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/model_executor/sampling_metadata.py b/vllm/model_executor/sampling_metadata.py index 84f35f75a0c32..1df8f84ed4093 100644 --- a/vllm/model_executor/sampling_metadata.py +++ b/vllm/model_executor/sampling_metadata.py @@ -454,6 +454,7 @@ def from_sampling_metadata( if do_penalties: for seq_group in sampling_metadata.seq_groups: seq_ids = seq_group.seq_ids + sampling_params = seq_group.sampling_params if (seq_group.is_prompt and sampling_params.prompt_logprobs is not None): prefill_len = len(seq_group.prompt_logprob_indices) From 3132aac04326286ae996bf0887e920096b2bb210 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Fri, 29 Nov 2024 21:56:46 +0800 Subject: [PATCH 187/397] [Bugfix] Fix Idefics3 bug (#10778) Signed-off-by: Jee Jee Li --- vllm/model_executor/models/idefics3.py | 92 +++++++++++++------------- 1 file changed, 47 insertions(+), 45 deletions(-) diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index 014e27bc869d4..e5d2edbd81eb1 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -267,54 +267,56 @@ def input_processor_for_idefics3(ctx: InputContext, n_images_in_text = [] text = inputs.get("prompt") - if text is not None: - if isinstance(text, str): - text = [text] - elif not isinstance(text, list) and not isinstance(text[0], str): - raise ValueError("Invalid input text. Please provide a string, " - "or a list of strings") - - fake_image_token = processor.fake_image_token.content - image_token = processor.image_token.content - global_img_token = processor.global_image_tag - - prompt_strings = [] - for sample, sample_rows, sample_cols in zip(text, image_rows, - image_cols): - n_images_in_text.append(sample.count(image_token)) - - # Replace the image token with fake tokens around the expanded - # image token sequence of length `image_seq_len` - image_prompt_strings = [] - for n_rows, n_cols in zip(sample_rows, sample_cols): - image_prompt_string = _get_image_prompt_string( - n_rows, - n_cols, - processor.image_seq_len, - image_token=image_token, - fake_token_around_image=fake_image_token, - global_img_token=global_img_token, - ) - image_prompt_strings.append(image_prompt_string) - - split_sample = sample.split(image_token) - if len(split_sample) == 0: - raise ValueError( - "The image token should be present in the text.") + if text is None: + prompt_token_ids = inputs.get("prompt_token_ids", []) + assert prompt_token_ids + text = tokenizer.decode(prompt_token_ids) + + if isinstance(text, str): + text = [text] + elif not isinstance(text, list) and not isinstance(text[0], str): + raise ValueError("Invalid input text. 
Please provide a string, " + "or a list of strings") + + fake_image_token = processor.fake_image_token.content + image_token = processor.image_token.content + global_img_token = processor.global_image_tag + + prompt_strings = [] + for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols): + n_images_in_text.append(sample.count(image_token)) + + # Replace the image token with fake tokens around the expanded + # image token sequence of length `image_seq_len` + image_prompt_strings = [] + for n_rows, n_cols in zip(sample_rows, sample_cols): + image_prompt_string = _get_image_prompt_string( + n_rows, + n_cols, + processor.image_seq_len, + image_token=image_token, + fake_token_around_image=fake_image_token, + global_img_token=global_img_token, + ) + image_prompt_strings.append(image_prompt_string) - # Place in the image prompt strings where the image tokens are - sample = split_sample[0] - for i, image_prompt_string in enumerate(image_prompt_strings): - sample += image_prompt_string + split_sample[i + 1] - prompt_strings.append(sample) + split_sample = sample.split(image_token) + if len(split_sample) == 0: + raise ValueError("The image token should be present in the text.") - prompt_token_ids = tokenizer(text=prompt_strings[0]).input_ids + # Place in the image prompt strings where the image tokens are + sample = split_sample[0] + for i, image_prompt_string in enumerate(image_prompt_strings): + sample += image_prompt_string + split_sample[i + 1] + prompt_strings.append(sample) - return token_inputs( - prompt_token_ids=prompt_token_ids, - prompt=prompt_strings[0], - multi_modal_data=multi_modal_data, - ) + prompt_token_ids = tokenizer(text=prompt_strings[0]).input_ids + + return token_inputs( + prompt_token_ids=prompt_token_ids, + prompt=prompt_strings[0], + multi_modal_data=multi_modal_data, + ) def _get_max_num_image_patch(image_processor: Idefics3ImageProcessor) -> int: From 661175bc826f4caba04182a1faeeca9e7a3259ac Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Fri, 29 Nov 2024 23:22:21 +0800 Subject: [PATCH 188/397] [platform] Add verify_quantization in platform. (#10757) Signed-off-by: wangxiyuan --- vllm/config.py | 28 +--------------------------- vllm/platforms/cpu.py | 1 + vllm/platforms/cuda.py | 1 + vllm/platforms/hpu.py | 1 + vllm/platforms/interface.py | 13 +++++++++++++ vllm/platforms/neuron.py | 2 ++ vllm/platforms/openvino.py | 1 + vllm/platforms/rocm.py | 15 +++++++++++++++ vllm/platforms/tpu.py | 2 ++ vllm/platforms/xpu.py | 1 + 10 files changed, 38 insertions(+), 27 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index cd24e9ffdf598..b1e5b412fec8f 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -393,17 +393,11 @@ def _parse_quant_hf_config(self): def _verify_quantization(self) -> None: supported_quantization = QUANTIZATION_METHODS - rocm_supported_quantization = [ - "awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors", - "fbgemm_fp8", "gguf" - ] optimized_quantization_methods = [ "fp8", "marlin", "modelopt", "gptq_marlin_24", "gptq_marlin", "awq_marlin", "fbgemm_fp8", "compressed_tensors", "compressed-tensors", "experts_int8" ] - tpu_supported_quantization = ["tpu_int8"] - neuron_supported_quantization = ["neuron_quant"] if self.quantization is not None: self.quantization = self.quantization.lower() @@ -438,32 +432,12 @@ def _verify_quantization(self) -> None: raise ValueError( f"Unknown quantization method: {self.quantization}. 
Must " f"be one of {supported_quantization}.") - if current_platform.is_rocm( - ) and self.quantization not in rocm_supported_quantization: - raise ValueError( - f"{self.quantization} quantization is currently not " - f"supported in ROCm.") - if current_platform.is_tpu( - ) and self.quantization not in tpu_supported_quantization: - raise ValueError( - f"{self.quantization} quantization is currently not " - f"supported in TPU Backend.") + current_platform.verify_quantization(self.quantization) if self.quantization not in optimized_quantization_methods: logger.warning( "%s quantization is not fully " "optimized yet. The speed can be slower than " "non-quantized models.", self.quantization) - if (self.quantization == "awq" and current_platform.is_rocm() - and not envs.VLLM_USE_TRITON_AWQ): - logger.warning( - "Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ" - " is not set, enabling VLLM_USE_TRITON_AWQ.") - envs.VLLM_USE_TRITON_AWQ = True - if current_platform.is_neuron( - ) and self.quantization not in neuron_supported_quantization: - raise ValueError( - f"{self.quantization} quantization is currently not " - f"supported in Neuron Backend.") def _verify_cuda_graph(self) -> None: if self.max_seq_len_to_capture is None: diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 3e22c87f61fac..b5333fbd6f502 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -19,6 +19,7 @@ class CpuPlatform(Platform): _enum = PlatformEnum.CPU + device_name: str = "cpu" device_type: str = "cpu" dispatch_key: str = "CPU" diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 5e9ce551f2332..846a1869da228 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -72,6 +72,7 @@ def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R: class CudaPlatformBase(Platform): _enum = PlatformEnum.CUDA + device_name: str = "cuda" device_type: str = "cuda" dispatch_key: str = "CUDA" diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 3071136e43b85..10aaa6d54962c 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -12,6 +12,7 @@ class HpuPlatform(Platform): _enum = PlatformEnum.HPU + device_name: str = "hpu" device_type: str = "hpu" dispatch_key: str = "HPU" diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 3328665029039..eac2b413f9271 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -56,11 +56,13 @@ def to_int(self) -> int: class Platform: _enum: PlatformEnum + device_name: str device_type: str # available dispatch keys: # check https://github.com/pytorch/pytorch/blob/313dac6c1ca0fa0cde32477509cce32089f8532a/torchgen/model.py#L134 # noqa # use "CPU" as a fallback for platforms not registered in PyTorch dispatch_key: str = "CPU" + supported_quantization: list[str] = [] def is_cuda(self) -> bool: return self._enum == PlatformEnum.CUDA @@ -171,6 +173,17 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: """ pass + @classmethod + def verify_quantization(cls, quant: str) -> None: + """ + Verify whether the quantization is supported by the current platform. 
+ """ + if cls.supported_quantization and \ + quant not in cls.supported_quantization: + raise ValueError( + f"{quant} quantization is currently not supported in " + f"{cls.device_name}.") + class UnspecifiedPlatform(Platform): _enum = PlatformEnum.UNSPECIFIED diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 4c4d778ed3dd4..87655ea198303 100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -10,7 +10,9 @@ class NeuronPlatform(Platform): _enum = PlatformEnum.NEURON + device_name: str = "neuron" device_type: str = "neuron" + supported_quantization: list[str] = ["neuron_quant"] @classmethod def get_device_name(cls, device_id: int = 0) -> str: diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index ea5ec7b40b95c..29b61e955d9ab 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -23,6 +23,7 @@ class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO + device_name: str = "openvino" device_type: str = "openvino" dispatch_key: str = "CPU" diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index d2f44c3e423e3..3c14fbc179f69 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -4,6 +4,7 @@ import torch +import vllm.envs as envs from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum, _Backend @@ -35,8 +36,13 @@ class RocmPlatform(Platform): _enum = PlatformEnum.ROCM + device_name: str = "rocm" device_type: str = "cuda" dispatch_key: str = "CUDA" + supported_quantization: list[str] = [ + "awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors", + "fbgemm_fp8", "gguf" + ] @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: @@ -79,3 +85,12 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: "vllm.spec_decode.spec_decode_worker.create_spec_worker" else: parallel_config.worker_cls = "vllm.worker.worker.Worker" + + @classmethod + def verify_quantization(cls, quant: str) -> None: + super().verify_quantization(quant) + if quant == "awq" and not envs.VLLM_USE_TRITON_AWQ: + logger.warning( + "Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ" + " is not set, enabling VLLM_USE_TRITON_AWQ.") + envs.VLLM_USE_TRITON_AWQ = True diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 137af57023ea9..b138f7e1c54c5 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -16,8 +16,10 @@ class TpuPlatform(Platform): _enum = PlatformEnum.TPU + device_name: str = "tpu" device_type: str = "tpu" dispatch_key: str = "XLA" + supported_quantization: list[str] = ["tpu_int8"] @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 69388a8e0f27c..9665786f4c499 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -16,6 +16,7 @@ class XPUPlatform(Platform): _enum = PlatformEnum.XPU + device_name: str = "xpu" device_type: str = "xpu" dispatch_key: str = "XPU" From 40bc242579d260e6da7614e1494cbd80a6f985b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Lucchesi?= Date: Sat, 30 Nov 2024 05:07:13 +0100 Subject: [PATCH 189/397] [Bugfix] Fix OpenVino/Neuron `driver_worker` init (#10779) Signed-off-by: NickLucche Signed-off-by: Cyrus Leung Co-authored-by: Cyrus Leung --- vllm/executor/neuron_executor.py | 6 ++++-- vllm/executor/openvino_executor.py | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/vllm/executor/neuron_executor.py 
b/vllm/executor/neuron_executor.py index 31e6fdc3ab1bb..a9efc4f9a801c 100644 --- a/vllm/executor/neuron_executor.py +++ b/vllm/executor/neuron_executor.py @@ -29,11 +29,13 @@ def _init_worker(self): wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) - self.driver_worker = wrapper.init_worker( + wrapper.init_worker( vllm_config=self.vllm_config, local_rank=0, rank=0, - distributed_init_method=distributed_init_method) + distributed_init_method=distributed_init_method, + ) + self.driver_worker = wrapper.worker self.driver_worker.init_device() self.driver_worker.load_model() diff --git a/vllm/executor/openvino_executor.py b/vllm/executor/openvino_executor.py index db0070ce510ee..057a32364e512 100644 --- a/vllm/executor/openvino_executor.py +++ b/vllm/executor/openvino_executor.py @@ -36,7 +36,7 @@ def _init_worker(self): distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) - self.driver_worker = wrapper.init_worker( + wrapper.init_worker( ov_core=ov.Core(), vllm_config=self.vllm_config, local_rank=0, @@ -45,6 +45,7 @@ def _init_worker(self): kv_cache_dtype=self.cache_config.cache_dtype, is_driver_worker=True, ) + self.driver_worker = wrapper.worker self.driver_worker.init_device() self.driver_worker.load_model() From 16ee07f22ade57eb882b3c16ad3a6944635996df Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Sat, 30 Nov 2024 12:19:14 +0800 Subject: [PATCH 190/397] [Model] Refactor Molmo weights loading to use AutoWeightsLoader (#10771) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/models/molmo.py | 213 +++++++++++++++------------- 1 file changed, 111 insertions(+), 102 deletions(-) diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index acedddd84d7cb..98caa6857e211 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -3,7 +3,7 @@ from array import array from dataclasses import dataclass from functools import lru_cache, partial -from typing import Iterable, List, Mapping, Optional, Tuple, TypedDict +from typing import Iterable, List, Mapping, Optional, Set, Tuple, TypedDict import torch from einops import rearrange @@ -44,7 +44,8 @@ from vllm.transformers_utils.processor import get_processor from .interfaces import SupportsMultiModal, SupportsPP -from .utils import (get_vit_attn_backend, +from .utils import (AutoWeightsLoader, WeightsMapper, get_vit_attn_backend, + is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -720,6 +721,42 @@ def forward( # image_features: (batch_size, num_image, num_patch, d_model) return image_features + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + + for name, loaded_weight in weights: + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + @support_torch_compile class MolmoModel(nn.Module): @@ -804,6 +841,28 @@ def forward( hidden_states = self.norm(hidden_states) return hidden_states + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + + for name, loaded_weight in weights: + if "gate_up_proj" in name: + up_proj, gate_proj = loaded_weight.chunk(2, dim=0) + loaded_weight = torch.cat([gate_proj, up_proj], dim=0) + + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + cached_get_processor = lru_cache(get_processor) @@ -1200,103 +1259,53 @@ def sample( return next_tokens def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): - - params_mapping = [ - ("model.transformer.ln_f.weight", "model.norm.weight"), - ("attn_out", "self_attn.o_proj"), - ("att_proj", "self_attn.qkv_proj"), - ("q_norm", "self_attn.q_norm"), - ("k_norm", "self_attn.k_norm"), - ("attn_norm", "input_layernorm"), - ("ff_norm", "post_attention_layernorm"), - ] - - params_dict = dict(self.named_parameters(remove_duplicate=False)) - - embedding_weight = dict() - projector_weight = dict() - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - if self.config.tie_word_embeddings and "lm_head.weight" in name: - continue - - if "wte.embedding" in name: - embedding_weight["embedding"] = loaded_weight - continue - - if "wte.new_embedding" in name: - embedding_weight["new_embedding"] = loaded_weight - continue - - if "vision_backbone" in name: - if name.startswith("model"): - name = name[len("model."):] - if 'image_projector' in name: - if 'w1' in name: - projector_weight['gate_proj'] = loaded_weight - elif 'w3' in name: - projector_weight['up_proj'] = loaded_weight - elif 'w2' in name: - projector_weight['down_proj'] = loaded_weight - else: - raise ValueError( - f"Unexpected projector weight: {name}") - continue - else: - if "transformer.blocks" in name: - name = name.replace("transformer.blocks", "layers") - - if "ff_proj" in name: - name = name.replace("ff_proj", "mlp.gate_up_proj") - assert 'weight' in name - up_weight, gate_weight = loaded_weight.chunk(2, dim=0) - loaded_weight = torch.cat([gate_weight, up_weight], dim=0) - - elif "ff_out" in name: - if "layers" in name: - name = name.replace("ff_out", "mlp.down_proj") - else: - # lm head - name = name.replace("model.transformer.ff_out", - "lm_head") - - else: - for (param_name, weight_name) in params_mapping: - if param_name in name: - name = name.replace(param_name, weight_name) - break - - try: - # Skip loading extra bias for GPTQ models. 
- if name.endswith(".bias") and name not in params_dict: - continue - param = params_dict[name] - except KeyError: - raise ValueError(f"Unexpected weight: {name}") from None - - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - - gate_up_proj_weight = torch.cat( - [projector_weight["gate_proj"], projector_weight["up_proj"]], - dim=0) - name = "vision_backbone.image_projector.gate_up_proj.weight" - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", default_weight_loader) - weight_loader(param, gate_up_proj_weight) - - down_proj_weight = projector_weight["down_proj"] - name = "vision_backbone.image_projector.down_proj.weight" - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", default_weight_loader) - weight_loader(param, down_proj_weight) - - embedding_weight = torch.cat( - [embedding_weight["embedding"], embedding_weight["new_embedding"]], - dim=0) - name = "model.embed_tokens.weight" - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", default_weight_loader) - weight_loader(param, embedding_weight) + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_substr={ + # vision backbone mapping + "image_projector.w1.": "image_projector.gate_proj.", + "image_projector.w3.": "image_projector.up_proj.", + "image_projector.w2.": "image_projector.down_proj.", + # language backbone mapping + "att_proj": "self_attn.qkv_proj", + "attn_out": "self_attn.o_proj", + "q_norm": "self_attn.q_norm", + "k_norm": "self_attn.k_norm", + "ff_proj": "mlp.gate_up_proj", + "ff_out": "mlp.down_proj", + "attn_norm": "input_layernorm", + "ff_norm": "post_attention_layernorm", + }, + orig_to_new_prefix={ + # vision backbone mapping + "model.vision_backbone.": "vision_backbone.", + # language backbone mapping + "model.transformer.blocks.": "model.layers.", + "model.transformer.ln_f.": "model.norm.", + # lm_head is renamed to model.transformer.mlp.down_proj firstly, + # we need to run a second renaming for it + "model.transformer.mlp.down_proj.": "lm_head.", + }, + ) + loader = AutoWeightsLoader(self) + weights = _get_weights_with_merged_embedding(weights) + return loader.load_weights(weights, mapper=hf_to_vllm_mapper) + + +def _get_weights_with_merged_embedding( + weights: Iterable[Tuple[str, torch.Tensor]] +) -> Iterable[Tuple[str, torch.Tensor]]: + embedding_weights = {} + for name, weight in weights: + if "wte.embedding" in name: + embedding_weights["embedding"] = weight + elif "wte.new_embedding" in name: + embedding_weights["new_embedding"] = weight + else: + yield (name, weight) + # this is compatible with most of quantization, + # because they won't quantize embed_tokens + embedding_weights = torch.cat( + [embedding_weights["embedding"], embedding_weights["new_embedding"]], + dim=0, + ) + yield ("model.embed_tokens.weight", embedding_weights) From e7cfc4ef4cc017e0a0229adff9f4b143b38fb421 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Sat, 30 Nov 2024 08:45:50 +0100 Subject: [PATCH 191/397] [Interleaved ATTN] Support for Mistral-8B (#10591) Signed-off-by: youkaichao Co-authored-by: youkaichao --- vllm/model_executor/models/llama.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index fe94bb352961b..ff0ab011a9158 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -54,7 +54,7 @@ from .interfaces import SupportsLoRA, SupportsPP 
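[Editorial illustration, not part of the patch] Looking back at the Molmo refactor concluded above: the hand-written renaming loop is replaced by declarative WeightsMapper rules. The sketch below is a simplified stand-in for that mapping, not vLLM's actual WeightsMapper class, and it applies only a subset of the rules from hf_to_vllm_mapper in an illustrative order; it shows how one Hugging Face checkpoint name ends up at the parameter vLLM instantiates.

def rename(name: str) -> str:
    # Simplified stand-in for the prefix/substring rules declared in
    # hf_to_vllm_mapper above; not vLLM's WeightsMapper implementation.
    prefix_rules = {
        "model.transformer.blocks.": "model.layers.",
        "model.transformer.ln_f.": "model.norm.",
    }
    substr_rules = {
        "att_proj": "self_attn.qkv_proj",
        "attn_out": "self_attn.o_proj",
        "ff_proj": "mlp.gate_up_proj",
        "ff_out": "mlp.down_proj",
    }
    for old, new in prefix_rules.items():
        if name.startswith(old):
            name = new + name[len(old):]
    for old, new in substr_rules.items():
        name = name.replace(old, new)
    return name

print(rename("model.transformer.blocks.0.att_proj.weight"))
# -> model.layers.0.self_attn.qkv_proj.weight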
from .utils import (AutoWeightsLoader, PPMissingLayer, WeightsMapper, - is_pp_missing_parameter, + extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -114,6 +114,7 @@ def __init__( prefix: str = "", ) -> None: super().__init__() + layer_idx = extract_layer_index(prefix) self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads @@ -168,6 +169,18 @@ def __init__( rope_scaling=rope_scaling, is_neox_style=is_neox_style, ) + + if hasattr(config, "interleaved_sliding_window"): + if isinstance(config.interleaved_sliding_window, int): + sliding_window = config.interleaved_sliding_window + elif isinstance(config.interleaved_sliding_window, list): + sw_idx = layer_idx % len(config.interleaved_sliding_window) + sliding_window = config.interleaved_sliding_window[sw_idx] + else: + raise ValueError(f"{type(sliding_window)} is not supported.") + else: + sliding_window = None + self.attn = Attention( self.num_heads, self.head_dim, @@ -175,6 +188,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + per_layer_sliding_window=sliding_window, prefix=f"{prefix}.attn", ) From 7e4bbda5735eaca3ce01860b8168feed32e339f4 Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Sat, 30 Nov 2024 19:38:40 +0800 Subject: [PATCH 192/397] [doc] format fix (#10789) Signed-off-by: wangxiyuan --- .../automatic_prefix_caching/details.md | 2 +- .../getting_started/gaudi-installation.rst | 36 +++++++++---------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/source/automatic_prefix_caching/details.md b/docs/source/automatic_prefix_caching/details.md index 2d3214e28ed93..17f806217aa65 100644 --- a/docs/source/automatic_prefix_caching/details.md +++ b/docs/source/automatic_prefix_caching/details.md @@ -25,7 +25,7 @@ With this mapping, we can add another indirection in vLLM’s KV cache managemen This design achieves automatic prefix caching without the need of maintaining a tree structure among the KV blocks. More specifically, all of the blocks are independent of each other and can be allocated and freed by itself, which enables us to manages the KV cache as ordinary caches in operating system. -# Generalized Caching Policy +## Generalized Caching Policy Keeping all the KV blocks in a hash table enables vLLM to cache KV blocks from earlier requests to save memory and accelerate the computation of future requests. For example, if a new request shares the system prompt with the previous request, the KV cache of the shared prompt can directly be used for the new request without recomputation. However, the total KV cache space is limited and we have to decide which KV blocks to keep or evict when the cache is full. diff --git a/docs/source/getting_started/gaudi-installation.rst b/docs/source/getting_started/gaudi-installation.rst index 68c1a56660fa4..249e08278ff8f 100644 --- a/docs/source/getting_started/gaudi-installation.rst +++ b/docs/source/getting_started/gaudi-installation.rst @@ -4,7 +4,7 @@ Installation with Intel® Gaudi® AI Accelerators This README provides instructions on running vLLM with Intel Gaudi devices. Requirements and Installation -============================= +----------------------------- Please follow the instructions provided in the `Gaudi Installation Guide `__ @@ -13,7 +13,7 @@ please follow the methods outlined in the `Optimizing Training Platform Guide `__. 
Requirements ------------- +~~~~~~~~~~~~ - OS: Ubuntu 22.04 LTS - Python: 3.10 @@ -22,7 +22,7 @@ Requirements Quick start using Dockerfile ----------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: console $ docker build -f Dockerfile.hpu -t vllm-hpu-env . @@ -34,10 +34,10 @@ Quick start using Dockerfile Build from source ------------------ +~~~~~~~~~~~~~~~~~ Environment verification -~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^ To verify that the Intel Gaudi software was correctly installed, run: @@ -53,7 +53,7 @@ Verification `__ @@ -107,7 +107,7 @@ Supported Features - Attention with Linear Biases (ALiBi) Unsupported Features -==================== +-------------------- - Beam search - LoRA adapters @@ -115,7 +115,7 @@ Unsupported Features - Prefill chunking (mixed-batch inferencing) Supported Configurations -======================== +------------------------ The following configurations have been validated to be function with Gaudi2 devices. Configurations that are not listed may or may not work. @@ -152,10 +152,10 @@ Gaudi2 devices. Configurations that are not listed may or may not work. with tensor parallelism on 8x HPU, BF16 datatype with random or greedy sampling Performance Tuning -================== +------------------ Execution modes ---------------- +~~~~~~~~~~~~~~~ Currently in vLLM for HPU we support four execution modes, depending on selected HPU PyTorch Bridge backend (via ``PT_HPU_LAZY_MODE`` environment variable), and ``--enforce-eager`` flag. @@ -184,7 +184,7 @@ Currently in vLLM for HPU we support four execution modes, depending on selected Bucketing mechanism -------------------- +~~~~~~~~~~~~~~~~~~~ Intel Gaudi accelerators work best when operating on models with fixed tensor shapes. `Intel Gaudi Graph Compiler `__ is responsible for generating optimized binary code that implements the given model topology on Gaudi. In its default configuration, the produced binary code may be heavily dependent on input and output tensor shapes, and can require graph recompilation when encountering differently shaped tensors within the same topology. While the resulting binaries utilize Gaudi efficiently, the compilation itself may introduce a noticeable overhead in end-to-end execution. In a dynamic inference serving scenario, there is a need to minimize the number of graph compilations and reduce the risk of graph compilation occurring during server runtime. Currently it is achieved by "bucketing" model's forward pass across two dimensions - ``batch_size`` and ``sequence_length``. @@ -233,7 +233,7 @@ As an example, if a request of 3 sequences, with max sequence length of 412 come Bucketing is transparent to a client - padding in sequence length dimension is never returned to the client, and padding in batch dimension does not create new requests. Warmup ------- +~~~~~~ Warmup is an optional, but highly recommended step occurring before vLLM server starts listening. It executes a forward pass for each bucket with dummy data. The goal is to pre-compile all graphs and not incur any graph compilation overheads within bucket boundaries during server runtime. Each warmup step is logged during vLLM startup: @@ -257,7 +257,7 @@ This example uses the same buckets as in *Bucketing mechanism* section. Each out Compiling all the buckets might take some time and can be turned off with ``VLLM_SKIP_WARMUP=true`` environment variable. Keep in mind that if you do that, you may face graph compilations once executing a given bucket for the first time. 
It is fine to disable warmup for development, but it's highly recommended to enable it in deployment. HPU Graph capture ------------------ +~~~~~~~~~~~~~~~~~ `HPU Graphs `__ are currently the most performant execution method of vLLM on Intel Gaudi. When HPU Graphs are enabled, execution graphs will be traced (recorded) ahead of time (after performing warmup), to be later replayed during inference, significantly reducing host overheads. Recording can take large amounts of memory, which needs to be taken into account when allocating KV cache. Enabling HPU Graphs will impact the number of available KV cache blocks, but vLLM provides user-configurable variables to control memory management. @@ -321,7 +321,7 @@ Each described step is logged by vLLM server, as follows (negative values corres Recommended vLLM Parameters ---------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~ - We recommend running inference on Gaudi 2 with ``block_size`` of 128 for BF16 data type. Using default values (16, 32) might lead to @@ -333,7 +333,7 @@ Recommended vLLM Parameters If you encounter out-of-memory issues, see troubleshooting section. Environment variables ---------------------- +~~~~~~~~~~~~~~~~~~~~~ **Diagnostic and profiling knobs:** @@ -380,7 +380,7 @@ Additionally, there are HPU PyTorch Bridge environment variables impacting vLLM - ``PT_HPU_ENABLE_LAZY_COLLECTIVES``: required to be ``true`` for tensor parallel inference with HPU Graphs Troubleshooting: Tweaking HPU Graphs -==================================== +------------------------------------ If you experience device out-of-memory issues or want to attempt inference at higher batch sizes, try tweaking HPU Graphs by following From 133707123e730a3544875d432a9435bdfe5e34cf Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 1 Dec 2024 08:02:54 +0800 Subject: [PATCH 193/397] [Model] Replace embedding models with pooling adapter (#10769) Signed-off-by: DarkLight1337 --- .buildkite/test-pipeline.yaml | 4 +- docs/source/models/supported_models.rst | 15 ++- tests/conftest.py | 1 - .../embedding/language/test_embedding.py | 5 + tests/models/test_registry.py | 31 +++--- .../my_gemma_embedding.py | 45 +++++++- tests/test_config.py | 3 +- vllm/config.py | 25 +++++ vllm/inputs/registry.py | 16 +-- vllm/model_executor/layers/pooler.py | 4 +- vllm/model_executor/model_loader/loader.py | 18 +++- vllm/model_executor/model_loader/utils.py | 18 +++- vllm/model_executor/models/adapters.py | 98 +++++++++++++++++ vllm/model_executor/models/blip2.py | 5 +- vllm/model_executor/models/gemma2.py | 58 +--------- vllm/model_executor/models/internvl.py | 5 +- vllm/model_executor/models/llama.py | 102 ++---------------- vllm/model_executor/models/llava.py | 5 +- vllm/model_executor/models/llava_next.py | 26 +---- .../model_executor/models/llava_next_video.py | 5 +- vllm/model_executor/models/llava_onevision.py | 5 +- vllm/model_executor/models/paligemma.py | 5 +- vllm/model_executor/models/phi3v.py | 39 +++---- vllm/model_executor/models/pixtral.py | 5 +- vllm/model_executor/models/qwen2.py | 28 +++-- vllm/model_executor/models/qwen2_vl.py | 18 +--- vllm/model_executor/models/registry.py | 59 ++++++---- vllm/model_executor/models/ultravox.py | 5 +- vllm/model_executor/models/utils.py | 24 ++++- vllm/multimodal/base.py | 6 +- vllm/multimodal/registry.py | 5 +- vllm/utils.py | 22 +++- 32 files changed, 387 insertions(+), 323 deletions(-) create mode 100644 vllm/model_executor/models/adapters.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 
fc23c9cff0d87..46692506f01d4 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -334,7 +334,6 @@ steps: commands: - pytest -v -s models/decoder_only/language -m 'core_model or quant_model' - pytest -v -s models/embedding/language -m core_model - - pytest -v -s models/embedding/vision_language -m core_model - label: Language Models Test (Extended) # 50min optional: true @@ -346,7 +345,6 @@ steps: commands: - pytest -v -s models/decoder_only/language -m 'not core_model and not quant_model' - pytest -v -s models/embedding/language -m 'not core_model' - - pytest -v -s models/embedding/vision_language -m 'not core_model' - label: Multi-Modal Models Test (Standard) # 26min #mirror_hardwares: [amd] @@ -359,6 +357,7 @@ steps: commands: - pytest -v -s models/decoder_only/audio_language -m 'core_model or quant_model' - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'core_model or quant_model' + - pytest -v -s models/embedding/vision_language -m core_model - pytest -v -s models/encoder_decoder/language -m core_model - pytest -v -s models/encoder_decoder/vision_language -m core_model @@ -376,6 +375,7 @@ steps: # https://github.com/huggingface/transformers/issues/34307 - pytest -v -s models/decoder_only/vision_language/test_phi3v.py - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'not core_model and not quant_model' + - pytest -v -s models/embedding/vision_language -m 'not core_model' - pytest -v -s models/encoder_decoder/language -m 'not core_model' - pytest -v -s models/encoder_decoder/vision_language -m 'not core_model' diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 7b7a83f20871b..f571b8bf6735e 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -357,7 +357,7 @@ Text Embedding - ✅︎ * - :code:`Qwen2Model`, :code:`Qwen2ForCausalLM` - Qwen2-based - - :code:`ssmits/Qwen2-7B-Instruct-embed-base`, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. + - :code:`ssmits/Qwen2-7B-Instruct-embed-base` (see note), :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. - ✅︎ - ✅︎ * - :code:`RobertaModel`, :code:`RobertaForMaskedLM` @@ -378,6 +378,10 @@ Text Embedding .. tip:: You can override the model's pooling method by passing :code:`--override-pooler-config`. +.. note:: + :code:`ssmits/Qwen2-7B-Instruct-embed-base` has an improperly defined Sentence Transformers config. + You should manually set mean pooling by passing :code:`--override-pooler-config '{"pooling_type": "MEAN"}'`. + .. note:: Unlike base Qwen2, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` uses bi-directional attention. You can set :code:`--hf-overrides '{"is_causal": false}'` to change the attention mask accordingly. @@ -397,12 +401,21 @@ Reward Modeling - Example HF Models - :ref:`LoRA ` - :ref:`PP ` + * - :code:`LlamaForCausalLM` + - Llama-based + - :code:`peiyi9979/math-shepherd-mistral-7b-prm`, etc. + - ✅︎ + - ✅︎ * - :code:`Qwen2ForRewardModel` - Qwen2-based - :code:`Qwen/Qwen2.5-Math-RM-72B`, etc. - ✅︎ - ✅︎ +.. important:: + For process-supervised reward models such as :code:`peiyi9979/math-shepherd-mistral-7b-prm`, the pooling config should be set explicitly, + e.g.: :code:`--override-pooler-config '{"pooling_type": "STEP", "step_tag_id": 123, "returned_token_ids": [456, 789]}'`. + .. 
note:: As an interim measure, these models are supported in both offline and online inference via Embeddings API. diff --git a/tests/conftest.py b/tests/conftest.py index d56942d8912af..36f1d477fab59 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -263,7 +263,6 @@ def __init__( dtype: str = "half", *, model_kwargs: Optional[Dict[str, Any]] = None, - is_embedding_model: bool = False, is_sentence_transformer: bool = False, is_cross_encoder: bool = False, skip_tokenizer_init: bool = False, diff --git a/tests/models/embedding/language/test_embedding.py b/tests/models/embedding/language/test_embedding.py index 36b1e5887981c..5ef8540265d14 100644 --- a/tests/models/embedding/language/test_embedding.py +++ b/tests/models/embedding/language/test_embedding.py @@ -4,6 +4,8 @@ """ import pytest +from vllm.config import PoolerConfig + from ..utils import check_embeddings_close @@ -33,6 +35,9 @@ def test_models( dtype: str, ) -> None: vllm_extra_kwargs = {} + if model == "ssmits/Qwen2-7B-Instruct-embed-base": + vllm_extra_kwargs["override_pooler_config"] = \ + PoolerConfig(pooling_type="MEAN") if model == "Alibaba-NLP/gte-Qwen2-7B-instruct": vllm_extra_kwargs["hf_overrides"] = {"is_causal": False} diff --git a/tests/models/test_registry.py b/tests/models/test_registry.py index 289ea66b5ebc5..1886b1f9898ad 100644 --- a/tests/models/test_registry.py +++ b/tests/models/test_registry.py @@ -6,11 +6,8 @@ from vllm.model_executor.models import (is_embedding_model, is_text_generation_model, supports_multimodal) -# yapf conflicts with isort for this block -# yapf: disable -from vllm.model_executor.models.registry import (_CROSS_ENCODER_MODELS, - _EMBEDDING_MODELS, - _MULTIMODAL_MODELS, +from vllm.model_executor.models.adapters import as_embedding_model +from vllm.model_executor.models.registry import (_MULTIMODAL_MODELS, _SPECULATIVE_DECODING_MODELS, _TEXT_GENERATION_MODELS, ModelRegistry) @@ -26,18 +23,18 @@ def test_registry_imports(model_arch): model_cls, _ = ModelRegistry.resolve_model_cls(model_arch) if model_arch in _SPECULATIVE_DECODING_MODELS: - pass # Ignore these models which do not have a unified format - else: - assert is_text_generation_model(model_cls) is ( - model_arch in _TEXT_GENERATION_MODELS - or model_arch in _MULTIMODAL_MODELS) - - embedding_models = {**_EMBEDDING_MODELS, **_CROSS_ENCODER_MODELS} - assert is_embedding_model(model_cls) is (model_arch - in embedding_models) - - assert supports_multimodal(model_cls) is (model_arch - in _MULTIMODAL_MODELS) + return # Ignore these models which do not have a unified format + + if (model_arch in _TEXT_GENERATION_MODELS + or model_arch in _MULTIMODAL_MODELS): + assert is_text_generation_model(model_cls) + + # All vLLM models should be convertible to an embedding model + embed_model = as_embedding_model(model_cls) + assert is_embedding_model(embed_model) + + if model_arch in _MULTIMODAL_MODELS: + assert supports_multimodal(model_cls) @fork_new_process_for_each_test diff --git a/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_gemma_embedding.py b/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_gemma_embedding.py index 21958b1640204..d676eacffb056 100644 --- a/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_gemma_embedding.py +++ b/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_gemma_embedding.py @@ -1,13 +1,34 @@ -from typing import List, Optional, Union +from typing import Iterable, List, Optional, Tuple, Union import torch +import torch.nn as nn from vllm.attention import AttentionMetadata 
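[Editorial illustration, not part of the patch] The documentation and test changes above both set the pooling method explicitly for models whose Sentence Transformers config is missing or wrong. The snippet below is a minimal offline sketch of that flow, assuming a vLLM build that contains this patch; the server-side equivalent is the --override-pooler-config flag quoted in the docs above.

from vllm import LLM
from vllm.config import PoolerConfig

# Force mean pooling, mirroring the test_embedding.py change above.
llm = LLM(
    model="ssmits/Qwen2-7B-Instruct-embed-base",
    task="embedding",
    override_pooler_config=PoolerConfig(pooling_type="MEAN"),
)
(output, ) = llm.encode("vLLM pooling adapter example")
print(len(output.outputs.embedding))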
-from vllm.model_executor.models.gemma2 import Gemma2EmbeddingModel -from vllm.sequence import IntermediateTensors +from vllm.config import VllmConfig +from vllm.model_executor.layers.pooler import Pooler, PoolingType +from vllm.model_executor.models.gemma2 import Gemma2Model +from vllm.model_executor.models.utils import WeightsMapper, maybe_prefix +from vllm.model_executor.pooling_metadata import PoolingMetadata +from vllm.sequence import IntermediateTensors, PoolerOutput -class MyGemma2Embedding(Gemma2EmbeddingModel): +class MyGemma2Embedding(nn.Module): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + + self.model = Gemma2Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + + self._pooler = Pooler.from_config_with_defaults( + vllm_config.model_config.pooler_config, + pooling_type=PoolingType.LAST, + normalize=True, + softmax=False, + ) + + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) def forward( self, @@ -18,7 +39,7 @@ def forward( intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, IntermediateTensors]: - hidden_states = super().forward( + hidden_states = self.model( input_ids, positions, kv_caches, @@ -32,3 +53,17 @@ def forward( # Return all-zero embeddings return torch.zeros_like(hidden_states) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Optional[PoolerOutput]: + return self._pooler(hidden_states, pooling_metadata) + + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) + weights = hf_to_vllm_mapper.apply(weights) + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) + return self.model.load_weights(weights) diff --git a/tests/test_config.py b/tests/test_config.py index 3cf90297ce177..45b0b938af215 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -26,8 +26,7 @@ def test_auto_task(model_id, expected_task): @pytest.mark.parametrize(("model_id", "bad_task"), [ - ("facebook/opt-125m", "embedding"), - ("intfloat/e5-mistral-7b-instruct", "generate"), + ("Qwen/Qwen2.5-Math-RM-72B", "generate"), ]) def test_incorrect_task(model_id, bad_task): with pytest.raises(ValueError, match=r"does not support the .* task"): diff --git a/vllm/config.py b/vllm/config.py index b1e5b412fec8f..51b8cf24803ab 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -370,6 +370,31 @@ def _resolve_task( selected_task = next(iter(supported_tasks_lst)) if len(supported_tasks) > 1: + suffix_to_preferred_task: List[Tuple[str, _Task]] = [ + # Hardcode the models that are exceptions + ("AquilaModel", "generate"), + ("ChatGLMModel", "generate"), + # Other models follow this pattern + ("ForCausalLM", "generate"), + ("ForConditionalGeneration", "generate"), + ("ChatModel", "generate"), + ("LMHeadModel", "generate"), + ("EmbeddingModel", "embedding"), + ("RewardModel", "embedding"), + ("ForSequenceClassification", "embedding"), + ] + info, arch = ModelRegistry.inspect_model_cls(architectures) + + for suffix, pref_task in suffix_to_preferred_task: + if arch.endswith(suffix) and pref_task in supported_tasks: + selected_task = pref_task + break + else: + if (arch.endswith("Model") + and info.architecture.endswith("ForCausalLM") + and "embedding" in supported_tasks): + selected_task = "embedding" + logger.info( "This model supports multiple tasks: %s. 
" "Defaulting to '%s'.", supported_tasks, selected_task) diff --git a/vllm/inputs/registry.py b/vllm/inputs/registry.py index 68b4756331e6d..85ab4355cc2e4 100644 --- a/vllm/inputs/registry.py +++ b/vllm/inputs/registry.py @@ -11,8 +11,8 @@ from vllm.logger import init_logger from vllm.transformers_utils.processor import cached_get_processor from vllm.transformers_utils.tokenizer import AnyTokenizer -from vllm.utils import (get_allowed_kwarg_only_overrides, print_warning_once, - resolve_mm_processor_kwargs) +from vllm.utils import (ClassRegistry, get_allowed_kwarg_only_overrides, + print_warning_once, resolve_mm_processor_kwargs) from .data import ProcessorInputs, SingletonInputs from .parse import is_encoder_decoder_inputs @@ -136,12 +136,12 @@ class InputRegistry: """ def __init__(self) -> None: - self._dummy_factories_by_model_type: Dict[Type[nn.Module], - DummyDataFactory] = {} - self._dummy_encoder_factories_by_model_type: Dict[ - Type[nn.Module], DummyDataFactory] = {} - self._input_processors_by_model_type: Dict[Type[nn.Module], - InputProcessor] = {} + self._dummy_factories_by_model_type = \ + ClassRegistry[nn.Module, DummyDataFactory]() + self._dummy_encoder_factories_by_model_type = \ + ClassRegistry[nn.Module, DummyDataFactory]() + self._input_processors_by_model_type = \ + ClassRegistry[nn.Module, InputProcessor]() def _default_dummy_data_factory( self, diff --git a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index f9437b4112ceb..e0d42e30ebef3 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -60,9 +60,7 @@ def from_config_with_defaults( softmax: bool, step_tag_id: Optional[int] = None, returned_token_ids: Optional[List[int]] = None, - ) -> Optional["Pooler"]: - if pooler_config is None: - return None + ) -> "Pooler": return cls( pooling_type=PoolingType[pooler_config.pooling_type] if pooler_config.pooling_type is not None else pooling_type, diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 37c2d789030b6..0e12bc5691538 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -9,6 +9,7 @@ import json import math import os +import warnings from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, Dict, Generator, Iterable, List, Optional, Tuple, cast @@ -97,22 +98,31 @@ def device_loading_context(module: torch.nn.Module, logger = init_logger(__name__) -def _initialize_model(vllm_config: VllmConfig, prefix: str = "") -> nn.Module: +def _initialize_model( + vllm_config: VllmConfig, + *, + prefix: str = "", + architectures: Optional[list[str]] = None, +) -> nn.Module: """Initialize a model with the given configurations.""" model_config = vllm_config.model_config - model_class, _ = get_model_architecture(model_config) + model_class, _ = get_model_architecture(model_config, + architectures=architectures) + signatures = inspect.signature(model_class.__init__) all_params = [param.name for param in signatures.parameters.values()] if "vllm_config" in all_params and "prefix" in all_params: # new-style model class with set_current_vllm_config(vllm_config): return model_class(vllm_config=vllm_config, prefix=prefix) + msg = ("vLLM model class should accept `vllm_config` and `prefix` as " "input arguments. Possibly you have an old-style model class" " registered from out of tree and it is used for new vLLM version. 
" "Check https://docs.vllm.ai/en/latest/design/arch_overview.html " "for the design and update the model class accordingly.") - logger.warning(msg) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + logger.warning( "Trying to guess the arguments for old-style model class %s", model_class, @@ -356,7 +366,7 @@ def load_model(self, vllm_config: VllmConfig) -> nn.Module: weights_to_load = {name for name, _ in model.named_parameters()} loaded_weights = model.load_weights( self._get_all_weights(model_config, model)) - # We only enable strict check for non-quantiized models + # We only enable strict check for non-quantized models # that have loaded weights tracking currently. if model_config.quantization is None and loaded_weights is not None: weights_not_loaded = weights_to_load - loaded_weights diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index b95c0b7cd0612..864dd04e79921 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -1,12 +1,13 @@ """Utilities for selecting and loading models.""" import contextlib -from typing import Tuple, Type +from typing import Optional, Tuple, Type import torch from torch import nn from vllm.config import ModelConfig from vllm.model_executor.models import ModelRegistry +from vllm.model_executor.models.adapters import as_embedding_model @contextlib.contextmanager @@ -19,8 +20,13 @@ def set_default_torch_dtype(dtype: torch.dtype): def get_model_architecture( - model_config: ModelConfig) -> Tuple[Type[nn.Module], str]: - architectures = getattr(model_config.hf_config, "architectures", []) + model_config: ModelConfig, + *, + architectures: Optional[list[str]] = None, +) -> Tuple[Type[nn.Module], str]: + if architectures is None: + architectures = getattr(model_config.hf_config, "architectures", []) + # Special handling for quantized Mixtral. # FIXME(woosuk): This is a temporary hack. 
mixtral_supported = [ @@ -32,7 +38,11 @@ def get_model_architecture( and "MixtralForCausalLM" in architectures): architectures = ["QuantMixtralForCausalLM"] - return ModelRegistry.resolve_model_cls(architectures) + model_cls, arch = ModelRegistry.resolve_model_cls(architectures) + if model_config.task == "embedding": + model_cls = as_embedding_model(model_cls) + + return model_cls, arch def get_architecture_class_name(model_config: ModelConfig) -> str: diff --git a/vllm/model_executor/models/adapters.py b/vllm/model_executor/models/adapters.py new file mode 100644 index 0000000000000..360433a07c5b8 --- /dev/null +++ b/vllm/model_executor/models/adapters.py @@ -0,0 +1,98 @@ +from collections.abc import Iterable +from typing import Any, TypeVar + +import torch +import torch.nn as nn + +from .interfaces_base import VllmModelForEmbedding, is_embedding_model + +_T = TypeVar("_T", bound=type[nn.Module]) + + +def as_embedding_model(cls: _T) -> _T: + """Subclass an existing vLLM model to support embeddings.""" + # Avoid modifying existing embedding models + if is_embedding_model(cls): + return cls + + # Lazy import + from vllm.config import VllmConfig + from vllm.model_executor.layers.pooler import (Pooler, PoolerOutput, + PoolingType) + from vllm.model_executor.pooling_metadata import PoolingMetadata + + from .utils import AutoWeightsLoader, WeightsMapper + + class ModelForEmbedding(cls, VllmModelForEmbedding): + + def __init__( + self, + *, + vllm_config: "VllmConfig", + prefix: str = "", + **kwargs: Any, + ) -> None: + super().__init__(vllm_config=vllm_config, prefix=prefix, **kwargs) + + # These are not used in embedding models + for attr in ("lm_head", "logits_processor"): + if hasattr(self, attr): + delattr(self, attr) + + pooler_config = vllm_config.model_config.pooler_config + assert pooler_config is not None + + # If the model already defines a pooler instance, don't overwrite it + if not getattr(self, "_pooler", None): + self._pooler = Pooler.from_config_with_defaults( + pooler_config, + pooling_type=PoolingType.LAST, + normalize=True, + softmax=False, + ) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> PoolerOutput: + return self._pooler(hidden_states, pooling_metadata) + + def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]): + # TODO: Support uninitialized params tracking + + # We have deleted this attribute, so don't load it + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) + + # If `*ForCausalLM` defines `load_weights` on the inner model + # and there are no other inner modules with parameters, + # we support loading from both `*Model` and `*ForCausalLM` + if hasattr(self, "model") and hasattr(self.model, "load_weights"): + # Whether only `self.model` contains parameters + model_is_only_param = all( + name == "model" or next(child.parameters(), None) is None + for name, child in self.named_children()) + + if model_is_only_param: + mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) + weights = mapper.apply(weights) + + self.model.load_weights(weights) + return + + # For most other models + if hasattr(cls, "load_weights"): + cls.load_weights(self, weights) # type: ignore + # Fallback + else: + loader = AutoWeightsLoader(self) + loader.load_weights(weights) + + ModelForEmbedding.__name__ = cls.__name__ \ + .removesuffix("ForCausalLM") \ + .removesuffix("ForConditionalGeneration") \ + .removesuffix("ChatModel") \ + .removesuffix("LMHeadModel") + "ForEmbedding" + + return 
ModelForEmbedding # type: ignore diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py index d2592016aff34..76b8505ee1c2a 100644 --- a/vllm/model_executor/models/blip2.py +++ b/vllm/model_executor/models/blip2.py @@ -512,9 +512,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index d35fcb012e166..4664aa53ea092 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -30,19 +30,17 @@ QKVParallelLinear, RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.sequence import IntermediateTensors from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, WeightsMapper, extract_layer_index, +from .utils import (AutoWeightsLoader, extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -455,55 +453,3 @@ def load_weights(self, weights: Iterable[Tuple[str, if self.config.tie_word_embeddings else None), ) return loader.load_weights(weights) - - -class Gemma2EmbeddingModel(nn.Module, SupportsPP): - """ - A model that uses Gemma2 with additional embedding functionalities. - - This class encapsulates the Gemma2Model and provides an interface for - embedding operations and customized pooling functions. - - Attributes: - model: An instance of Gemma2Model used for forward operations. - _pooler: An instance of Pooler used for pooling operations. 
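[Editorial illustration, not part of the patch] The dedicated *EmbeddingModel classes being removed in the hunks around here are now produced on demand by the adapter introduced in adapters.py above. A small sketch of the conversion, assuming a vLLM build that contains this patch, using Gemma2 as the example:

from vllm.model_executor.models.adapters import as_embedding_model
from vllm.model_executor.models.gemma2 import Gemma2ForCausalLM

# Subclasses Gemma2ForCausalLM, drops lm_head/logits_processor at init time,
# and attaches a last-token Pooler, matching what the removed
# Gemma2EmbeddingModel used to hard-code by hand.
Gemma2ForEmbedding = as_embedding_model(Gemma2ForCausalLM)
print(Gemma2ForEmbedding.__name__)  # "Gemma2ForEmbedding"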
- """ - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - - self.model = Gemma2Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self._pooler = Pooler.from_config_with_defaults( - vllm_config.model_config.pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - - def forward( - self, - input_ids: Optional[torch.Tensor], - positions: torch.Tensor, - kv_caches: List[torch.Tensor], - attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[torch.Tensor] = None, - ) -> Union[torch.Tensor, IntermediateTensors]: - return self.model(input_ids, positions, kv_caches, attn_metadata, - intermediate_tensors, inputs_embeds) - - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): - hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) - weights = hf_to_vllm_mapper.apply(weights) - weights = ((name, data) for name, data in weights - if not name.startswith("lm_head.")) - self.model.load_weights(weights) diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index b1c0065afbf30..86aab38032450 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -474,9 +474,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: ) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.mlp1 = self._init_mlp1(config) diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index ff0ab011a9158..31dfb235ae877 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -37,7 +37,6 @@ QKVParallelLinear, RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.quantization.compressed_tensors.utils import ( get_compressed_tensors_cache_scale) @@ -47,14 +46,13 @@ DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import ( default_weight_loader, kv_cache_scales_loader, maybe_remap_kv_scale_name) -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.platforms import current_platform -from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.sequence import IntermediateTensors from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, PPMissingLayer, WeightsMapper, - extract_layer_index, is_pp_missing_parameter, +from .utils import (AutoWeightsLoader, PPMissingLayer, extract_layer_index, + is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -511,11 +509,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config quant_config = 
vllm_config.quant_config lora_config = vllm_config.lora_config - pooler_config = vllm_config.model_config.pooler_config self.config = config self.lora_config = lora_config - self.model = self._init_model(vllm_config=vllm_config, prefix=prefix) + self.model = self._init_model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + if get_pp_group().is_last_rank: self.unpadded_vocab_size = config.vocab_size if lora_config: @@ -544,13 +543,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.STEP, - normalize=False, - softmax=False) def _init_model(self, vllm_config: VllmConfig, prefix: str = ""): return LlamaModel(vllm_config=vllm_config, prefix=prefix) @@ -581,14 +576,6 @@ def compute_logits( sampling_metadata) return logits - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - logits = self.compute_logits(hidden_states, None) - return self._pooler(logits, pooling_metadata) - def sample(self, logits: torch.Tensor, sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: next_tokens = self.sampler(logits, sampling_metadata) @@ -639,78 +626,3 @@ def permute(w: torch.Tensor, n_heads: int): name = name.replace(item, mapping[item]) return name, loaded_weight - - -class LlamaEmbeddingModel(nn.Module, SupportsLoRA, SupportsPP): - """ - A model that uses Llama with additional embedding functionalities. - - This class encapsulates the LlamaModel and provides an interface for - embedding operations and customized pooling functions. - - Attributes: - model: An instance of LlamaModel used for forward operations. - _pooler: An instance of Pooler used for pooling operations. 
- """ - packed_modules_mapping = { - "qkv_proj": ["q_proj", "k_proj", "v_proj"], - "gate_up_proj": ["gate_proj", "up_proj"] - } - - # LoRA specific attributes - supported_lora_modules = [ - "qkv_proj", "o_proj", "gate_up_proj", "down_proj", "embed_tokens" - ] - embedding_modules = { - "embed_tokens": "input_embeddings", - } - embedding_padding_modules = [] - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - - pooler_config = vllm_config.model_config.pooler_config - - self.model = LlamaModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - - def forward( - self, - input_ids: Optional[torch.Tensor], - positions: torch.Tensor, - kv_caches: List[torch.Tensor], - attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[torch.Tensor] = None, - ) -> Union[torch.Tensor, IntermediateTensors]: - return self.model(input_ids, positions, kv_caches, attn_metadata, - intermediate_tensors, inputs_embeds) - - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): - hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) - weights = hf_to_vllm_mapper.apply(weights) - weights = ((name, data) for name, data in weights - if not name.startswith("lm_head.")) - self.model.load_weights(weights) - - def load_kv_cache_scales(self, quantization_param_path: str) -> None: - self.model.load_kv_cache_scales(quantization_param_path) - - # LRUCacheWorkerLoRAManager instantiation requires model config. 
- @property - def config(self): - return self.model.config diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index e7757b3c7d405..7fd4b32774798 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -319,9 +319,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: projector_hidden_act=config.projector_hidden_act) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index e113f5862830d..a39f2f4124d05 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -14,13 +14,11 @@ from vllm.config import VllmConfig from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext) -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import NestedTensors -from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of from .clip import (CLIPVisionModel, dummy_image_for_clip, @@ -286,7 +284,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config - pooler_config = vllm_config.model_config.pooler_config multimodal_config = vllm_config.model_config.multimodal_config vision_feature_layer = config.vision_feature_layer @@ -321,17 +318,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: projector_hidden_act=config.projector_hidden_act) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) - - # The same model class supports both language generation and embedding - # because the architecture name is the same - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) + self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) @@ -678,13 +669,6 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py index b130791808924..0de9d8c5ea572 100644 --- a/vllm/model_executor/models/llava_next_video.py +++ b/vllm/model_executor/models/llava_next_video.py @@ -275,9 +275,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: 
text_hidden_size=config.text_config.hidden_size, projector_hidden_act=config.projector_hidden_act) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.make_empty_intermediate_tensors = ( self.language_model.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index 3166737d61582..0bebc1c745e2b 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -422,9 +422,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: prefix=maybe_prefix(prefix, "vision_tower")) self.multi_modal_projector = LlavaOnevisionMultiModalProjector(config) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.image_newline = nn.Parameter( torch.empty(config.text_config.hidden_size)) diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index 2e5b6bee784e7..253e689e50a3b 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -151,9 +151,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.quant_config = quant_config config.text_config.architectures = ["GemmaForCausalLM"] self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) logit_scale = getattr(config, "logit_scale", 1.0) self.language_model.logits_processor.scale *= logit_scale diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 4cb874a13e0c1..eef23029a2aca 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -29,24 +29,22 @@ from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) from vllm.logger import init_logger -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) from vllm.model_executor.models.clip import CLIPVisionModel -from vllm.model_executor.models.llama import LlamaForCausalLM -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import NestedTensors, PlaceholderRange from vllm.multimodal.utils import cached_get_tokenizer, repeat_and_pad_token -from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of from .clip import dummy_image_for_clip, dummy_seq_data_for_clip from .interfaces import SupportsMultiModal, SupportsPP -from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, maybe_prefix, +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, + init_vllm_registered_model, maybe_prefix, merge_multimodal_embeddings) 
logger = init_logger(__name__) @@ -536,7 +534,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config - pooler_config = vllm_config.model_config.pooler_config multimodal_config = vllm_config.model_config.multimodal_config self.config = config self.multimodal_config = multimodal_config @@ -556,18 +553,17 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): quant_config, prefix=maybe_prefix(prefix, "model.vision_embed_tokens")) - # The prefix is empty intentionally because default prefix of - # LlamaForCausalLM is "model" - self.language_model = LlamaForCausalLM(vllm_config=vllm_config, - prefix="") - - # The same model class supports both language generation and embedding - # because the architecture name is the same - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) + self.language_model = init_vllm_registered_model( + vllm_config=vllm_config, + # The prefix is empty intentionally because default prefix of + # LlamaForCausalLM is "model" + prefix="", + # We don't directly initialize vLLM's LlamaForCausalLM so we + # can automatically apply embedding wrapper if this model is + # initialized as an embedding model + architectures=["LlamaForCausalLM"], + ) + self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) @@ -739,13 +735,6 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: hf_to_vllm_mapper = WeightsMapper( diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 45171c1a04b17..215727cadd954 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -172,9 +172,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # init MistralForCausalLM self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.vision_encoder = VisionTransformer(self.vision_args) self.vision_language_adapter = VisionLanguageAdapter( diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 87943e53d861c..7d4cc4b69e614 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -31,6 +31,7 @@ from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size +from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, @@ -55,6 +56,8 @@ make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) +logger = init_logger(__name__) + class Qwen2MLP(nn.Module): @@ -433,7 +436,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config 
lora_config = vllm_config.lora_config - pooler_config = vllm_config.model_config.pooler_config self.config = config self.lora_config = lora_config @@ -454,14 +456,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(config.vocab_size) self.sampler = get_sampler() - # The same model class supports both language generation and embedding - # because the architecture name is the same - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) - self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) @@ -499,13 +493,6 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader( @@ -553,6 +540,15 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model = Qwen2Model(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) + # TODO: Replace this model class with for_embedding(Qwen2ForCausalLM), + # after changing the default pooling method + if pooler_config.pooling_type is None: + logger.warning( + "This embedding model will default to last-token pooling in " + "an upcoming version. To avoid breaking changes, you should " + "pass `--override-pooler-config '{\"pooling_type\": \"MEAN\"}'`" + " explicitly.") + self._pooler = Pooler.from_config_with_defaults( pooler_config, pooling_type=PoolingType.MEAN, diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 7956a98b21569..27175dbae7483 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -50,7 +50,6 @@ from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.quantization.gptq import GPTQConfig from vllm.model_executor.layers.quantization.gptq_marlin import ( @@ -59,14 +58,13 @@ from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.qwen2 import Qwen2Model -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.image import cached_get_image_processor from vllm.multimodal.inputs import (MultiModalData, MultiModalDataDict, MultiModalKwargs, NestedTensors) from vllm.multimodal.utils import cached_get_tokenizer from vllm.platforms import _Backend -from vllm.sequence import IntermediateTensors, PoolerOutput, SequenceData +from vllm.sequence import IntermediateTensors, SequenceData from vllm.transformers_utils.config import uses_mrope from vllm.transformers_utils.processor import cached_get_processor @@ -1070,7 +1068,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config cache_config = vllm_config.cache_config quant_config = vllm_config.quant_config - pooler_config = vllm_config.model_config.pooler_config multimodal_config = 
vllm_config.model_config.multimodal_config assert not cache_config.enable_prefix_caching, \ "Qwen2-VL currently does not support prefix caching" @@ -1102,11 +1099,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(config.vocab_size) self.sampler = get_sampler() - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) + self.make_empty_intermediate_tensors = ( make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) @@ -1361,13 +1354,6 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: stacked_params_mapping = [ diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index c400c7d59828c..7d2bfce9ba264 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -20,6 +20,7 @@ from vllm.logger import init_logger from vllm.platforms import current_platform +from .adapters import as_embedding_model from .interfaces import (has_inner_state, is_attention_free, supports_cross_encoding, supports_multimodal, supports_pp) @@ -107,15 +108,15 @@ "RobertaForMaskedLM": ("roberta", "RobertaEmbeddingModel"), "XLMRobertaModel": ("roberta", "RobertaEmbeddingModel"), "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), - "Gemma2Model": ("gemma2", "Gemma2EmbeddingModel"), + "Gemma2Model": ("gemma2", "Gemma2ForCausalLM"), "GlmForCausalLM": ("glm", "GlmForCausalLM"), - "LlamaModel": ("llama", "LlamaEmbeddingModel"), + "LlamaModel": ("llama", "LlamaForCausalLM"), **{ # Multiple models share the same architecture, so we include them all k: (mod, arch) for k, (mod, arch) in _TEXT_GENERATION_MODELS.items() if arch == "LlamaForCausalLM" }, - "MistralModel": ("llama", "LlamaEmbeddingModel"), + "MistralModel": ("llama", "LlamaForCausalLM"), "Phi3ForCausalLM": ("phi3", "Phi3ForCausalLM"), "Qwen2Model": ("qwen2", "Qwen2EmbeddingModel"), "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"), @@ -125,7 +126,7 @@ # [Multimodal] "LlavaNextForConditionalGeneration": ("llava_next", "LlavaNextForConditionalGeneration"), # noqa: E501 "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"), - "Qwen2VLForConditionalGeneration": ("qwen2_vl", "Qwen2VLForConditionalGeneration") # noqa: E501, + "Qwen2VLForConditionalGeneration": ("qwen2_vl", "Qwen2VLForConditionalGeneration"), # noqa: E501 } _CROSS_ENCODER_MODELS = { @@ -208,6 +209,7 @@ @dataclass(frozen=True) class _ModelInfo: + architecture: str is_text_generation_model: bool is_embedding_model: bool supports_cross_encoding: bool @@ -218,9 +220,19 @@ class _ModelInfo: @staticmethod def from_model_cls(model: Type[nn.Module]) -> "_ModelInfo": + is_embedding_model_ = is_embedding_model(model) + if not is_embedding_model_: + try: + as_embedding_model(model) + except Exception: + pass + else: + is_embedding_model_ = True + return _ModelInfo( + architecture=model.__name__, is_text_generation_model=is_text_generation_model(model), - is_embedding_model=is_embedding_model(model), + is_embedding_model=is_embedding_model_, supports_cross_encoding=supports_cross_encoding(model), supports_multimodal=supports_multimodal(model), supports_pp=supports_pp(model), @@ 
-399,13 +411,13 @@ def _normalize_archs( def inspect_model_cls( self, architectures: Union[str, List[str]], - ) -> _ModelInfo: + ) -> Tuple[_ModelInfo, str]: architectures = self._normalize_archs(architectures) for arch in architectures: model_info = self._try_inspect_model_cls(arch) if model_info is not None: - return model_info + return (model_info, arch) return self._raise_for_unsupported(architectures) @@ -426,39 +438,50 @@ def is_text_generation_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).is_text_generation_model + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.is_text_generation_model def is_embedding_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).is_embedding_model + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.is_embedding_model def is_cross_encoder_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).supports_cross_encoding + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.supports_cross_encoding def is_multimodal_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).supports_multimodal + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.supports_multimodal def is_pp_supported_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).supports_pp + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.supports_pp - def model_has_inner_state(self, architectures: Union[str, - List[str]]) -> bool: - return self.inspect_model_cls(architectures).has_inner_state + def model_has_inner_state( + self, + architectures: Union[str, List[str]], + ) -> bool: + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.has_inner_state - def is_attention_free_model(self, architectures: Union[str, - List[str]]) -> bool: - return self.inspect_model_cls(architectures).is_attention_free + def is_attention_free_model( + self, + architectures: Union[str, List[str]], + ) -> bool: + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.is_attention_free ModelRegistry = _ModelRegistry({ diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py index b61deccde45b7..ea1e5401d42c0 100644 --- a/vllm/model_executor/models/ultravox.py +++ b/vllm/model_executor/models/ultravox.py @@ -360,9 +360,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): )) self.multi_modal_projector = UltravoxProjector(config) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) if config.text_model_id is not None: # this prefix is not for initialization, but for loading weights # note the trailing dot diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index a6b40a233439b..7a1e1f9bf2be4 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -173,8 +173,15 @@ def _load_module( module_load_weights = getattr(module, "load_weights", None) if callable(module_load_weights): loaded_params = module_load_weights(weights) - yield from map(lambda x: self._get_qualname(base_prefix, x), - loaded_params) + if 
loaded_params is None: + logger.warning( + "Unable to collect loaded parameters " + "for module %s", module) + else: + yield from map( + lambda x: self._get_qualname(base_prefix, x), + loaded_params, + ) child_modules = dict(module.named_children()) child_params = dict(module.named_parameters(recurse=False)) @@ -232,17 +239,24 @@ def load_weights( def init_vllm_registered_model( - hf_config: PretrainedConfig, vllm_config: VllmConfig, + *, prefix: str = "", + hf_config: Optional[PretrainedConfig] = None, + architectures: Optional[list[str]] = None, ) -> nn.Module: """ Helper function to initialize an inner model registered to vLLM, based on the arguments passed to the outer vLLM model. """ from vllm.model_executor.model_loader.loader import _initialize_model - vllm_config = vllm_config.with_hf_config(hf_config) - return _initialize_model(vllm_config, prefix) + + if hf_config is not None: + vllm_config = vllm_config.with_hf_config(hf_config) + + return _initialize_model(vllm_config=vllm_config, + prefix=prefix, + architectures=architectures) @overload diff --git a/vllm/multimodal/base.py b/vllm/multimodal/base.py index 6eec660e42ac4..bbb8fb4bc1cd1 100644 --- a/vllm/multimodal/base.py +++ b/vllm/multimodal/base.py @@ -7,7 +7,7 @@ from vllm.inputs import InputContext from vllm.logger import init_logger -from vllm.utils import (get_allowed_kwarg_only_overrides, +from vllm.utils import (ClassRegistry, get_allowed_kwarg_only_overrides, resolve_mm_processor_kwargs) if TYPE_CHECKING: @@ -54,8 +54,8 @@ class MultiModalPlugin(ABC): """ def __init__(self) -> None: - self._input_mappers: Dict[Type[nn.Module], MultiModalInputMapper] = {} - self._max_mm_tokens: Dict[Type[nn.Module], MultiModalTokensCalc] = {} + self._input_mappers = ClassRegistry[nn.Module, MultiModalInputMapper]() + self._max_mm_tokens = ClassRegistry[nn.Module, MultiModalTokensCalc]() @abstractmethod def get_data_key(self) -> str: diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py index b992442d3b314..b73daee98bd80 100644 --- a/vllm/multimodal/registry.py +++ b/vllm/multimodal/registry.py @@ -9,6 +9,7 @@ from vllm.inputs import InputProcessingContext from vllm.logger import init_logger from vllm.transformers_utils.tokenizer import AnyTokenizer +from vllm.utils import ClassRegistry from .audio import AudioPlugin from .base import MultiModalInputMapper, MultiModalPlugin, MultiModalTokensCalc @@ -62,8 +63,8 @@ def __init__( plugins: Sequence[MultiModalPlugin] = DEFAULT_PLUGINS) -> None: self._plugins = {p.get_data_key(): p for p in plugins} - self._processor_factories: Dict[Type[nn.Module], - MultiModalProcessorFactory] = {} + self._processor_factories = ClassRegistry[nn.Module, + MultiModalProcessorFactory]() # This is used for non-multimodal models self._disabled_limits_per_plugin = {k: 0 for k in self._plugins} diff --git a/vllm/utils.py b/vllm/utils.py index 6f7a6f8c54e47..0165a22582e7b 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -20,7 +20,7 @@ import warnings import weakref from asyncio import FIRST_COMPLETED, AbstractEventLoop, Future, Task -from collections import defaultdict +from collections import UserDict, defaultdict from collections.abc import Iterable, Mapping from functools import lru_cache, partial, wraps from platform import uname @@ -1517,13 +1517,13 @@ def value(self): # Adapted from: https://stackoverflow.com/a/47212782/5082708 -class LazyDict(Mapping, Generic[T]): +class LazyDict(Mapping[str, T], Generic[T]): def __init__(self, factory: Dict[str, Callable[[], T]]): self._factory = factory 
self._dict: Dict[str, T] = {} - def __getitem__(self, key) -> T: + def __getitem__(self, key: str) -> T: if key not in self._dict: if key not in self._factory: raise KeyError(key) @@ -1540,6 +1540,22 @@ def __len__(self): return len(self._factory) +class ClassRegistry(UserDict[type[T], _V]): + + def __getitem__(self, key: type[T]) -> _V: + for cls in key.mro(): + if cls in self.data: + return self.data[cls] + + raise KeyError(key) + + def __contains__(self, key: object) -> bool: + if not isinstance(key, type): + return False + + return any(cls in self.data for cls in key.mro()) + + def weak_ref_tensor(tensor: torch.Tensor) -> torch.Tensor: """ Create a weak reference to a tensor. From f877a7d12a0490705e6bea0987c89548d1a015ea Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 1 Dec 2024 09:48:35 +0800 Subject: [PATCH 194/397] [Misc] Improve type annotations for `support_torch_compile` (#10763) Signed-off-by: DarkLight1337 --- vllm/compilation/decorators.py | 38 ++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index 8b81a29936989..8700243c9d904 100644 --- a/vllm/compilation/decorators.py +++ b/vllm/compilation/decorators.py @@ -1,7 +1,8 @@ import inspect -from typing import Dict, List, Optional, Union +from typing import Callable, Dict, List, Optional, TypeVar, Union, overload import torch +import torch.nn as nn from vllm.compilation.counter import compilation_counter from vllm.compilation.wrapper import TorchCompileWrapperWithCustomDispatcher @@ -12,10 +13,27 @@ logger = init_logger(__name__) +_T = TypeVar("_T", bound=type[nn.Module]) + + +@overload +def support_torch_compile( + *, + dynamic_arg_dims: Optional[Dict[str, Union[int, List[int]]]], +) -> Callable[[_T], _T]: + ... + + +@overload +def support_torch_compile(cls: _T) -> _T: + ... + def support_torch_compile( - cls: Optional[type] = None, - dynamic_arg_dims: Optional[Dict[str, Union[int, List[int]]]] = None): + cls: Optional[_T] = None, + *, + dynamic_arg_dims: Optional[Dict[str, Union[int, List[int]]]] = None, +) -> Union[Callable[[_T], _T], _T]: """ A decorator to add support for compiling the forward method of a class. @@ -66,7 +84,7 @@ def forward(self, x: torch.Tensor, y: Optional[torch.Tensor]): computation graph. """ - def cls_decorator_helper(cls: type): + def cls_decorator_helper(cls: _T) -> _T: # helper to pass `dynamic_arg_dims`` to `_support_torch_compile`` # to avoid too much indentation for `_support_torch_compile`` if not hasattr(cls, 'forward'): @@ -105,8 +123,10 @@ def cls_decorator_helper(cls: type): return cls_decorator_helper -def _support_torch_compile(cls: type, - dynamic_arg_dims: Dict[str, Union[int, List[int]]]): +def _support_torch_compile( + cls: _T, + dynamic_arg_dims: Dict[str, Union[int, List[int]]], +) -> _T: """ A decorator to add support for compiling the forward method of a class. 
""" @@ -119,7 +139,7 @@ def _support_torch_compile(cls: type, # other than TorchCompileWrapperWithCustomDispatcher cls.__bases__ = cls.__bases__ + (TorchCompileWrapperWithCustomDispatcher, ) - old_init = cls.__init__ # type: ignore + old_init = cls.__init__ def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): old_init(self, vllm_config=vllm_config, prefix=prefix, **kwargs) @@ -135,7 +155,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): TorchCompileWrapperWithCustomDispatcher.__init__( self, compilation_level=vllm_config.compilation_config.level) - cls.__init__ = __init__ # type: ignore + cls.__init__ = __init__ def __call__(self, *args, **kwargs): # torch.compiler.is_compiling() means we are inside the compilation @@ -180,5 +200,5 @@ def __call__(self, *args, **kwargs): model_output = self.forward(*args, **kwargs) return model_output - cls.__call__ = __call__ # type: ignore + cls.__call__ = __call__ return cls From d2f058e76c2a28d2109e163dc1123ead6983943c Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 1 Dec 2024 14:36:51 +0800 Subject: [PATCH 195/397] [Misc] Rename embedding classes to pooling (#10801) Signed-off-by: DarkLight1337 --- examples/offline_inference_embedding.py | 2 +- tests/entrypoints/llm/test_encode.py | 6 +- tests/models/test_registry.py | 4 +- tests/worker/test_model_input.py | 4 +- vllm/__init__.py | 31 +++++++++-- vllm/config.py | 2 +- vllm/engine/async_llm_engine.py | 24 ++++---- vllm/engine/llm_engine.py | 8 +-- vllm/engine/multiprocessing/client.py | 14 ++--- vllm/engine/protocol.py | 5 +- vllm/entrypoints/llm.py | 30 +++++----- vllm/entrypoints/openai/serving_embedding.py | 12 ++-- vllm/entrypoints/openai/serving_score.py | 10 ++-- vllm/model_executor/models/__init__.py | 11 ++-- vllm/model_executor/models/adapters.py | 6 +- vllm/model_executor/models/interfaces.py | 4 +- vllm/model_executor/models/interfaces_base.py | 15 +++-- vllm/model_executor/models/registry.py | 16 +++--- vllm/outputs.py | 55 +++++++++++++------ vllm/v1/engine/async_llm.py | 4 +- vllm/v1/engine/async_stream.py | 8 +-- ..._runner.py => cpu_pooling_model_runner.py} | 4 +- vllm/worker/cpu_worker.py | 4 +- ...odel_runner.py => pooling_model_runner.py} | 6 +- vllm/worker/worker.py | 4 +- 25 files changed, 166 insertions(+), 123 deletions(-) rename vllm/worker/{cpu_embedding_model_runner.py => cpu_pooling_model_runner.py} (98%) rename vllm/worker/{embedding_model_runner.py => pooling_model_runner.py} (98%) diff --git a/examples/offline_inference_embedding.py b/examples/offline_inference_embedding.py index 7d5ef128bc8e0..ae158eef2ca4c 100644 --- a/examples/offline_inference_embedding.py +++ b/examples/offline_inference_embedding.py @@ -10,7 +10,7 @@ # Create an LLM. model = LLM(model="intfloat/e5-mistral-7b-instruct", enforce_eager=True) -# Generate embedding. The output is a list of EmbeddingRequestOutputs. +# Generate embedding. The output is a list of PoolingRequestOutputs. outputs = model.encode(prompts) # Print the outputs. 
for output in outputs: diff --git a/tests/entrypoints/llm/test_encode.py b/tests/entrypoints/llm/test_encode.py index 4c9f796e5ed71..41163809237e9 100644 --- a/tests/entrypoints/llm/test_encode.py +++ b/tests/entrypoints/llm/test_encode.py @@ -3,7 +3,7 @@ import pytest -from vllm import LLM, EmbeddingRequestOutput, PoolingParams +from vllm import LLM, PoolingParams, PoolingRequestOutput from vllm.distributed import cleanup_dist_env_and_memory MODEL_NAME = "intfloat/e5-mistral-7b-instruct" @@ -43,8 +43,8 @@ def llm(): cleanup_dist_env_and_memory() -def assert_outputs_equal(o1: List[EmbeddingRequestOutput], - o2: List[EmbeddingRequestOutput]): +def assert_outputs_equal(o1: List[PoolingRequestOutput], + o2: List[PoolingRequestOutput]): assert [o.outputs for o in o1] == [o.outputs for o in o2] diff --git a/tests/models/test_registry.py b/tests/models/test_registry.py index 1886b1f9898ad..b5368aab3ecf1 100644 --- a/tests/models/test_registry.py +++ b/tests/models/test_registry.py @@ -3,7 +3,7 @@ import pytest import torch.cuda -from vllm.model_executor.models import (is_embedding_model, +from vllm.model_executor.models import (is_pooling_model, is_text_generation_model, supports_multimodal) from vllm.model_executor.models.adapters import as_embedding_model @@ -31,7 +31,7 @@ def test_registry_imports(model_arch): # All vLLM models should be convertible to an embedding model embed_model = as_embedding_model(model_cls) - assert is_embedding_model(embed_model) + assert is_pooling_model(embed_model) if model_arch in _MULTIMODAL_MODELS: assert supports_multimodal(model_cls) diff --git a/tests/worker/test_model_input.py b/tests/worker/test_model_input.py index b36e8bfe73ff3..309854e6babf3 100644 --- a/tests/worker/test_model_input.py +++ b/tests/worker/test_model_input.py @@ -8,10 +8,10 @@ from vllm.attention.backends.utils import CommonAttentionState from vllm.model_executor import SamplingMetadata from vllm.model_executor.pooling_metadata import PoolingMetadata -from vllm.worker.embedding_model_runner import ( - ModelInputForGPUWithPoolingMetadata) from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata from vllm.worker.multi_step_model_runner import StatefulModelInput +from vllm.worker.pooling_model_runner import ( + ModelInputForGPUWithPoolingMetadata) class MockAttentionBackend(AttentionBackend): diff --git a/vllm/__init__.py b/vllm/__init__.py index 8f477ea84756d..a10f6d3128cb6 100644 --- a/vllm/__init__.py +++ b/vllm/__init__.py @@ -7,8 +7,8 @@ from vllm.executor.ray_utils import initialize_ray_cluster from vllm.inputs import PromptType, TextPrompt, TokensPrompt from vllm.model_executor.models import ModelRegistry -from vllm.outputs import (CompletionOutput, EmbeddingOutput, - EmbeddingRequestOutput, RequestOutput) +from vllm.outputs import (CompletionOutput, PoolingOutput, + PoolingRequestOutput, RequestOutput) from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams @@ -25,8 +25,8 @@ "SamplingParams", "RequestOutput", "CompletionOutput", - "EmbeddingOutput", - "EmbeddingRequestOutput", + "PoolingOutput", + "PoolingRequestOutput", "LLMEngine", "EngineArgs", "AsyncLLMEngine", @@ -34,3 +34,26 @@ "initialize_ray_cluster", "PoolingParams", ] + + +def __getattr__(name: str): + import warnings + + if name == "EmbeddingOutput": + msg = ("EmbeddingOutput has been renamed to PoolingOutput. 
" + "The original name will be removed in an upcoming version.") + + warnings.warn(DeprecationWarning(msg), stacklevel=2) + + return PoolingOutput + + if name == "EmbeddingRequestOutput": + msg = ("EmbeddingRequestOutput has been renamed to " + "PoolingRequestOutput. " + "The original name will be removed in an upcoming version.") + + warnings.warn(DeprecationWarning(msg), stacklevel=2) + + return PoolingRequestOutput + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/config.py b/vllm/config.py index 51b8cf24803ab..da043afbe1ae7 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -359,7 +359,7 @@ def _resolve_task( # NOTE: Listed from highest to lowest priority, # in case the model supports multiple of them "generate": ModelRegistry.is_text_generation_model(architectures), - "embedding": ModelRegistry.is_embedding_model(architectures), + "embedding": ModelRegistry.is_pooling_model(architectures), } supported_tasks_lst: List[_Task] = [ task for task, is_supported in task_support.items() if is_supported diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 31a15b04314d5..7b1bb7b05708d 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -25,7 +25,7 @@ from vllm.model_executor.guided_decoding import ( get_guided_decoding_logits_processor) from vllm.model_executor.layers.sampler import SamplerOutput -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams @@ -74,7 +74,7 @@ def _log_task_completion(task: asyncio.Task, class AsyncStream: - """A stream of RequestOutputs or EmbeddingRequestOutputs for a request + """A stream of RequestOutputs or PoolingRequestOutputs for a request that can be iterated over asynchronously via an async generator.""" def __init__(self, request_id: str, cancel: Callable[[str], None]) -> None: @@ -83,7 +83,7 @@ def __init__(self, request_id: str, cancel: Callable[[str], None]) -> None: self._queue: asyncio.Queue = asyncio.Queue() self._finished = False - def put(self, item: Union[RequestOutput, EmbeddingRequestOutput, + def put(self, item: Union[RequestOutput, PoolingRequestOutput, Exception]) -> None: if not self._finished: self._queue.put_nowait(item) @@ -103,7 +103,7 @@ def finished(self) -> bool: async def generator( self - ) -> AsyncGenerator[Union[RequestOutput, EmbeddingRequestOutput], None]: + ) -> AsyncGenerator[Union[RequestOutput, PoolingRequestOutput], None]: try: while True: result = await self._queue.get() @@ -154,7 +154,7 @@ def propagate_exception(self, def process_request_output(self, request_output: Union[RequestOutput, - EmbeddingRequestOutput], + PoolingRequestOutput], *, verbose: bool = False) -> None: """Process a request output from the engine.""" @@ -265,7 +265,7 @@ def __init__(self, *args, **kwargs): async def step_async( self, virtual_engine: int - ) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: + ) -> List[Union[RequestOutput, PoolingRequestOutput]]: """Performs one decoding iteration and returns newly generated results. The workers are ran asynchronously if possible. 
@@ -907,7 +907,7 @@ def add_request( prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, ) -> Coroutine[None, None, AsyncGenerator[Union[ - RequestOutput, EmbeddingRequestOutput], None]]: + RequestOutput, PoolingRequestOutput], None]]: ... @overload @@ -922,7 +922,7 @@ def add_request( prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, ) -> Coroutine[None, None, AsyncGenerator[Union[ - RequestOutput, EmbeddingRequestOutput], None]]: + RequestOutput, PoolingRequestOutput], None]]: ... @deprecate_kwargs( @@ -941,7 +941,7 @@ async def add_request( priority: int = 0, *, inputs: Optional[PromptType] = None, # DEPRECATED - ) -> AsyncGenerator[Union[RequestOutput, EmbeddingRequestOutput], None]: + ) -> AsyncGenerator[Union[RequestOutput, PoolingRequestOutput], None]: if inputs is not None: prompt = inputs assert prompt is not None and params is not None @@ -1070,7 +1070,7 @@ async def encode( lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: + ) -> AsyncGenerator[PoolingRequestOutput, None]: """Generate outputs for a request from an embedding model. Generate outputs for a request. This method is a coroutine. It adds the @@ -1088,7 +1088,7 @@ async def encode( Only applicable with priority scheduling. Yields: - The output `EmbeddingRequestOutput` objects from the LLMEngine + The output `PoolingRequestOutput` objects from the LLMEngine for the request. Details: @@ -1141,7 +1141,7 @@ async def encode( trace_headers=trace_headers, priority=priority, ): - yield LLMEngine.validate_output(output, EmbeddingRequestOutput) + yield LLMEngine.validate_output(output, PoolingRequestOutput) async def abort(self, request_id: str) -> None: """Abort a request. diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index ecc222f692c41..7911dc8d04500 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -40,7 +40,7 @@ get_local_guided_decoding_logits_processor) from vllm.model_executor.layers.sampler import SamplerOutput from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry -from vllm.outputs import (EmbeddingRequestOutput, RequestOutput, +from vllm.outputs import (PoolingRequestOutput, RequestOutput, RequestOutputFactory) from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest @@ -80,7 +80,7 @@ def _load_generation_config_dict(model_config: ModelConfig) -> Dict[str, Any]: _G = TypeVar("_G", bound=BaseTokenizerGroup, default=BaseTokenizerGroup) -_O = TypeVar("_O", RequestOutput, EmbeddingRequestOutput) +_O = TypeVar("_O", RequestOutput, PoolingRequestOutput) @dataclass @@ -112,7 +112,7 @@ class SchedulerContext: def __init__(self, multi_step_stream_outputs: bool = False): self.output_queue: Deque[OutputData] = deque() self.request_outputs: List[Union[RequestOutput, - EmbeddingRequestOutput]] = [] + PoolingRequestOutput]] = [] self.seq_group_metadata_list: Optional[ List[SequenceGroupMetadata]] = None self.scheduler_outputs: Optional[SchedulerOutputs] = None @@ -1314,7 +1314,7 @@ def _advance_to_next_step( else: seq.append_token_id(sample.output_token, sample.logprobs) - def step(self) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: + def step(self) -> List[Union[RequestOutput, PoolingRequestOutput]]: """Performs one decoding iteration and returns newly generated results. .. 
figure:: https://i.imgur.com/sv2HssD.png diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index fe21c58c775fe..d26728e8c6e67 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -35,7 +35,7 @@ from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.model_executor.layers.sampler import SamplerOutput -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs @@ -495,7 +495,7 @@ def encode( lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: + ) -> AsyncGenerator[PoolingRequestOutput, None]: ... @overload @@ -507,7 +507,7 @@ def encode( lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: + ) -> AsyncGenerator[PoolingRequestOutput, None]: ... @deprecate_kwargs( @@ -524,7 +524,7 @@ def encode( priority: int = 0, *, inputs: Optional[PromptType] = None # DEPRECATED - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: + ) -> AsyncGenerator[PoolingRequestOutput, None]: """Generate outputs for a request from an embedding model. Generate outputs for a request. This method is a coroutine. It adds the @@ -540,7 +540,7 @@ def encode( trace_headers: OpenTelemetry trace headers. Yields: - The output `EmbeddingRequestOutput` objects from the LLMEngine + The output `PoolingRequestOutput` objects from the LLMEngine for the request. """ if inputs is not None: @@ -549,7 +549,7 @@ def encode( and request_id is not None) return cast( - AsyncGenerator[EmbeddingRequestOutput, None], + AsyncGenerator[PoolingRequestOutput, None], self._process_request(prompt, pooling_params, request_id, @@ -567,7 +567,7 @@ async def _process_request( prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, ) -> Union[AsyncGenerator[RequestOutput, None], AsyncGenerator[ - EmbeddingRequestOutput, None]]: + PoolingRequestOutput, None]]: """Send an RPCGenerateRequest to the RPCServer and stream responses.""" # If already dead, error out. diff --git a/vllm/engine/protocol.py b/vllm/engine/protocol.py index e15395d75c91f..4079de7d36793 100644 --- a/vllm/engine/protocol.py +++ b/vllm/engine/protocol.py @@ -11,8 +11,7 @@ from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.model_executor.layers.sampler import SamplerOutput -from vllm.outputs import (CompletionOutput, EmbeddingRequestOutput, - RequestOutput) +from vllm.outputs import CompletionOutput, PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import BeamSearchParams, SamplingParams @@ -209,7 +208,7 @@ def encode( lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: + ) -> AsyncGenerator[PoolingRequestOutput, None]: """Generate outputs for a request from an embedding model.""" ... 
diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 1551a9a998160..a25c401b4ea10 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -26,7 +26,7 @@ from vllm.lora.request import LoRARequest from vllm.model_executor.guided_decoding.guided_fields import ( GuidedDecodingRequest, LLMGuidedOptions) -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams, @@ -679,7 +679,7 @@ def encode( prompt_token_ids: Optional[List[int]] = None, use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + ) -> List[PoolingRequestOutput]: ... @overload # LEGACY: multi (prompt + optional token ids) @@ -691,7 +691,7 @@ def encode( prompt_token_ids: Optional[List[List[int]]] = None, use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + ) -> List[PoolingRequestOutput]: ... @overload # LEGACY: single (token ids + optional prompt) @@ -704,7 +704,7 @@ def encode( prompt_token_ids: List[int], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + ) -> List[PoolingRequestOutput]: ... @overload # LEGACY: multi (token ids + optional prompt) @@ -717,7 +717,7 @@ def encode( prompt_token_ids: List[List[int]], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + ) -> List[PoolingRequestOutput]: ... @overload # LEGACY: single or multi token ids [pos-only] @@ -728,7 +728,7 @@ def encode( prompt_token_ids: Union[List[int], List[List[int]]], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + ) -> List[PoolingRequestOutput]: ... @overload @@ -741,7 +741,7 @@ def encode( Sequence[PoolingParams]]] = None, use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + ) -> List[PoolingRequestOutput]: ... @deprecate_kwargs( @@ -759,7 +759,7 @@ def encode( use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, prompt_adapter_request: Optional[PromptAdapterRequest] = None, - ) -> List[EmbeddingRequestOutput]: + ) -> List[PoolingRequestOutput]: """Generates the completions for the input prompts. This class automatically batches the given prompts, considering @@ -778,7 +778,7 @@ def encode( generation, if any. Returns: - A list of ``EmbeddingRequestOutput`` objects containing the + A list of ``PoolingRequestOutput`` objects containing the generated embeddings in the same order as the input prompts. Note: @@ -821,7 +821,7 @@ def encode( outputs = self._run_engine(use_tqdm=use_tqdm) return self.engine_class.validate_outputs(outputs, - EmbeddingRequestOutput) + PoolingRequestOutput) def score( self, @@ -832,7 +832,7 @@ def score( use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, prompt_adapter_request: Optional[PromptAdapterRequest] = None, - ) -> List[EmbeddingRequestOutput]: + ) -> List[PoolingRequestOutput]: """Generates similarity scores for all pairs . The inputs can be 1 -> 1, 1 -> N or N -> N. 
In the 1 - N case @@ -854,7 +854,7 @@ def score( generation, if any. Returns: - A list of ``EmbeddingRequestOutput`` objects containing the + A list of ``PoolingRequestOutput`` objects containing the generated scores in the same order as the input prompts. """ task = self.llm_engine.model_config.task @@ -943,7 +943,7 @@ def ensure_str(prompt: SingletonPrompt): outputs = self._run_engine(use_tqdm=use_tqdm) return self.engine_class.validate_outputs(outputs, - EmbeddingRequestOutput) + PoolingRequestOutput) def start_profile(self) -> None: self.llm_engine.start_profile() @@ -1085,7 +1085,7 @@ def _add_guided_params( def _run_engine( self, *, use_tqdm: bool - ) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: + ) -> List[Union[RequestOutput, PoolingRequestOutput]]: # Initialize tqdm. if use_tqdm: num_requests = self.llm_engine.get_num_unfinished_requests() @@ -1098,7 +1098,7 @@ def _run_engine( ) # Run the engine. - outputs: List[Union[RequestOutput, EmbeddingRequestOutput]] = [] + outputs: List[Union[RequestOutput, PoolingRequestOutput]] = [] total_in_toks = 0 total_out_toks = 0 while self.llm_engine.has_unfinished_requests(): diff --git a/vllm/entrypoints/openai/serving_embedding.py b/vllm/entrypoints/openai/serving_embedding.py index 78e2416d9d4da..2cbb252610e39 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -18,14 +18,14 @@ ErrorResponse, UsageInfo) from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing from vllm.logger import init_logger -from vllm.outputs import EmbeddingOutput, EmbeddingRequestOutput +from vllm.outputs import PoolingOutput, PoolingRequestOutput from vllm.utils import merge_async_iterators, random_uuid logger = init_logger(__name__) def _get_embedding( - output: EmbeddingOutput, + output: PoolingOutput, encoding_format: Literal["float", "base64"], ) -> Union[List[float], str]: if encoding_format == "float": @@ -40,7 +40,7 @@ def _get_embedding( def request_output_to_embedding_response( - final_res_batch: List[EmbeddingRequestOutput], request_id: str, + final_res_batch: List[PoolingRequestOutput], request_id: str, created_time: int, model_name: str, encoding_format: Literal["float", "base64"]) -> EmbeddingResponse: data: List[EmbeddingResponseData] = [] @@ -169,7 +169,7 @@ async def create_embedding( return self.create_error_response(str(e)) # Schedule the request and get the result generator. 
- generators: List[AsyncGenerator[EmbeddingRequestOutput, None]] = [] + generators: List[AsyncGenerator[PoolingRequestOutput, None]] = [] try: pooling_params = request.to_pooling_params() @@ -207,7 +207,7 @@ async def create_embedding( num_prompts = len(engine_prompts) # Non-streaming response - final_res_batch: List[Optional[EmbeddingRequestOutput]] + final_res_batch: List[Optional[PoolingRequestOutput]] final_res_batch = [None] * num_prompts try: async for i, res in result_generator: @@ -215,7 +215,7 @@ async def create_embedding( assert all(final_res is not None for final_res in final_res_batch) - final_res_batch_checked = cast(List[EmbeddingRequestOutput], + final_res_batch_checked = cast(List[PoolingRequestOutput], final_res_batch) response = request_output_to_embedding_response( diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py index 7cd8ff08b5608..a1f14449ba9c3 100644 --- a/vllm/entrypoints/openai/serving_score.py +++ b/vllm/entrypoints/openai/serving_score.py @@ -13,7 +13,7 @@ from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger -from vllm.outputs import EmbeddingRequestOutput +from vllm.outputs import PoolingRequestOutput from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer from vllm.utils import make_async, merge_async_iterators, random_uuid @@ -21,7 +21,7 @@ def request_output_to_score_response( - final_res_batch: List[EmbeddingRequestOutput], request_id: str, + final_res_batch: List[PoolingRequestOutput], request_id: str, created_time: int, model_name: str) -> ScoreResponse: data: List[ScoreResponseData] = [] score = None @@ -133,7 +133,7 @@ async def create_score( return self.create_error_response(str(e)) # Schedule the request and get the result generator. 
- generators: List[AsyncGenerator[EmbeddingRequestOutput, None]] = [] + generators: List[AsyncGenerator[PoolingRequestOutput, None]] = [] input_pairs = make_pairs(request.text_1, request.text_2) @@ -194,7 +194,7 @@ async def create_score( num_prompts = len(engine_prompts) # Non-streaming response - final_res_batch: List[Optional[EmbeddingRequestOutput]] + final_res_batch: List[Optional[PoolingRequestOutput]] final_res_batch = [None] * num_prompts try: @@ -203,7 +203,7 @@ async def create_score( assert all(final_res is not None for final_res in final_res_batch) - final_res_batch_checked = cast(List[EmbeddingRequestOutput], + final_res_batch_checked = cast(List[PoolingRequestOutput], final_res_batch) response = request_output_to_score_response( diff --git a/vllm/model_executor/models/__init__.py b/vllm/model_executor/models/__init__.py index d66373512b95e..a3ef9adad16d9 100644 --- a/vllm/model_executor/models/__init__.py +++ b/vllm/model_executor/models/__init__.py @@ -1,15 +1,14 @@ from .interfaces import (HasInnerState, SupportsLoRA, SupportsMultiModal, SupportsPP, has_inner_state, supports_lora, supports_multimodal, supports_pp) -from .interfaces_base import (VllmModelForEmbedding, - VllmModelForTextGeneration, is_embedding_model, - is_text_generation_model) +from .interfaces_base import (VllmModelForPooling, VllmModelForTextGeneration, + is_pooling_model, is_text_generation_model) from .registry import ModelRegistry __all__ = [ "ModelRegistry", - "VllmModelForEmbedding", - "is_embedding_model", + "VllmModelForPooling", + "is_pooling_model", "VllmModelForTextGeneration", "is_text_generation_model", "HasInnerState", @@ -20,4 +19,4 @@ "supports_multimodal", "SupportsPP", "supports_pp", -] \ No newline at end of file +] diff --git a/vllm/model_executor/models/adapters.py b/vllm/model_executor/models/adapters.py index 360433a07c5b8..9cc43ae9181b9 100644 --- a/vllm/model_executor/models/adapters.py +++ b/vllm/model_executor/models/adapters.py @@ -4,7 +4,7 @@ import torch import torch.nn as nn -from .interfaces_base import VllmModelForEmbedding, is_embedding_model +from .interfaces_base import VllmModelForPooling, is_pooling_model _T = TypeVar("_T", bound=type[nn.Module]) @@ -12,7 +12,7 @@ def as_embedding_model(cls: _T) -> _T: """Subclass an existing vLLM model to support embeddings.""" # Avoid modifying existing embedding models - if is_embedding_model(cls): + if is_pooling_model(cls): return cls # Lazy import @@ -23,7 +23,7 @@ def as_embedding_model(cls: _T) -> _T: from .utils import AutoWeightsLoader, WeightsMapper - class ModelForEmbedding(cls, VllmModelForEmbedding): + class ModelForEmbedding(cls, VllmModelForPooling): def __init__( self, diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index 1545ce332309f..01a381381ccec 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -7,7 +7,7 @@ from vllm.logger import init_logger from vllm.utils import supports_kw -from .interfaces_base import is_embedding_model +from .interfaces_base import is_pooling_model if TYPE_CHECKING: from vllm.attention import AttentionMetadata @@ -389,4 +389,4 @@ def _supports_cross_encoding( def supports_cross_encoding( model: Union[Type[object], object], ) -> Union[TypeIs[Type[SupportsCrossEncoding]], TypeIs[SupportsCrossEncoding]]: - return is_embedding_model(model) and _supports_cross_encoding(model) + return is_pooling_model(model) and _supports_cross_encoding(model) diff --git 
a/vllm/model_executor/models/interfaces_base.py b/vllm/model_executor/models/interfaces_base.py index 957a5a6e26b5c..de733b6d49a53 100644 --- a/vllm/model_executor/models/interfaces_base.py +++ b/vllm/model_executor/models/interfaces_base.py @@ -141,7 +141,7 @@ def is_text_generation_model( @runtime_checkable -class VllmModelForEmbedding(VllmModel[C_co, T], Protocol[C_co, T]): +class VllmModelForPooling(VllmModel[C_co, T], Protocol[C_co, T]): def pooler( self, @@ -153,23 +153,22 @@ def pooler( @overload -def is_embedding_model( - model: Type[object]) -> TypeIs[Type[VllmModelForEmbedding]]: +def is_pooling_model(model: Type[object]) -> TypeIs[Type[VllmModelForPooling]]: ... @overload -def is_embedding_model(model: object) -> TypeIs[VllmModelForEmbedding]: +def is_pooling_model(model: object) -> TypeIs[VllmModelForPooling]: ... -def is_embedding_model( +def is_pooling_model( model: Union[Type[object], object], -) -> Union[TypeIs[Type[VllmModelForEmbedding]], TypeIs[VllmModelForEmbedding]]: +) -> Union[TypeIs[Type[VllmModelForPooling]], TypeIs[VllmModelForPooling]]: if not is_vllm_model(model): return False if isinstance(model, type): - return isinstance(model, VllmModelForEmbedding) + return isinstance(model, VllmModelForPooling) - return isinstance(model, VllmModelForEmbedding) + return isinstance(model, VllmModelForPooling) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 7d2bfce9ba264..2b7b69e8c3a95 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -24,7 +24,7 @@ from .interfaces import (has_inner_state, is_attention_free, supports_cross_encoding, supports_multimodal, supports_pp) -from .interfaces_base import is_embedding_model, is_text_generation_model +from .interfaces_base import is_pooling_model, is_text_generation_model logger = init_logger(__name__) @@ -211,7 +211,7 @@ class _ModelInfo: architecture: str is_text_generation_model: bool - is_embedding_model: bool + is_pooling_model: bool supports_cross_encoding: bool supports_multimodal: bool supports_pp: bool @@ -220,19 +220,19 @@ class _ModelInfo: @staticmethod def from_model_cls(model: Type[nn.Module]) -> "_ModelInfo": - is_embedding_model_ = is_embedding_model(model) - if not is_embedding_model_: + is_pooling_model_ = is_pooling_model(model) + if not is_pooling_model_: try: as_embedding_model(model) except Exception: pass else: - is_embedding_model_ = True + is_pooling_model_ = True return _ModelInfo( architecture=model.__name__, is_text_generation_model=is_text_generation_model(model), - is_embedding_model=is_embedding_model_, + is_pooling_model=is_pooling_model_, supports_cross_encoding=supports_cross_encoding(model), supports_multimodal=supports_multimodal(model), supports_pp=supports_pp(model), @@ -441,12 +441,12 @@ def is_text_generation_model( model_cls, _ = self.inspect_model_cls(architectures) return model_cls.is_text_generation_model - def is_embedding_model( + def is_pooling_model( self, architectures: Union[str, List[str]], ) -> bool: model_cls, _ = self.inspect_model_cls(architectures) - return model_cls.is_embedding_model + return model_cls.is_pooling_model def is_cross_encoder_model( self, diff --git a/vllm/outputs.py b/vllm/outputs.py index 2d256803edfe8..86264f604f6bc 100644 --- a/vllm/outputs.py +++ b/vllm/outputs.py @@ -53,8 +53,8 @@ def __repr__(self) -> str: @dataclass -class EmbeddingOutput: - """The output data of one completion output of a request. 
+class PoolingOutput: + """The output data of one pooling output of a request. Args: embedding: The embedding vector, which is a list of floats. The @@ -63,7 +63,7 @@ class EmbeddingOutput: embedding: List[float] def __repr__(self) -> str: - return (f"EmbeddingOutput(" + return (f"PoolingOutput(" f"embedding={len(self.embedding)})") @@ -316,18 +316,18 @@ def __repr__(self) -> str: f"multi_modal_placeholders={self.multi_modal_placeholders})") -class EmbeddingRequestOutput: +class PoolingRequestOutput: """ - The output data of an embedding request to the LLM. + The output data of a pooling request to the LLM. Args: - request_id (str): A unique identifier for the embedding request. - outputs (EmbeddingOutput): The embedding results for the given input. + request_id (str): A unique identifier for the pooling request. + outputs (PoolingOutput): The pooling results for the given input. prompt_token_ids (List[int]): A list of token IDs used in the prompt. - finished (bool): A flag indicating whether the embedding is completed. + finished (bool): A flag indicating whether the pooling is completed. """ - def __init__(self, request_id: str, outputs: "EmbeddingOutput", + def __init__(self, request_id: str, outputs: "PoolingOutput", prompt_token_ids: List[int], finished: bool): self.request_id = request_id self.prompt_token_ids = prompt_token_ids @@ -336,11 +336,11 @@ def __init__(self, request_id: str, outputs: "EmbeddingOutput", @classmethod def from_seq_group(cls, - seq_group: 'SequenceGroup') -> "EmbeddingRequestOutput": + seq_group: 'SequenceGroup') -> "PoolingRequestOutput": if seq_group.embeddings is None: raise ValueError( "Embeddings are missing in seq_group for EmbeddingRequest.") - output = EmbeddingOutput(seq_group.embeddings) + output = PoolingOutput(seq_group.embeddings) prompt_token_ids = seq_group.prompt_token_ids finished = seq_group.is_finished() @@ -348,15 +348,15 @@ def from_seq_group(cls, def __repr__(self): """ - Returns a string representation of an EmbeddingRequestOutput instance. + Returns a string representation of an PoolingRequestOutput instance. The representation includes the request_id and the number of outputs, - providing a quick overview of the embedding request's results. + providing a quick overview of the pooling request's results. Returns: - str: A string representation of the EmbeddingRequestOutput instance. + str: A string representation of the PoolingRequestOutput instance. """ - return (f"EmbeddingRequestOutput(request_id='{self.request_id}', " + return (f"PoolingRequestOutput(request_id='{self.request_id}', " f"outputs={repr(self.outputs)}, " f"prompt_token_ids={self.prompt_token_ids}, " f"finished={self.finished})") @@ -415,7 +415,30 @@ def create(seq_group: SequenceGroup, # Determine the type based on a condition, for example: if hasattr(seq_group, 'embeddings') and seq_group.embeddings is not None: - return EmbeddingRequestOutput.from_seq_group(seq_group) + return PoolingRequestOutput.from_seq_group(seq_group) else: return RequestOutput.from_seq_group(seq_group, use_cache, seq_id_to_seq_group) + + +def __getattr__(name: str): + import warnings + + if name == "EmbeddingOutput": + msg = ("EmbeddingOutput has been renamed to PoolingOutput. " + "The original name will be removed in an upcoming version.") + + warnings.warn(DeprecationWarning(msg), stacklevel=2) + + return PoolingOutput + + if name == "EmbeddingRequestOutput": + msg = ("EmbeddingRequestOutput has been renamed to " + "PoolingRequestOutput. 
" + "The original name will be removed in an upcoming version.") + + warnings.warn(DeprecationWarning(msg), stacklevel=2) + + return PoolingRequestOutput + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index a17c8eac4b77c..7335c637f0f79 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -9,7 +9,7 @@ from vllm.inputs.preprocess import InputPreprocessor from vllm.logger import init_logger from vllm.lora.request import LoRARequest -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams @@ -133,7 +133,7 @@ async def add_request( trace_headers: Optional[Mapping[str, str]] = None, prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, - ) -> AsyncGenerator[Union[RequestOutput, EmbeddingRequestOutput], None]: + ) -> AsyncGenerator[Union[RequestOutput, PoolingRequestOutput], None]: """Add new request to the AsyncLLM.""" if self.detokenizer.is_request_active(request_id): diff --git a/vllm/v1/engine/async_stream.py b/vllm/v1/engine/async_stream.py index 3e6c759ad5ebd..35449238c3259 100644 --- a/vllm/v1/engine/async_stream.py +++ b/vllm/v1/engine/async_stream.py @@ -1,11 +1,11 @@ import asyncio from typing import Any, AsyncGenerator, Callable, Optional, Type, Union -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput class AsyncStream: - """A stream of RequestOutputs or EmbeddingRequestOutputs for a request + """A stream of RequestOutputs or PoolingRequestOutputs for a request that can be iterated over asynchronously via an async generator.""" STOP_ITERATION = Exception() # Sentinel @@ -16,7 +16,7 @@ def __init__(self, request_id: str, cancel: Callable[[str], None]) -> None: self._queue: asyncio.Queue = asyncio.Queue() self._finished = False - def put(self, item: Union[RequestOutput, EmbeddingRequestOutput, + def put(self, item: Union[RequestOutput, PoolingRequestOutput, Exception]) -> None: if not self._finished: self._queue.put_nowait(item) @@ -32,7 +32,7 @@ def finish( async def generator( self - ) -> AsyncGenerator[Union[RequestOutput, EmbeddingRequestOutput], None]: + ) -> AsyncGenerator[Union[RequestOutput, PoolingRequestOutput], None]: finished = False try: while True: diff --git a/vllm/worker/cpu_embedding_model_runner.py b/vllm/worker/cpu_pooling_model_runner.py similarity index 98% rename from vllm/worker/cpu_embedding_model_runner.py rename to vllm/worker/cpu_pooling_model_runner.py index 3954e4c4c8a5b..17b2fd2564a04 100644 --- a/vllm/worker/cpu_embedding_model_runner.py +++ b/vllm/worker/cpu_pooling_model_runner.py @@ -16,12 +16,12 @@ @dataclasses.dataclass(frozen=True) class ModelInputForCPUWithPoolingMetadata(ModelInputForCPU): """ - Used by the CPUEmbeddingModelRunner. + Used by the CPUPoolingModelRunner. 
""" pooling_metadata: Optional["PoolingMetadata"] = None -class CPUEmbeddingModelRunner( +class CPUPoolingModelRunner( CPUModelRunnerBase[ModelInputForCPUWithPoolingMetadata]): _model_input_cls: Type[ModelInputForCPUWithPoolingMetadata] = ( ModelInputForCPUWithPoolingMetadata) diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index cf04808b73372..4fad1a3f4caeb 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -14,9 +14,9 @@ from vllm.model_executor import set_random_seed from vllm.sequence import ExecuteModelRequest from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE -from vllm.worker.cpu_embedding_model_runner import CPUEmbeddingModelRunner from vllm.worker.cpu_enc_dec_model_runner import CPUEncoderDecoderModelRunner from vllm.worker.cpu_model_runner import CPUModelRunner, CPUModelRunnerBase +from vllm.worker.cpu_pooling_model_runner import CPUPoolingModelRunner from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, LoraNotSupportedWorkerBase, WorkerBase, WorkerInput) @@ -164,7 +164,7 @@ def __init__( else {"return_hidden_states": True} ModelRunnerClass: Type[CPUModelRunnerBase] = CPUModelRunner if self.model_config.task == "embedding": - ModelRunnerClass = CPUEmbeddingModelRunner + ModelRunnerClass = CPUPoolingModelRunner elif self.model_config.is_encoder_decoder: ModelRunnerClass = CPUEncoderDecoderModelRunner self.model_runner: CPUModelRunnerBase = ModelRunnerClass( diff --git a/vllm/worker/embedding_model_runner.py b/vllm/worker/pooling_model_runner.py similarity index 98% rename from vllm/worker/embedding_model_runner.py rename to vllm/worker/pooling_model_runner.py index f56805918fd15..1beae1e3884c5 100644 --- a/vllm/worker/embedding_model_runner.py +++ b/vllm/worker/pooling_model_runner.py @@ -21,12 +21,12 @@ @dataclasses.dataclass(frozen=True) class ModelInputForGPUWithPoolingMetadata(ModelInputForGPU): """ - Used by the EmbeddingModelRunner. + Used by the PoolingModelRunner. 
""" pooling_metadata: Optional["PoolingMetadata"] = None -class EmbeddingModelRunner( +class PoolingModelRunner( GPUModelRunnerBase[ModelInputForGPUWithPoolingMetadata]): _model_input_cls: Type[ModelInputForGPUWithPoolingMetadata] = ( ModelInputForGPUWithPoolingMetadata) @@ -52,7 +52,7 @@ def execute_model( ) -> Optional[Union[List[PoolerOutput], IntermediateTensors]]: if num_steps > 1: raise ValueError( - "EmbeddingModelRunner does not support multi-step execution.") + "PoolingModelRunner does not support multi-step execution.") if self.lora_config: assert model_input.lora_requests is not None diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index 24e7bc760b0c0..d58cb029618e9 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -22,9 +22,9 @@ from vllm.sequence import (ExecuteModelRequest, IntermediateTensors, SequenceGroupMetadata, SequenceGroupMetadataDelta) from vllm.worker.cache_engine import CacheEngine -from vllm.worker.embedding_model_runner import EmbeddingModelRunner from vllm.worker.enc_dec_model_runner import EncoderDecoderModelRunner from vllm.worker.model_runner import GPUModelRunnerBase, ModelRunner +from vllm.worker.pooling_model_runner import PoolingModelRunner from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, WorkerBase, WorkerInput) @@ -75,7 +75,7 @@ def __init__( ModelRunnerClass: Type[GPUModelRunnerBase] = ModelRunner if model_config.task == "embedding": - ModelRunnerClass = EmbeddingModelRunner + ModelRunnerClass = PoolingModelRunner elif self.model_config.is_encoder_decoder: ModelRunnerClass = EncoderDecoderModelRunner self.model_runner: GPUModelRunnerBase = ModelRunnerClass( From 169a0ff911134b930adc0afc0d8c6f370091e10d Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 1 Dec 2024 00:41:38 -0800 Subject: [PATCH 196/397] [doc] add warning about comparing hf and vllm outputs (#10805) Signed-off-by: youkaichao --- docs/source/models/supported_models.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index f571b8bf6735e..9f3b6f59068e2 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -701,6 +701,9 @@ At vLLM, we are committed to facilitating the integration and support of third-p 2. **Best-Effort Consistency**: While we aim to maintain a level of consistency between the models implemented in vLLM and other frameworks like transformers, complete alignment is not always feasible. Factors like acceleration techniques and the use of low-precision computations can introduce discrepancies. Our commitment is to ensure that the implemented models are functional and produce sensible results. +.. tip:: + When comparing the output of :code:`model.generate` from HuggingFace Transformers with the output of :code:`llm.generate` from vLLM, note that the former reads the model's generation config file (i.e., `generation_config.json `__) and applies the default parameters for generation, while the latter only uses the parameters passed to the function. Ensure all sampling parameters are identical when comparing outputs. + 3. **Issue Resolution and Model Updates**: Users are encouraged to report any bugs or issues they encounter with third-party models. Proposed fixes should be submitted via PRs, with a clear explanation of the problem and the rationale behind the proposed solution. If a fix for one model impacts another, we rely on the community to highlight and address these cross-model dependencies. 
Note: for bugfix PRs, it is good etiquette to inform the original author to seek their feedback. 4. **Monitoring and Updates**: Users interested in specific models should monitor the commit history for those models (e.g., by tracking changes in the main/vllm/model_executor/models directory). This proactive approach helps users stay informed about updates and changes that may affect the models they use. From c11f172187b6f44710e1f011ca8bff923ce49a7f Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Sun, 1 Dec 2024 00:47:05 -0800 Subject: [PATCH 197/397] [Misc] Adding `MMMU-Pro` vision dataset to serving benchmark (#10804) Signed-off-by: Roger Wang Co-authored-by: Chen Zhang Co-authored-by: Isotr0py <2037008807@qq.com> --- benchmarks/benchmark_serving.py | 65 +++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/benchmarks/benchmark_serving.py b/benchmarks/benchmark_serving.py index e9fc037a46965..3256692142c5e 100644 --- a/benchmarks/benchmark_serving.py +++ b/benchmarks/benchmark_serving.py @@ -199,6 +199,56 @@ def sample_sonnet_requests( return sampled_requests +def sample_mmmu_pro_vision_requests( + dataset, + num_requests: int, + tokenizer: PreTrainedTokenizerBase, + fixed_output_len: Optional[int] = None, +) -> List[Tuple[str, str, int, Optional[Dict[str, Collection[str]]]]]: + sampled_requests: List[Tuple[str, int, int, Dict[str, + Collection[str]]]] = [] + for data in dataset: + if len(sampled_requests) == num_requests: + break + + # MMMU-Pro vision direct prompt + # Ref: https://github.com/MMMU-Benchmark/MMMU/blob/6ce42f4d8f70c1841c67867152648974415b5cac/mmmu-pro/prompts.yaml#L5 + prompt = ( + "Answer with the option letter from the given choices directly. " + "The last line of your response should be of the following " + "format: 'Answer: $LETTER' (without quotes) where LETTER is one of " + "options.") + + prompt_token_ids = tokenizer(prompt).input_ids + if fixed_output_len is None: + # Default max output len is set to 128 + print("--hf-output-len is not provided. 
Using default value 128.") + fixed_output_len = 128 + + prompt_len = len(prompt_token_ids) + output_len = fixed_output_len + + assert isinstance( + data["image"], + Image), ("Input image format must be `PIL.Image.Image`, " + f"given {type(data['image'])}.") + image: Image = data["image"] + image = image.convert("RGB") + image_data = io.BytesIO() + image.save(image_data, format='JPEG') + image_base64 = base64.b64encode(image_data.getvalue()).decode("utf-8") + mm_content = { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{image_base64}" + }, + } + + sampled_requests.append((prompt, prompt_len, output_len, mm_content)) + + return sampled_requests + + def sample_hf_requests( dataset_path: str, dataset_subset: str, @@ -208,6 +258,21 @@ def sample_hf_requests( random_seed: int, fixed_output_len: Optional[int] = None, ) -> List[Tuple[str, str, int, Optional[Dict[str, Collection[str]]]]]: + + # Special case for MMMU-Pro vision dataset + if dataset_path == 'MMMU/MMMU_Pro' and dataset_subset == 'vision': + assert dataset_split == "test" + dataset = load_dataset(dataset_path, + name=dataset_subset, + split=dataset_split, + streaming=True) + assert "image" in dataset.features, ( + "MMMU/MMMU_Pro vision dataset must have 'image' column.") + filter_func = lambda x: isinstance(x["image"], Image) + dataset = dataset.shuffle(seed=random_seed).filter(filter_func) + return sample_mmmu_pro_vision_requests(dataset, num_requests, + tokenizer, fixed_output_len) + dataset = load_dataset(dataset_path, name=dataset_subset, split=dataset_split, From 0590ec3fd9857063c43c80df281e24c16c51b2ec Mon Sep 17 00:00:00 2001 From: Kuntai Du Date: Sun, 1 Dec 2024 19:01:00 -0600 Subject: [PATCH 198/397] [Core] Implement disagg prefill by StatelessProcessGroup (#10502) This PR provides initial support for single-node disaggregated prefill in 1P1D scenario. 
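Example usage (a minimal 1P1D launch sketch, condensed from the examples/disaggregated_prefill.sh script added in this PR; the model name, ports, and memory settings are simply the values used in that example):

    # prefilling instance (KV producer), rank 0 in the KV transfer group
    CUDA_VISIBLE_DEVICES=0 vllm serve meta-llama/Meta-Llama-3.1-8B-Instruct \
        --port 8100 --max-model-len 100 --gpu-memory-utilization 0.8 \
        --kv-transfer-config \
        '{"kv_connector":"PyNcclConnector","kv_role":"kv_producer","kv_rank":0,"kv_parallel_size":2}'

    # decoding instance (KV consumer), rank 1 in the KV transfer group
    CUDA_VISIBLE_DEVICES=1 vllm serve meta-llama/Meta-Llama-3.1-8B-Instruct \
        --port 8200 --max-model-len 100 --gpu-memory-utilization 0.8 \
        --kv-transfer-config \
        '{"kv_connector":"PyNcclConnector","kv_role":"kv_consumer","kv_rank":1,"kv_parallel_size":2}'

    # A proxy (see benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py)
    # first sends each request to the prefill instance with max_tokens=1, then
    # replays it on the decode instance, which receives the KV cache via PyNccl.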
Signed-off-by: KuntaiDu
Co-authored-by: ApostaC
Co-authored-by: YaoJiayi <120040070@link.cuhk.edu.cn>
---
 .buildkite/test-pipeline.yaml | 4 +
 .../disagg_overhead_benchmark.sh | 144 +++++++++
 .../disagg_performance_benchmark.sh | 164 +++++++++++
 .../disagg_prefill_proxy_server.py | 61 ++++
 .../disagg_benchmarks/round_robin_proxy.py | 60 ++++
 .../visualize_benchmark_results.py | 46 +++
 examples/disaggregated_prefill.sh | 109 +++++++
 tests/kv_transfer/disagg_test.py | 119 ++++++++
 tests/kv_transfer/module_test.py | 64 ++++
 tests/kv_transfer/test_lookup_buffer.py | 160 ++++++++++
 tests/kv_transfer/test_lookup_buffer.sh | 3 +
 tests/kv_transfer/test_send_recv.py | 155 ++++++++++
 tests/kv_transfer/test_send_recv.sh | 3 +
 vllm/config.py | 84 ++++++
 vllm/distributed/kv_transfer/README.md | 30 ++
 vllm/distributed/kv_transfer/__init__.py | 0
 .../kv_transfer/disagg_prefill_workflow.jpg | Bin 0 -> 142656 bytes
 .../kv_transfer/kv_connector/__init__.py | 0
 .../kv_transfer/kv_connector/base.py | 122 ++++++++
 .../kv_transfer/kv_connector/factory.py | 19 ++
 .../kv_connector/simple_connector.py | 261 +++++++++++++++++
 .../kv_transfer/kv_lookup_buffer/__init__.py | 0
 .../kv_transfer/kv_lookup_buffer/base.py | 108 +++++++
 .../kv_lookup_buffer/simple_buffer.py | 242 +++++++++++++++
 .../kv_transfer/kv_pipe/__init__.py | 0
 vllm/distributed/kv_transfer/kv_pipe/base.py | 65 +++++
 .../kv_transfer/kv_pipe/pynccl_pipe.py | 276 ++++++++++++++++++
 .../kv_transfer/kv_transfer_agent.py | 75 +++++
 vllm/distributed/parallel_state.py | 35 ++-
 vllm/engine/arg_utils.py | 18 +-
 vllm/worker/model_runner.py | 105 ++++++-
 vllm/worker/worker.py | 13 +-
 vllm/worker/worker_base.py | 1 +
 33 files changed, 2525 insertions(+), 21 deletions(-)
 create mode 100644 benchmarks/disagg_benchmarks/disagg_overhead_benchmark.sh
 create mode 100644 benchmarks/disagg_benchmarks/disagg_performance_benchmark.sh
 create mode 100644 benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py
 create mode 100644 benchmarks/disagg_benchmarks/round_robin_proxy.py
 create mode 100644 benchmarks/disagg_benchmarks/visualize_benchmark_results.py
 create mode 100644 examples/disaggregated_prefill.sh
 create mode 100644 tests/kv_transfer/disagg_test.py
 create mode 100644 tests/kv_transfer/module_test.py
 create mode 100644 tests/kv_transfer/test_lookup_buffer.py
 create mode 100644 tests/kv_transfer/test_lookup_buffer.sh
 create mode 100644 tests/kv_transfer/test_send_recv.py
 create mode 100644 tests/kv_transfer/test_send_recv.sh
 create mode 100644 vllm/distributed/kv_transfer/README.md
 create mode 100644 vllm/distributed/kv_transfer/__init__.py
 create mode 100644 vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg
 create mode 100644 vllm/distributed/kv_transfer/kv_connector/__init__.py
 create mode 100644 vllm/distributed/kv_transfer/kv_connector/base.py
 create mode 100644 vllm/distributed/kv_transfer/kv_connector/factory.py
 create mode 100644 vllm/distributed/kv_transfer/kv_connector/simple_connector.py
 create mode 100644 vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py
 create mode 100644 vllm/distributed/kv_transfer/kv_lookup_buffer/base.py
 create mode 100644 vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py
 create mode 100644 vllm/distributed/kv_transfer/kv_pipe/__init__.py
 create mode 100644 vllm/distributed/kv_transfer/kv_pipe/base.py
 create mode 100644 vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py
 create mode 100644 vllm/distributed/kv_transfer/kv_transfer_agent.py
diff --git a/.buildkite/test-pipeline.yaml
b/.buildkite/test-pipeline.yaml index 46692506f01d4..f5591f1098534 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -430,6 +430,9 @@ steps: - vllm/model_executor/models/ - tests/distributed/ - vllm/compilation + - vllm/worker/worker_base.py + - vllm/worker/worker.py + - vllm/worker/model_runner.py commands: - pytest -v -s ./compile/test_basic_correctness.py - pytest -v -s ./compile/test_wrapper.py @@ -443,6 +446,7 @@ steps: - pip install -e ./plugins/vllm_add_dummy_model - pytest -v -s distributed/test_distributed_oot.py - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s test_sharded_state_loader.py + - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s kv_transfer/disagg_test.py - label: Multi-step Tests (4 GPUs) # 36min working_dir: "/vllm-workspace/tests" diff --git a/benchmarks/disagg_benchmarks/disagg_overhead_benchmark.sh b/benchmarks/disagg_benchmarks/disagg_overhead_benchmark.sh new file mode 100644 index 0000000000000..2924ea4a49f54 --- /dev/null +++ b/benchmarks/disagg_benchmarks/disagg_overhead_benchmark.sh @@ -0,0 +1,144 @@ +#!/bin/bash + +# benchmark the overhead of disaggregated prefill. +# methodology: +# - send all request to prefill vLLM instance. It will buffer KV cache. +# - then send all request to decode instance. +# - The TTFT of decode instance is the overhead. + +set -ex + +kill_gpu_processes() { + # kill all processes on GPU. + pkill -f pt_main_thread + sleep 10 + + # remove vllm config file + rm -rf ~/.config/vllm + + # Print the GPU memory usage + # so that we know if all GPU processes are killed. + gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0) + # The memory usage should be 0 MB. + echo "GPU 0 Memory Usage: $gpu_memory_usage MB" +} + +wait_for_server() { + # wait for vllm server to start + # return 1 if vllm server crashes + local port=$1 + timeout 1200 bash -c " + until curl -s localhost:${port}/v1/completions > /dev/null; do + sleep 1 + done" && return 0 || return 1 +} + + +benchmark() { + + export VLLM_LOGGING_LEVEL=DEBUG + export VLLM_HOST_IP=$(hostname -I | awk '{print $1}') + + # compare chunked prefill with disaggregated prefill + + results_folder="./results" + model="meta-llama/Meta-Llama-3.1-8B-Instruct" + dataset_name="sonnet" + dataset_path="../sonnet_4x.txt" + num_prompts=10 + qps=$1 + prefix_len=50 + input_len=2048 + output_len=$2 + + + CUDA_VISIBLE_DEVICES=0 python3 \ + -m vllm.entrypoints.openai.api_server \ + --model meta-llama/Meta-Llama-3.1-8B-Instruct \ + --port 8100 \ + --max-model-len 10000 \ + --gpu-memory-utilization 0.6 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_producer","kv_rank":0,"kv_parallel_size":2,"kv_buffer_size":5e9}' & + + + CUDA_VISIBLE_DEVICES=1 python3 \ + -m vllm.entrypoints.openai.api_server \ + --model meta-llama/Meta-Llama-3.1-8B-Instruct \ + --port 8200 \ + --max-model-len 10000 \ + --gpu-memory-utilization 0.6 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_consumer","kv_rank":1,"kv_parallel_size":2,"kv_buffer_size":5e9}' & + + wait_for_server 8100 + wait_for_server 8200 + + # let the prefill instance finish prefill + python3 ../benchmark_serving.py \ + --backend vllm \ + --model $model \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --sonnet-input-len $input_len \ + --sonnet-output-len "$output_len" \ + --sonnet-prefix-len $prefix_len \ + --num-prompts $num_prompts \ + --port 8100 \ + --save-result \ + --result-dir $results_folder \ + --result-filename disagg_prefill_2xtp4.json \ + 
--request-rate "inf" + + + # send the request to decode. + # The TTFT of this command will be the overhead of disagg prefill impl. + python3 ../benchmark_serving.py \ + --backend vllm \ + --model $model \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --sonnet-input-len $input_len \ + --sonnet-output-len "$output_len" \ + --sonnet-prefix-len $prefix_len \ + --num-prompts $num_prompts \ + --port 8200 \ + --save-result \ + --result-dir $results_folder \ + --result-filename disagg_prefill_2xtp4.json \ + --request-rate "$qps" + kill_gpu_processes + +} + + +main() { + + (which wget && which curl) || (apt-get update && apt-get install -y wget curl) + (which jq) || (apt-get -y install jq) + (which socat) || (apt-get -y install socat) + + pip install quart httpx + + cd "$(dirname "$0")" + + cd .. + # create sonnet-4x.txt + echo "" > sonnet_4x.txt + for _ in {1..4} + do + cat sonnet.txt >> sonnet_4x.txt + done + cd disagg_benchmarks + + rm -rf results + mkdir results + + default_qps=1 + default_output_len=1 + benchmark $default_qps $default_output_len + +} + + +main "$@" diff --git a/benchmarks/disagg_benchmarks/disagg_performance_benchmark.sh b/benchmarks/disagg_benchmarks/disagg_performance_benchmark.sh new file mode 100644 index 0000000000000..d8d9e976dce76 --- /dev/null +++ b/benchmarks/disagg_benchmarks/disagg_performance_benchmark.sh @@ -0,0 +1,164 @@ +#!/bin/bash + +# Requirement: 8x H100 GPUs. + + +# Model: neuralmagic/Meta-Llama-3-70B-Instruct-FP8-KV +# Query: 2048 input tokens, 11 output tokens, QPS 4, 500 requests +# Resource: 8x H100 +# Approaches: +# 1. Chunked prefill: 1 vllm instance with tp=8 +# 2. Chunked prefill: 2 vllm instance with tp=4, equivalent to 1 tp=4 instance with QPS 4 +# 3. Disaggregated prefill: 1 prefilling instance and 1 decoding instance +# Prefilling instance: max_output_token=1 +# Decoding instance: force the input tokens be the same across requests to bypass prefilling + +set -ex + +kill_gpu_processes() { + # kill all processes on GPU. 
+ pgrep pt_main_thread | xargs -r kill -9 + pgrep python3 | xargs -r kill -9 + for port in 8000 8100 8200; do lsof -t -i:$port | xargs -r kill -9; done + sleep 1 +} + +wait_for_server() { + # wait for vllm server to start + # return 1 if vllm server crashes + local port=$1 + timeout 1200 bash -c " + until curl -s localhost:${port}/v1/completions > /dev/null; do + sleep 1 + done" && return 0 || return 1 +} + + +launch_chunked_prefill() { + model="meta-llama/Meta-Llama-3.1-8B-Instruct" + # disagg prefill + CUDA_VISIBLE_DEVICES=0 python3 \ + -m vllm.entrypoints.openai.api_server \ + --model $model \ + --port 8100 \ + --max-model-len 10000 \ + --enable-chunked-prefill \ + --gpu-memory-utilization 0.6 & + CUDA_VISIBLE_DEVICES=1 python3 \ + -m vllm.entrypoints.openai.api_server \ + --model $model \ + --port 8200 \ + --max-model-len 10000 \ + --enable-chunked-prefill \ + --gpu-memory-utilization 0.6 & + wait_for_server 8100 + wait_for_server 8200 + python3 round_robin_proxy.py & + sleep 1 +} + + +launch_disagg_prefill() { + model="meta-llama/Meta-Llama-3.1-8B-Instruct" + # disagg prefill + CUDA_VISIBLE_DEVICES=0 python3 \ + -m vllm.entrypoints.openai.api_server \ + --model $model \ + --port 8100 \ + --max-model-len 10000 \ + --gpu-memory-utilization 0.6 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_producer","kv_rank":0,"kv_parallel_size":2,"kv_buffer_size":5e9}' & + + CUDA_VISIBLE_DEVICES=1 python3 \ + -m vllm.entrypoints.openai.api_server \ + --model $model \ + --port 8200 \ + --max-model-len 10000 \ + --gpu-memory-utilization 0.6 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_consumer","kv_rank":1,"kv_parallel_size":2,"kv_buffer_size":5e9}' & + + wait_for_server 8100 + wait_for_server 8200 + python3 disagg_prefill_proxy_server.py & + sleep 1 +} + + +benchmark() { + results_folder="./results" + model="meta-llama/Meta-Llama-3.1-8B-Instruct" + dataset_name="sonnet" + dataset_path="../sonnet_4x.txt" + num_prompts=100 + qps=$1 + prefix_len=50 + input_len=1024 + output_len=$2 + tag=$3 + + python3 ../benchmark_serving.py \ + --backend vllm \ + --model $model \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --sonnet-input-len $input_len \ + --sonnet-output-len "$output_len" \ + --sonnet-prefix-len $prefix_len \ + --num-prompts $num_prompts \ + --port 8000 \ + --save-result \ + --result-dir $results_folder \ + --result-filename "$tag"-qps-"$qps".json \ + --request-rate "$qps" + + sleep 2 + +} + + +main() { + + (which wget && which curl) || (apt-get update && apt-get install -y wget curl) + (which jq) || (apt-get -y install jq) + (which socat) || (apt-get -y install socat) + + pip install quart httpx matplotlib aiohttp + + cd "$(dirname "$0")" + + cd .. 
+ # create sonnet-4x.txt so that we can sample 2048 tokens for input + echo "" > sonnet_4x.txt + for _ in {1..4} + do + cat sonnet.txt >> sonnet_4x.txt + done + cd disagg_benchmarks + + rm -rf results + mkdir results + + default_output_len=6 + + export VLLM_HOST_IP=$(hostname -I | awk '{print $1}') + + launch_chunked_prefill + for qps in 2 4 6 8; do + benchmark $qps $default_output_len chunked_prefill + done + kill_gpu_processes + + launch_disagg_prefill + for qps in 2 4 6 8; do + benchmark $qps $default_output_len disagg_prefill + done + kill_gpu_processes + + python3 visualize_benchmark_results.py + +} + + +main "$@" diff --git a/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py b/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py new file mode 100644 index 0000000000000..4058b1c0a3b79 --- /dev/null +++ b/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py @@ -0,0 +1,61 @@ +import os + +import aiohttp +from quart import Quart, make_response, request + +AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60) + +app = Quart(__name__) + + +async def forward_request(url, data): + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: + headers = { + "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}" + } + async with session.post(url=url, json=data, + headers=headers) as response: + if response.status == 200: + # if response.headers.get('Transfer-Encoding') == 'chunked': + if True: + async for chunk_bytes in response.content.iter_chunked( + 1024): + yield chunk_bytes + else: + content = await response.read() + yield content + + +@app.route('/v1/completions', methods=['POST']) +async def handle_request(): + try: + original_request_data = await request.get_json() + + prefill_request = original_request_data.copy() + # change max_tokens = 1 to let it only do prefill + prefill_request['max_tokens'] = 1 + + # finish prefill + async for _ in forward_request('http://localhost:8100/v1/completions', + prefill_request): + continue + + # return decode + generator = forward_request('http://localhost:8200/v1/completions', + original_request_data) + response = await make_response(generator) + response.timeout = None + + return response + + except Exception as e: + import sys + import traceback + exc_info = sys.exc_info() + print("Error occurred in disagg prefill proxy server") + print(e) + print("".join(traceback.format_exception(*exc_info))) + + +if __name__ == '__main__': + app.run(port=8000) diff --git a/benchmarks/disagg_benchmarks/round_robin_proxy.py b/benchmarks/disagg_benchmarks/round_robin_proxy.py new file mode 100644 index 0000000000000..6eb5f63980070 --- /dev/null +++ b/benchmarks/disagg_benchmarks/round_robin_proxy.py @@ -0,0 +1,60 @@ +import asyncio +import itertools + +import aiohttp +from aiohttp import web + + +class RoundRobinProxy: + + def __init__(self, target_ports): + self.target_ports = target_ports + self.port_cycle = itertools.cycle(self.target_ports) + + async def handle_request(self, request): + target_port = next(self.port_cycle) + target_url = f"http://localhost:{target_port}{request.path_qs}" + + async with aiohttp.ClientSession() as session: + try: + # Forward the request + async with session.request( + method=request.method, + url=target_url, + headers=request.headers, + data=request.content, + ) as response: + # Start sending the response + resp = web.StreamResponse(status=response.status, + headers=response.headers) + await resp.prepare(request) + + # Stream the response content + async for chunk in 
response.content.iter_any(): + await resp.write(chunk) + + await resp.write_eof() + return resp + + except Exception as e: + return web.Response(text=f"Error: {str(e)}", status=500) + + +async def main(): + proxy = RoundRobinProxy([8100, 8200]) + app = web.Application() + app.router.add_route('*', '/{path:.*}', proxy.handle_request) + + runner = web.AppRunner(app) + await runner.setup() + site = web.TCPSite(runner, 'localhost', 8000) + await site.start() + + print("Proxy server started on http://localhost:8000") + + # Keep the server running + await asyncio.Event().wait() + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/benchmarks/disagg_benchmarks/visualize_benchmark_results.py b/benchmarks/disagg_benchmarks/visualize_benchmark_results.py new file mode 100644 index 0000000000000..e59d8bb0e6c8c --- /dev/null +++ b/benchmarks/disagg_benchmarks/visualize_benchmark_results.py @@ -0,0 +1,46 @@ +import json + +import matplotlib.pyplot as plt +import pandas as pd + +if __name__ == "__main__": + + data = [] + for name in ['disagg_prefill', 'chunked_prefill']: + for qps in [2, 4, 6, 8]: + with open(f"results/{name}-qps-{qps}.json") as f: + x = json.load(f) + x['name'] = name + x['qps'] = qps + data.append(x) + + df = pd.DataFrame.from_dict(data) + dis_df = df[df['name'] == 'disagg_prefill'] + chu_df = df[df['name'] == 'chunked_prefill'] + + plt.style.use('bmh') + plt.rcParams['font.size'] = 20 + + for key in [ + 'mean_ttft_ms', 'median_ttft_ms', 'p99_ttft_ms', 'mean_itl_ms', + 'median_itl_ms', 'p99_itl_ms' + ]: + + fig, ax = plt.subplots(figsize=(11, 7)) + plt.plot(dis_df['qps'], + dis_df[key], + label='disagg_prefill', + marker='o', + linewidth=4) + plt.plot(chu_df['qps'], + chu_df[key], + label='chunked_prefill', + marker='o', + linewidth=4) + ax.legend() + + ax.set_xlabel('QPS') + ax.set_ylabel(key) + ax.set_ylim(bottom=0) + fig.savefig(f'results/{key}.png') + plt.close(fig) diff --git a/examples/disaggregated_prefill.sh b/examples/disaggregated_prefill.sh new file mode 100644 index 0000000000000..87155273a81d1 --- /dev/null +++ b/examples/disaggregated_prefill.sh @@ -0,0 +1,109 @@ +#!/bin/bash +# This file demonstrates the example usage of disaggregated prefilling +# We will launch 2 vllm instances (1 for prefill and 1 for decode), +# and then transfer the KV cache between them. + +echo "🚧🚧 Warning: The usage of disaggregated prefill is experimental and subject to change 🚧🚧" +sleep 1 + +# Trap the SIGINT signal (triggered by Ctrl+C) +trap 'cleanup' INT + +# Cleanup function +cleanup() { + echo "Caught Ctrl+C, cleaning up..." + # Cleanup commands + pgrep python | xargs kill -9 + pkill -f python + echo "Cleanup complete. Exiting." + exit 0 +} + +export VLLM_HOST_IP=$(hostname -I | awk '{print $1}') + +# install quart first -- required for disagg prefill proxy serve +if python3 -c "import quart" &> /dev/null; then + echo "Quart is already installed." +else + echo "Quart is not installed. Installing..." + python3 -m pip install quart +fi + +# a function that waits vLLM server to start +wait_for_server() { + local port=$1 + timeout 1200 bash -c " + until curl -s localhost:${port}/v1/completions > /dev/null; do + sleep 1 + done" && return 0 || return 1 +} + + +# You can also adjust --kv-ip and --kv-port for distributed inference. 
+ +# prefilling instance, which is the KV producer +CUDA_VISIBLE_DEVICES=0 vllm serve meta-llama/Meta-Llama-3.1-8B-Instruct \ + --port 8100 \ + --max-model-len 100 \ + --gpu-memory-utilization 0.8 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_producer","kv_rank":0,"kv_parallel_size":2}' & + +# decoding instance, which is the KV consumer +CUDA_VISIBLE_DEVICES=1 vllm serve meta-llama/Meta-Llama-3.1-8B-Instruct \ + --port 8200 \ + --max-model-len 100 \ + --gpu-memory-utilization 0.8 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_consumer","kv_rank":1,"kv_parallel_size":2}' & + +# wait until prefill and decode instances are ready +wait_for_server 8100 +wait_for_server 8200 + +# launch a proxy server that opens the service at port 8000 +# the workflow of this proxy: +# - send the request to prefill vLLM instance (port 8100), change max_tokens +# to 1 +# - after the prefill vLLM finishes prefill, send the request to decode vLLM +# instance +# NOTE: the usage of this API is subject to change --- in the future we will +# introduce "vllm connect" to connect between prefill and decode instances +python3 ../benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py & +sleep 1 + +# serve two example requests +output1=$(curl -X POST -s http://localhost:8000/v1/completions \ +-H "Content-Type: application/json" \ +-d '{ +"model": "meta-llama/Meta-Llama-3.1-8B-Instruct", +"prompt": "San Francisco is a", +"max_tokens": 10, +"temperature": 0 +}') + +output2=$(curl -X POST -s http://localhost:8000/v1/completions \ +-H "Content-Type: application/json" \ +-d '{ +"model": "meta-llama/Meta-Llama-3.1-8B-Instruct", +"prompt": "Santa Clara is a", +"max_tokens": 10, +"temperature": 0 +}') + + +# Cleanup commands +pgrep python | xargs kill -9 +pkill -f python + +echo "" + +sleep 1 + +# Print the outputs of the curl requests +echo "" +echo "Output of first request: $output1" +echo "Output of second request: $output2" + +echo "🎉🎉 Successfully finished 2 test requests! 
🎉🎉" +echo "" diff --git a/tests/kv_transfer/disagg_test.py b/tests/kv_transfer/disagg_test.py new file mode 100644 index 0000000000000..adc6150edece6 --- /dev/null +++ b/tests/kv_transfer/disagg_test.py @@ -0,0 +1,119 @@ +import os +import subprocess +import sys +import time +from subprocess import Popen + +import pytest +import requests +import torch + + +# Fixture to set up environment variables and teardown servers after tests +@pytest.fixture(scope="module", autouse=True) +def setup_servers(): + if torch.cuda.device_count() < 4: + pytest.skip("Skipping test: fewer than 4 GPUs available") + + # Set up environment variables + VLLM_HOST_IP = subprocess.check_output("hostname -I | awk '{print $1}'", + shell=True).decode().strip() + os.environ["VLLM_HOST_IP"] = VLLM_HOST_IP + + # Start prefill instance + prefill_cmd = [ + sys.executable, + "-m", + "vllm.entrypoints.openai.api_server", + "--model", + "meta-llama/Meta-Llama-3.1-8B-Instruct", + "--port", + "8100", + "--gpu-memory-utilization", + "0.5", + "--max-model-len", + "1000", + "--kv-transfer-config", + '{"kv_connector":"PyNcclConnector","kv_role":"kv_producer",'\ + '"kv_rank":0,"kv_parallel_size":2}', + ] + prefill_env = os.environ.copy() + prefill_env["CUDA_VISIBLE_DEVICES"] = "0" + prefill_proc = Popen(prefill_cmd, env=prefill_env) + + # Start decode instance + decode_cmd = [ + sys.executable, + "-m", + "vllm.entrypoints.openai.api_server", + "--model", + "meta-llama/Meta-Llama-3.1-8B-Instruct", + "--port", + "8200", + "--gpu-memory-utilization", + "0.5", + "--max-model-len", + "1000", + "--kv-transfer-config", + '{"kv_connector":"PyNcclConnector","kv_role":"kv_consumer",'\ + '"kv_rank":1,"kv_parallel_size":2}', + ] + decode_env = os.environ.copy() + decode_env["CUDA_VISIBLE_DEVICES"] = "1" + decode_proc = Popen(decode_cmd, env=decode_env) + + # Wait for servers to be ready + assert wait_for_server(8100), "Prefill server did not start in time" + assert wait_for_server(8200), "Decode server did not start in time" + + # Yield to the test function and handle teardown after tests + yield + + # Cleanup: kill the processes + prefill_proc.terminate() + decode_proc.terminate() + + # Additional cleanup if needed + prefill_proc.wait() + decode_proc.wait() + + +# Helper function to wait for server +def wait_for_server(port, timeout=240): + start_time = time.time() + while time.time() - start_time < timeout: + try: + response = requests.get(f"http://localhost:{port}/v1/completions") + if response.status_code in [200, 405]: + return True + except requests.ConnectionError: + time.sleep(1) + return False + + +# Test function to send curl requests and validate responses +@pytest.mark.parametrize("prompt", ["San Francisco is a", "Santa Clara is a"]) +def test_disaggregated_prefilling(prompt): + # Send to prefill + response = requests.post("http://localhost:8100/v1/completions", + headers={"Content-Type": "application/json"}, + json={ + "model": + "meta-llama/Meta-Llama-3.1-8B-Instruct", + "prompt": prompt, + "max_tokens": 1, + "temperature": 0 + }) + assert response.status_code == 200 + + # Send to decode + response = requests.post("http://localhost:8200/v1/completions", + headers={"Content-Type": "application/json"}, + json={ + "model": + "meta-llama/Meta-Llama-3.1-8B-Instruct", + "prompt": prompt, + "max_tokens": 10, + "temperature": 0 + }) + assert response.status_code == 200 diff --git a/tests/kv_transfer/module_test.py b/tests/kv_transfer/module_test.py new file mode 100644 index 0000000000000..355461919cd7c --- /dev/null +++ 
b/tests/kv_transfer/module_test.py @@ -0,0 +1,64 @@ +import subprocess +import sys + +import pytest +import torch + + +def run_python_script(script_name, timeout): + script_name = f'kv_transfer/{script_name}' + try: + # Start both processes asynchronously using Popen + process0 = subprocess.Popen( + [sys.executable, script_name], + env={"RANK": + "0"}, # Set the RANK environment variable for process 0 + stdout=sys.stdout, # Pipe stdout to current stdout + stderr=sys.stderr, # Pipe stderr to current stderr + ) + + process1 = subprocess.Popen( + [sys.executable, script_name], + env={"RANK": + "1"}, # Set the RANK environment variable for process 1 + stdout=sys.stdout, # Pipe stdout to current stdout + stderr=sys.stderr, # Pipe stderr to current stderr + ) + + # Wait for both processes to complete, with a timeout + process0.wait(timeout=timeout) + process1.wait(timeout=timeout) + + # Check the return status of both processes + if process0.returncode != 0: + pytest.fail( + f"Test {script_name} failed for RANK=0, {process0.returncode}") + if process1.returncode != 0: + pytest.fail( + f"Test {script_name} failed for RANK=1, {process1.returncode}") + + except subprocess.TimeoutExpired: + # If either process times out, terminate both and fail the test + process0.terminate() + process1.terminate() + pytest.fail(f"Test {script_name} timed out") + except Exception as e: + pytest.fail(f"Test {script_name} failed with error: {str(e)}") + + +# Define the test cases using pytest's parametrize +@pytest.mark.parametrize( + "script_name,timeout", + [ + ("test_lookup_buffer.py", + 60), # Second test case with a 60-second timeout + ("test_send_recv.py", 120) # First test case with a 120-second timeout + ]) +def test_run_python_script(script_name, timeout): + # Check the number of GPUs + if torch.cuda.device_count() < 2: + pytest.skip( + f"Skipping test {script_name} because <2 GPUs are available") + + # Run the test if there are at least 2 GPUs + run_python_script(script_name, timeout) diff --git a/tests/kv_transfer/test_lookup_buffer.py b/tests/kv_transfer/test_lookup_buffer.py new file mode 100644 index 0000000000000..96b0e58713332 --- /dev/null +++ b/tests/kv_transfer/test_lookup_buffer.py @@ -0,0 +1,160 @@ +import os +import random + +import torch +from tqdm import tqdm + +from vllm.config import KVTransferConfig +from vllm.distributed.kv_transfer.kv_lookup_buffer.simple_buffer import ( + SimpleBuffer) +from vllm.distributed.kv_transfer.kv_pipe.pynccl_pipe import PyNcclPipe + +# TODO: the test depends on a lot of fields in the current implementation. 
+# We should have standard interface instead direct field access + + +def test_run(my_rank, buffer, device): + + # buffer should be empty in the beginning + if my_rank == 0: + assert buffer.buffer_size == 0 + assert len(buffer.buffer) == 0 + + print("My rank: %d, device: %s" % (my_rank, device)) + + # insert + tokens = torch.tensor([1, 2, 3]).to(device) + roi = (tokens > 0) + if my_rank == 0: + key = 2.0 * torch.ones([5, 6]).to(device) + value = 3.0 * torch.ones([5, 6]).to(device) + + placeholder = torch.tensor([1]).to(device) + + buffer.insert(tokens, roi, key, value, placeholder) + + torch.distributed.barrier() + + # drop_select + if my_rank == 1: + tok, roi_, key, value, hidden = buffer.drop_select(tokens, roi) + assert torch.allclose(tokens, tok) + assert torch.allclose(roi, roi_) + assert torch.allclose(key, 2.0 * torch.ones([5, 6], device=device)) + assert torch.allclose(value, 3.0 * torch.ones([5, 6], device=device)) + torch.distributed.barrier() + + if my_rank == 0: + assert buffer.buffer_size == 0 + assert len(buffer.buffer) == 0 + + print("Test run passed!") + + +def stress_test(my_rank, buf, device): + + torch.distributed.barrier() + torch.manual_seed(100) + + reqs = [ + ( + torch.rand(100).to(device), # tokens + torch.ones(100).bool().to(device), # roi + torch.rand(100).to(device), # key + torch.rand(100).to(device), # value + torch.rand(100).to(device), # hidden + ) for i in tqdm(range(200)) + ] + + random.seed(my_rank) + random.shuffle(reqs) + + torch.distributed.barrier() + + n = 0 + + # the buffer size can only store 100 reqs + # so the sender will occasionally block to wait for the receiver. + for req in tqdm(reqs): + if my_rank == 0: + buf.insert(*req) + else: + tok, roi, k, v, h = req + tok_, roi_, k_, v_, h_ = buf.drop_select(tok, roi) + + if tok_ is None: + assert roi_ is None + assert k_ is None + assert v_ is None + assert h_ is None + n += 1 + else: + assert torch.allclose(tok, tok_) + assert torch.allclose(roi, roi_) + assert torch.allclose(k, k_) + assert torch.allclose(v, v_) + assert torch.allclose(h, h_) + print('Rank %d done' % my_rank) + torch.distributed.barrier() + + if my_rank == 0: + x = torch.tensor([0]) + torch.distributed.recv(x, 1) + # the # of None received is the kv that are not selected + assert x.item() == len(buf.buffer) + # and the size of the buffer should be 2000 * buffer len + print(buf.buffer_size) + assert buf.buffer_size == 1700 * len(buf.buffer) + else: + torch.distributed.send(torch.tensor([n]), 0) + + print("Passed stress test!") + + +if __name__ == "__main__": + + my_rank = int(os.environ['RANK']) + + torch.distributed.init_process_group( + backend='gloo', + init_method='tcp://localhost:12398', + world_size=2, + rank=my_rank, + ) + + print("initialized! 
My rank is %d" % my_rank) + + config = KVTransferConfig( + kv_connector='PyNcclConnector', + kv_buffer_device='cuda', + kv_buffer_size=1e9, + kv_rank=my_rank, + kv_role="kv_both", # this arg doesn't matter in this test + kv_parallel_size=2, + kv_ip="127.0.0.1", + kv_port=12345, + ) + + data_pipe = PyNcclPipe( + local_rank=my_rank, + config=config, + device="cuda", + port_offset=0, + ) + cpu_pipe = PyNcclPipe( + local_rank=my_rank, + config=config, + device="cpu", + port_offset=1, + ) + + buffer = SimpleBuffer(cpu_pipe, data_pipe, 170000) + + test_run(my_rank, buffer, data_pipe.device) + + stress_test(my_rank, buffer, data_pipe.device) + + buffer.close() + data_pipe.close() + cpu_pipe.close() + print('Done') diff --git a/tests/kv_transfer/test_lookup_buffer.sh b/tests/kv_transfer/test_lookup_buffer.sh new file mode 100644 index 0000000000000..09d7ee018c3f4 --- /dev/null +++ b/tests/kv_transfer/test_lookup_buffer.sh @@ -0,0 +1,3 @@ +#!/bin/bash +RANK=0 python test_lookup_buffer.py & +RANK=1 python test_lookup_buffer.py & \ No newline at end of file diff --git a/tests/kv_transfer/test_send_recv.py b/tests/kv_transfer/test_send_recv.py new file mode 100644 index 0000000000000..65973bf10a4d7 --- /dev/null +++ b/tests/kv_transfer/test_send_recv.py @@ -0,0 +1,155 @@ +import os +import time +from typing import List + +import torch +from tqdm import tqdm + +from vllm.config import KVTransferConfig +from vllm.distributed.kv_transfer.kv_pipe.pynccl_pipe import PyNcclPipe + + +def test_run(my_rank, pipe): + # test run + x = torch.tensor([1]).to(pipe.device) + y = torch.tensor([[2., 3., 4., 8.]]).to(pipe.device) + if my_rank == 0: + pipe.send_tensor(x) + print("sent tensor x") + pipe.send_tensor(y) + print("sent tensor y") + x2 = pipe.recv_tensor() + print("received x2 = ", x2) + y2 = pipe.recv_tensor() + print("received y2 = ", x2) + + else: + x2 = pipe.recv_tensor() + print("received x2 = ", x2) + y2 = pipe.recv_tensor() + print("received y2 = ", x2) + pipe.send_tensor(x) + print("sent tensor x") + pipe.send_tensor(y) + print("sent tensor y") + + assert torch.allclose(x, x2) + assert torch.allclose(y, y2) + + +def stress_test(my_rank, pipe): + + torch.distributed.barrier() + + tensors: List[torch.Tensor] = [] + + torch.manual_seed(0) + + for i in tqdm(range(500)): + mean = torch.rand(1).item() * 100 + std = torch.rand(1).item() * 100 + size = torch.randint(900, 1000, (2, )) + x = torch.normal(mean * 1.0, std * 1.0, + size=size.tolist()).to(pipe.device) + + # 5% probability of sending a None + if torch.rand(1).item() < 0.05: + tensors.append(None) + tensors.append(None) + tensors.append(None) + else: + tensors.append(x) + tensors.append(x.mean().unsqueeze(0)) + tensors.append(x.std().unsqueeze(0)) + + torch.distributed.barrier() + + for i in tqdm(range(500)): + if my_rank == int((i % 10) > 3): + pipe.send_tensor(tensors[3 * i]) + pipe.send_tensor(tensors[3 * i + 1]) + pipe.send_tensor(tensors[3 * i + 2]) + else: + x = pipe.recv_tensor() + mean = pipe.recv_tensor() + std = pipe.recv_tensor() + + if x is None: + assert mean is None + assert std is None + else: + assert torch.allclose(x, tensors[3 * i]) + assert x.mean() == mean[0] + assert x.std() == std[0] + + torch.distributed.barrier() + + +def latency_test(my_rank, pipe, nelement, ntensor): + + latencies = [] + + torch.distributed.barrier() + + for i in tqdm(range(500)): + + tensors = [] + + if my_rank == 0: + # create tensor + tensors = [ + torch.rand(nelement).to(pipe.device) for _ in range(ntensor) + ] + + torch.distributed.barrier() + + if 
my_rank == 0: + t = torch.tensor([time.time()], + dtype=torch.float64).to(pipe.device) + for tensor in tensors: + pipe.send_tensor(tensor) + pipe.send_tensor(t) + else: + for _ in range(ntensor): + pipe.recv_tensor() + t = pipe.recv_tensor() + latencies.append(time.time() - t.item()) + + torch.distributed.barrier() + + print('Latency test passed.') + print('Latency:', torch.tensor(latencies).mean().item() * 1000, 'ms') + + +if __name__ == "__main__": + + my_rank = int(os.environ['RANK']) + + torch.distributed.init_process_group( + backend='gloo', + init_method='tcp://localhost:12398', + world_size=2, + rank=my_rank, + ) + + config = KVTransferConfig( + kv_connector='PyNcclConnector', + kv_buffer_device='cuda', + kv_buffer_size=1e9, + kv_rank=my_rank, + kv_role="kv_both", # this arg doesn't matter in this test + kv_parallel_size=2, + kv_ip="127.0.0.1", + kv_port=12345, + ) + + pipe = PyNcclPipe( + local_rank=my_rank, + config=config, + ) + + test_run(my_rank, pipe) + stress_test(my_rank, pipe) + + # Use this function if you want to test the latency of pipe impl. + # latency_test(my_rank, pipe, 1024 * 8 * 128, 80) diff --git a/tests/kv_transfer/test_send_recv.sh b/tests/kv_transfer/test_send_recv.sh new file mode 100644 index 0000000000000..1e89e246b4992 --- /dev/null +++ b/tests/kv_transfer/test_send_recv.sh @@ -0,0 +1,3 @@ +#!/bin/bash +RANK=0 python3 test_send_recv.py & +RANK=1 python3 test_send_recv.py & \ No newline at end of file diff --git a/vllm/config.py b/vllm/config.py index da043afbe1ae7..5d9e2766c7faa 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2052,6 +2052,88 @@ def __post_init__(self): f"installed. Original error:\n{otel_import_error_traceback}") +class KVTransferConfig(BaseModel): + """Configuration for distributed KV cache transfer.""" + + # The KV connector for vLLM to transmit KV caches between vLLM instances. + kv_connector: Optional[str] = None + + # The device used by kv connector to buffer the KV cache. + # Currently only support 'cuda'. + kv_buffer_device: Optional[str] = "cuda" + + # The buffer size for TorchDistributedConnector. Measured in number of + # bytes. Recommended value: 1e9 (about 1GB). + kv_buffer_size: float = 1e9 + + # Whether this vLLM instance produces, consumes KV cache, or both. Choices + # are 'kv_producer', 'kv_consumer', and 'both'. + kv_role: Optional[str] = None + + # The rank of this vLLM instance in the KV cache transfer. Typical value: + # 0 for prefill instance, 1 for decode instance. + # Currently only 1P1D is supported. + kv_rank: Optional[int] = None + + # The number of parallel instances for KV cache transfer. For + # PyNcclConnector, this should be 2. + kv_parallel_size: int = 1 + + # The KV connector ip, used to build distributed connection + kv_ip: str = "127.0.0.1" + + # The KV connector port, used to build distributed connection + kv_port: int = 14579 + + @classmethod + def from_cli(cls, cli_value: str) -> "KVTransferConfig": + """Parse the CLI value for the compilation config.""" + return KVTransferConfig.model_validate_json(cli_value) + + def model_post_init(self, __context: Any) -> None: + if all([ + self.kv_connector is not None, + self.kv_connector != "PyNcclConnector" + ]): + raise ValueError(f"Unsupported kv_connector: {self.kv_connector}. " + f"Supported connectors are " + f"`PyNcclConnector`.") + + if self.kv_role is not None and self.kv_role not in [ + "kv_producer", "kv_consumer", "kv_both" + ]: + raise ValueError( + f"Unsupported kv_role: {self.kv_role}. 
" + f"Supported roles are `kv_producer`, `kv_consumer`, " + f"and `kv_both`") + + if self.kv_connector is not None and self.kv_role is None: + raise ValueError("Please specify kv_disagg_role when kv_connector " + "is set, supported roles are `kv_producer`, " + "`kv_consumer`, and `kv_both`") + + @property + def is_kv_transfer_instance(self) -> bool: + return self.kv_connector is not None and \ + self.kv_role in ["kv_producer", "kv_consumer", "kv_both"] + + @property + def need_kv_parallel_group(self) -> bool: + # for those database-based connector, vLLM does not need to create + # parallel group, and in that case the kv parallel size will be 1. + return self.kv_connector is not None and self.kv_parallel_size > 1 + + @property + def is_kv_producer(self) -> bool: + return self.kv_connector is not None and \ + self.kv_role in ["kv_producer", "kv_both"] + + @property + def is_kv_consumer(self) -> bool: + return self.kv_connector is not None and \ + self.kv_role in ["kv_consumer", "kv_both"] + + class CompilationLevel: # constants for the levels of the compilation process NO_COMPILATION = 0 @@ -2317,6 +2399,8 @@ class VllmConfig: quant_config: Optional[QuantizationConfig] = None compilation_config: CompilationConfig = field(default=None, init=True) # type: ignore + kv_transfer_config: KVTransferConfig = field(default=None, + init=True) # type: ignore @staticmethod def _get_quantization_config( diff --git a/vllm/distributed/kv_transfer/README.md b/vllm/distributed/kv_transfer/README.md new file mode 100644 index 0000000000000..dab2d10c4c9d0 --- /dev/null +++ b/vllm/distributed/kv_transfer/README.md @@ -0,0 +1,30 @@ + +# Distributed KV cache transfer + +This folder implements distributed KV cache transfer across vLLM instances. +Currently the main usecase is for disaggregated prefilling. + +## Abstractions + +The KV cache transfer contains three layer of abstractions: + +- KV pipe: a FIFO pipe for torch.tensor transmission. Key APIs: `send_tensor` and `recv_tensor`. +- KV lookup buffer: a lookup buffer for KV caches. Key: the tokens, value: the KV caches (and/or hidden states). Key APIs: `insert` and `drop_select` (similar to SQL semantics). +- KV connector: a connector that connects the KV pipe and KV lookup buffer to vLLM. Key APIs: `send_kv_caches_and_hidden_states` and `recv_kv_caches_and_hidden_states`. + +Why we need KV lookup buffer: FIFO pipe itself is not enough as prefill vLLM worker may process requests in a different order compared to decode vLLM worker. Say the QPS is really high, prefill worker may handle requests in order A -> B -> C, but the decode worker may process request C first. This is not the case that can be naturally handled by FIFO pipe, so we provide KV lookup buffer to help translate a FIFO pipe to a lookup buffer. + +NOTE: KV pipe layer is bypassible: you can skip this layer if your distributed +communication service already supports key-value-based lookup (like redis or +RDMA database). + +NOTE: If you want to not only transfer KV caches, but adjust the model execution flow of vLLM as well (for example, allow vLLM to receive KV caches on some tokens and do prefill on the remaining tokens), you can bypass both KV pipe layer and KV lookup buffer layer, and directly implement on KV connector layer. Bear in mind that as vLLM's model input is constantly changing, this implementation will likely be broken when vLLM has new updates. + +## Disaggregated prefilling + +The example usage is in [this file](../../../examples/disaggregated_prefill.sh). 
+ +Here is the diagram of how we run disaggretgated prefilling. + +![Disaggregated prefill workflow](./disagg_prefill_workflow.jpg) + diff --git a/vllm/distributed/kv_transfer/__init__.py b/vllm/distributed/kv_transfer/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg b/vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a25ec5ef52491a0e3faf596669e6cf0e7c7ae175 GIT binary patch literal 142656 zcmeFZ2Ut_v)-D`+C(=7XKv5|{1nERTnuv%F604naBs!odHl%P*G7*Q~lOz z^3lQMdH^*W4g1;4nzS4y59ma_IOW2UbLqvjsyn&vjAF&F*gpwpVC3fE<>NnhUgE;V zOY#a=6_u1#w6E*v>ggNYFuiMLZee-P%E9sBBPVAUS8pF*KYw^YV8qkNsOV=gu_>u9 z($X_tzRJwYFDQIhR9y1@Lrra6eM4hYb60myZ(skX&tJyICnl$+XTG7(nB|q#??2Yo zH*kAD_YV$#;g61glZygC^;fa}F4^zoVk65%Nli^fP4}Bz6qNqtLd8Z+bM`VVyQT@< z11}CyxiET8t>oP5P6n|ncd%UcPevKJ#pTiGaKDN67s>wn1PlL{B>THy|Bwp>V5XuV z4;~d800JQVIy8pjdvhX?hv%IakF{9>D^Ppw-&9FPPD_g?F`0^_=iPRt*BI&(YHM#( zy5+k`a{{uQXAN}#=TTS3M*o$HFhdkLa;(uf3{qa&EmrenluI(%9tgcT?n%4HxEe+b3BuFNLJJh5|B+h^GMX$uZ|Y zwkSqNZGOCZj~w1R1uW+3{eA12kkEgf@b~`u-?o>yxCR$WJcLgH^(8C71MQChUrr0< z@2)vNiS57EO#WlzHl3c1clM)_QvhV(6c7?A{Etn5kI#Rd@YnA8Kc<(AyxvRZoiv4b z96MfI_^+Y6!4$cHa#MDCFxdH z*W%%Mt5d+b?*S*paS1^J`MHcJa0;ltqI(J$N;l|DUcyc@Z+LgP*A5%-PH%x>^A)jR1Y2<$MaV+5E?n!F<%2Wu;DPZPRK60H|VWVyp`oW)=Io#P!;E?w?IUDuSJe2W5 z-oIQfz}oldTFA|OR8x*zo&x>iIWkWq{}u^qKmHC=nI@?}I0bki(=@_2$kyx~-hVb{ zEnpRq>`XpI+>^kIyG9IeY(TXQajig1 zC2$?d9U=%0-yaOby3||kyeh3Ia&EZE+t1mUz-PFvdTV^KN?~8OkF>s-_-1nzA_zxf zklTi(aBZDwl5l%Bglb)7LS-S}`@L)ZFaL}-U9=#Z9)*V8_K$Pb@swBMuX^+(G0r3O zPg);hYmdL@zJos@ba)Mv-kZ5Ks<34U@BhMDz&Y@E06bVL7p`bp1+v?AoAvY$bjUQ< zHEt~o$!oigFF`vO;a_1}4n44gy}zQ=D9&ejmu^(g%o+Zev+6npNEg3(SwHopx9SNk z9TgX}&yHpk>EID_3Sd%z0@M%F&`3_07u>@dyOr@_&Pu=LfkTt#S^E^-?bhX8fF567 zr}vgA0TeX@OTt(?%V%A5yc^f`)vf4VtM70BeY?E0!*omfzrUA{gx{W{E#&(y&ar z9PbsDB74h_X1IFNP>>>Z+W;y_492a11Y7B_{$0YtZH^Hbzg!nz-*aUIMCGC5 zZ?s(!q)Kd5547mGg9yYMr+@pw<_4UHP%rJ2g;wW-3HgUGu}k6u+H~%eEG=5Z8oDi-*t_`~bTA z@8 zdw%)1*?R_rIi5VY}>KPO;JDQ_HLqyJ38-q z(-5XeLZZmPDxOe^dOiAK@5{AcTzeAR;AOmqFp&*!i-uCcGiB$t!8;le*K{oA9<#&nCD7WHjs zko4bdT$_UnyL+aqr9!37PJ}-j7Lks32PA%PC?kT4LI#m_JfS}G%mzhp$vzdqY>=UW zgny#s?43uUJBWlqKF7@zEmq?@9l@rx+Y*9-V`c6G&&%|RxqeDS`(`EagiJs<5MCFu zp+KVgsBXjqwmH!h-1IijEzZ~OX7pL1D>u695J%V(5?QqX~V)QMcYg?Bxw!o(V(f*!O0O11vBI!k@AKN;l)}rrX2n|93 z$)#QJu6DEd-pon5VFaM(XMs)wjYmt1Qom9^*Sc5xEB^Y+jZ{!2WTz%%W$rs!6-Y^^ zfWlFW(fU&WZkLCUe{^S!E)oW48?aGGe0rFBv5rNfNpJ+a4uq-F5Y^G7u^g$2E$>t~mOC(nvGcYC3>+L4+VZyOC{dI7GT(kwL|} zIV$yDS)23I0rs{Iyg+peU&oM&g>7y2HKlbs6N1Dk;Bys`{J+}Th-^4IZ}yX<;X0&q zA+v$NNcV#iZ@p0otWodaoQm{MEA_F$Ifu_qc3M|Y0a@-$6!A+FvnDy#ALN2pB@Z}r z`Klu&)Ri`&0^rd7#czs-#dwE&Jk75BDS#G!OYhlhdsA*hDsUy%rtQdbVV@YJ$#bMuJXjvZ&~i>~!E{&#qF^Sv%dL_Oex_-EX2 zI$Se<)Iptd^Ul`&PJNMM`!*1M8UqAwfP?Wyh^G zdv2d=8B#^pc?w{hQS5ZGr^b6!9$`r+^Ph;Y9BOyH2`i2Uqe=-l4aX zN83jEYiG2s#-{|bs#=V*U%1E18k!8{B8@NZJwURwwUK>@QA4QfAgS5-SJKatAm)dh zCi)L_IB!$@u2A1WR>a{-T>ok?*ZwJ@dFMmg2L16^u&xebkf6SR?8?P|#-*a`OFU__ zJY;d%&k5aCRj=)R`}{%DUGevqLuhG7EQM!r-u>)|%lHQv5AJ?@1)k9@@sHLP&wFY( z`DWYc+=D$``@11LkRD(Zi5+({z104AC&}r(!u!wP>2G4i3#snA&$g4F@+)CbF?f*i zN3tF52u)n%RB%vg$L&E~&9OtQB2`+WvzHuGxL)vV#>*{vrS1=JEuR9;Z(~bIz0zW* z0R7x|Be5EC_;JNZ4Gp3)-m_W7RvR+_BScRmeAcZa$OA7>sz--xza(_??b^MIYf1}r z#I!l!$pa`cOHe0U=BhSieNx_BUqen65xP?Oe(rAlAee#s!Tj)nZpC9ty!Y)`8!i^M8$t)Pw~|8FHrN?DVVjJ%QgI{4}i4 zreUL)prCGzkyxy2!RP10^={eU(ipfe@zaud`FoSxneO*t77w4ehr~2*KeB67DzyAY z+DJcU^p+unw^b%hwaMW1usPZ%AL>B72t9oIaa7a0{z_Yy5YUJ@$ysWj0DJQO!==72 
zHw)4S1f5b7bw!-2h_v`z3`iJp2S1y)sd(AA2o8lt7>{8RY2tgCwC+X7@;%WXvR2q0 z5RhBBC^o@}pFMO)0Y)6RZ=0xrgG*6(CsXUE1fOm7uj@1~D!2+do$PtsJq?yiD$iq8 z5fFSOb_&8pygnWx0bAtob^g&3-4p1S_TE7F#m?ZWbWHq_mb`#hsAj+VMww(z0q6IG ziHB(~f@1NNJvrPVBKWLBBp!?oiksK&wv)r{RI~|A97x8QhaalKiJkIa5@|n+-@Rol z79f_bS4&hrc^)D}l)$;?V89HTrEAA87}uXTOjZc1>v@VPUifb7{J-ow-fG8B(9|Of6^`9CLotc9iU~Y-*0k^X?Qj)K!O5H4ODUG`w*% zXXvBmgQ+E*L1<2EFmL7vl7tsGc(TuqX@J(T8zfN9Z5A(2X~$TLy>rh68?}mgazz>* zqGx1+{h5u#ioV=nr%=MQiXDRSg&5;_B;{kG5vFBk_aLT@sgd7#N#DZpiQ6?UfRO4z zCHN%!6!376V;R(K%u2k8k0{$jE%t!fyE4D39JH0Iw>IC-0CM`}TDwx)=2NrHFjLq7 zJE5_eIh_#v`TBB90@tW-X|V1N>WfPT+~8r8C9&&rxS65sFdvn&AlVUm8)CgZUsr#; z9P1Y11+VN5VHiPBU2Rz!1Y=qd?lla>y_nJj;9YiCp7*o!=ggttfmE3=za_dKVj`qS zIP`9g4Yn_-#MWBj&HU;MMGk8N0Wlr1zOqkdO}ygq!tfH;4wq3K&ZytR8C)zm#CSI3 zG%d+jLEGsRAVv*6=xl!@tHOUF?>Q)T5o^_cq~gJZoOwlFzze4*O#c6g5^yRk9#PGo z0;aFrn`*~x0ts=%Qa#&Kz;pG*6K{n#~a{8NzO@Uqe5vP@07*Ni2%J84uwkuY{dA$u7GK_-f$@^fpfy55!|!P4Vs5 zU%DFvDz~rWmj$%WFutYBF7Q9rh?NoR((6!dkAZR_pcrHo+L7J+F9yVqodG&R9T(bL zZ_`UW*A#x5V^v{|(F=WmPeZ>Obyp~dX+^FYSpCp!B}jLoylX-jP(fiwqn;Y(CkZ5h zVGmplhNf%rjdf=mMf-%BGCJl(vY=gXM>ggRklEqrcGdEDxa0;jE)W@`kQ8Niew--m zmfG_xUC!Flz+Y99^N!ND?>uJqS81hWl`qlp4H{vm(Us9{Y?9Q(jfi%>L!>Im@P%AtA0v^T~1}m9JsB69k#^xVkLTofg@Iv z3T_4>mi&9#g4z#zo(O%V5L?woeY_K^)@L#zRWR-&Bsb*N=Jv7dcH4Mz)9s6$`=2U# zaK8Q9bPn(F;r%zs%DfB$LJdU5SKAvi2e)${L?&X`g&vwMD=9zDenZz{at$Q=jnuCg zXuP$I22!stoVPaShZ~#nlTs_nbjmSp>^0ka!x{!GwUu_nef9Qm-(eJ;$yDtCloRjW zfx4Q(`N>yEci6P4F&!S|U*4*E%|b%33BZhnS32dzJ;UVyiDJ_}i>UElB>N+nF<#FX z%slISxbY#?>qkL#3wxF)4`aZPH>wut0*tiE==A_t^`h6&wK%`%_g1h>q2BHnG45|) z$ljx05xaQbYs=FsVx!i?EBhWngDBpNe~A6O-+m@bwVi#F$KUGRM{j%UFWDOy-GHk# z0DX$^MB2-}0sdhj!bIQp;#Bq09#L1Yj;X9T z@m2=p#nO@|IFbK^;{6l)Y5jMvZG@)z{|)Ke+eJapZl{3Wq^R!$+UWvFPM5&Qch&jE zUS|`%+CoF6t!0h8-2B|l zAuVpe_T3AOeR8Zym0Wm#?CfDY{<8Nn6Nqsg!dfRSV2KmXE!VzUiG)iqd0kN{rJuOa zwVW6*e7>zE?@h164{!pMIph-2^CU~48&%g3S*8|(DlGqz>foz1-r6XWt|KsSxRMu- zn>Z|0F)t_F32ytMy2y%+maN?oDOf5`@SFa2F;UI3bf=DegKSy(5kVZZSTDeHr5$^yia&n{{+Wp zA88VM8IbnnH9HLghx1ZIQu}#)TI!WcG{#-ghP8t@)2Gz#$$tLT>i$^^R!7^n;04}JNMw*Nrwxz}pNsYHPY7}OqloYAv=2ez5>OvN~AU%;)pJS{3N zKw%CY=5hI5a+)c%8|pn!J9UUINQZ|H1NuNlr^=B_q=@BN>42?lTkl?#s2bPD-(5I0 zm1z`Lbp;aj$j<#&IL{#rcRr2)!snpv3Vn;-wZCV%*{#s}^>D+fF~EMVmqL(>ZdK{? 
[... GIT binary patch payload (base85-encoded binary data) omitted; not human-readable ...]
z!|6rLD?C;-Iqj3(@Ol9mJXwB_NMm#@FYyQLmm1H-6+Z0A5b`0%fsFM@U&-tRPKZl{D=pSwDlsh$ zT2m&iUQDOgF+_FTtF7As2`quQ|=x(76{!3$;ci5#xgLmG`gxXQ=ESt7Ka8RmTzetz#NbuTVB&heW zzK|dOT`=u;=VF;EXpI(L+mU?vU2!J6%ns96hmqGK0R!;Tb7Xzer$)}8>Eqf0K*1!- zwYtyqwN7)6+JB6rvouT}XeR1Zny0aGn^)wBl-8Q3Idj%zr3=$`1A>VVXBhV6!rg|0 zLfP5w_2;((Z?+$~5NOx@QGO-+)s7Z$?US9W8ALOS+Yn&--|_!Eu^RZ zAmI*iyi^`@&*b=&6tJBGNfY<+336jQuWFjKU_(W{o+19o_lr5~sTf?OVtm^(KY*OT zuVgAnkkxedPMmOxExd{ZQ@NnHE0Hk;AINwc%X|H&^Je4IZZoNMgY+H-?Z@rc06rDh z*~zB>o#Zl#U28gO-OteWy-Y~)WF0=i(SZFLdy~aC1eGh6rhCYWpw&WkrBc78rlE)dLVy&vCB$RrLi?|hpD>+&&OV~&!|Sc zi4J^EJ;itj+>)D$y`^>L#8n0EXwcjK_(b5tq2fX>A2gHzE`9ac<(LqAJJ)iJ&!Zhs zsV&5ArM(%j-KUWNpfiQI=#MvOlC8aRg0kbgmc`s?~@`)3u ztnk%mi2BxYo*CF*y*j{a`K$MqD;i#hc!+lPEB-xgOy$#hTEu-B0bT&@RAWlMrPjclu+gDjmU*}%&bFXgU9YcULJNvpX z+`lkB|Md93FMw9*kNO#O#UF|-70%O`chp@%_4@`*bwih?xGkU3Mll@K$QTSA)hr}u zslIA=U}?luwz%MGxroH;r#6N6njUUTF)L9iHeCeRDU$!{g$KvO=FW@2I0)bD1+1UhNRL zDVe=(Sd-~TlC@Ma*@$>qm;8;e zp-W7QopbKrF$f*JEss_{T--5=e^gO{@V4`Fl9{Ee_2-MAU`3vG~DaDy7T4(QC^yx#_- zL5$R@2J?$VM}(x^o&Vl zILCyW0-(el-dBL;B>6qdO1X2Y=3My+OUdUs^pQ(;ipyu(W#3PHG4vbML8U;>cfo{rYwJP4S1g*9+F?dOna2H}=h4opEispXagP_unan zJXKwv{EWXn{iyxs?Qc21o$nUkRkz;!sKzCYKb`_Nc7=Y?igQc*QG){@;05-^BCT@%}J> zsJr)0oXCZ>UnW=FUYF){Ugt@H`EH@J4GfYtVe6Y}*ca4Te)v-*-}dE1$yzl_xzMfc z*3Oo9j!(Wb=UULJrIPM@_iWplWtKTl{x{=BU=>gkQT%&_>EAeu>%UijyXj|dRB!dm zYQrDt2esS}-}RKt=X*IJ)2eV@^yK_Jj(8(?edStjp?ax1YhIlVnX_cqbkWlyO_;}T zhF>y8-W(2Wq5yY?&jqa@;Hu}gTX_Mv#gY>^o^X9n{V$HySKWbg;1l$K+X~}80#__4 ztq;FGF92y{45Kq}5y}MH`+ti~?SP6De&_#svj0Q~)-ra!W2p{MYa6W1LJeAU_WJtc zT|n1{{3~6Gxc1pMPc4%gR@JP=v!iYOwEeh_;|pQbg)O22(jthB+$-)WdDNoL;t`34EO&td@MoZmwr7f zdj4ya{i)2i>34ogm%p6|Jgri{rT#lmJ@9Dm#pnX-*JUhzwWWTF?V8{7i7;EFHmv_s z`_}ozPjc^XbN|b*PY~#g-@t4A^m@_w`*tqsUv=?cz_-osYI?zw84JV@*#ni<1J5JB zh%Rvbx=A3&)pFNr&rjT5cT@fg(+}VrdHwtU3_o_6qw#+|>#|$z{?qx^`JJE4-rrXK z*AUO~pW*#qkPpQ}@_>id^~G-y+qy8SChOWB^@Ts!J@>Z<%|E=4zprd^_$;-%LjF2y z%8p1dg;qUY=8*bZu4~1^m9C#PUa1H9Ms5^!TEURO#~`vLFjftDOCl?8`9Fv6^>1AA9Mv;9Ca$8>I zOkA^ZlT|x#i+17!$2S?wPv-y7n74b`kLllhV*Nd?=$6KsJw763?Od5`be5+f-i-)@((&z0a*G%Wsm~}?aqs?h|toxl4dCl5ip1-;M z+v-QN`$yONN6&Xp|M>d8*w^YprlXu^=eNH(uXO5aaby3Z`bU;ep3ZErn~2n$BK2M^ zCZy&sN$jDx=i20FKtwdtEB+~a|F-(ua29jBD!p@)P`lBKE;0j8RjScXQ-9`xPX`6(th9mxpGDIwe?oO1**uBi~k9pwfH zC;xu^E2%vB_v@e6_DC}QKio~;|NJKZ{UY$d9EJ;@C;xu^^CH-pYk&m?lAJw~E0{+e zhzQ%!5Tr#KyjHeX$#l}gDgQoJ|Goqqkwau7U_#}%Hy3zm@8Qq(j~#l6%mIzFAejW5 z4@!ZpMviA-GyeUuf9(E8c~<$K-~7K{0H>xHGQgSS1vn>Mt4EfSN6N5_MCN6-c5p84 z2ZvEQhDQ*NaF7~xAlA}wGz96F28HgPn4h!R`ucnG`mF8nTEOiKsD_vctRRppfzjG* zw7P}1N=DlQ1X@(rVtS;HOnV~We)IP7Yrs8T$nCV1YfGxVC2LM3a{M?}lp$CK9Q!pU rdkm7o6Gw>YK8*l1j6c=_hhTuiFq4-7cb|7o+y5)J7I>Zx`~RB&kn;59 literal 0 HcmV?d00001 diff --git a/vllm/distributed/kv_transfer/kv_connector/__init__.py b/vllm/distributed/kv_transfer/kv_connector/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vllm/distributed/kv_transfer/kv_connector/base.py b/vllm/distributed/kv_transfer/kv_connector/base.py new file mode 100644 index 0000000000000..6089e3babac3e --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_connector/base.py @@ -0,0 +1,122 @@ +""" +KVConnectorBase Class for Distributed KV Cache & Hidden State communication + +The class provides two primary abstract methods: +1. send_kv_caches_and_hidden_states(): Send KV caches and hidden states +2. 
recv_kv_caches_and_hidden_states(): Recv KV caches and hidden states +""" + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, List, Tuple, Union + +import torch + +from vllm.sequence import IntermediateTensors + +if TYPE_CHECKING: + from vllm.config import VllmConfig + from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata + + +class KVConnectorBase(ABC): + """ + Abstract base class for a KV connector. + + The class provides two primary abstract methods: + 1. send_kv_caches_and_hidden_states(): Send KV caches and hidden states + 2. recv_kv_caches_and_hidden_states(): Recv KV caches and hidden states + """ + + @abstractmethod + def __init__( + self, + rank: int, + local_rank: int, + config: "VllmConfig", + ): + raise NotImplementedError + + @abstractmethod + def close(self) -> None: + """Close the buffer and release resources. + + This method is responsible for cleaning up resources related to the + connector when it is no longer needed. + + Raises: + NotImplementedError: This method must be implemented in subclasses. + """ + raise NotImplementedError + + @abstractmethod + def send_kv_caches_and_hidden_states( + self, + model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor], + hidden_or_intermediate_states: Union[torch.Tensor, + IntermediateTensors], + ) -> None: + """ + Send KV caches and hidden states to the connector. + + This method processes the input tokens, KV caches, and + hidden/intermediate states for a given model and sends the data to the + decode instance. + + Args: + model_executable (torch.nn.Module): The model executable containing + start and end layer information. + model_input (ModelInputForGPUWithSamplingMetadata): The input + metadata from vLLM. + kv_caches (List[torch.Tensor]): List of KV caches (keys and values) + for each layer. + hidden_or_intermediate_states (Union[torch.Tensor, + IntermediateTensors]): + The hidden or intermediate states associated with the tokens. + + Returns: + None + + """ + + raise NotImplementedError + + @abstractmethod + def recv_kv_caches_and_hidden_states( + self, model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor] + ) -> Tuple[Union[torch.Tensor, IntermediateTensors], bool, + "ModelInputForGPUWithSamplingMetadata"]: + """ + Receive KV caches and hidden states from the connector. + + This method attempts to retrieve KV caches and hidden states for input + tokens. If all required KV caches and hidden states are received, it + will bypass model input, else it will fall back to normal vLLM model + forwarding. + + Args: + model_executable (torch.nn.Module): + The model executable from vLLM modelrunner. + model_input (ModelInputForGPUWithSamplingMetadata): + The model input from vLLM modelrunner. + kv_caches (List[torch.Tensor]): + List of KV caches for each layer. + + Returns: + - hidden_or_intermediate_states (torch.Tensor or + IntermediateTensors): + Concatenated hidden states if all required data is retrieved, + otherwise `None`. + - bypass_model_exec (bool): + Indicates whether the model execution can be skipped (True) or + needs to be redone (False). + - model_input (ModelInputForGPUWithSamplingMetadata): + Optionally adjusted input metadata for re-execution when + `bypass_model_exec=False`. 
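As an illustration of how this contract is meant to be consumed, here is a hedged sketch; `connector`, `model_executable`, `model_input` and `kv_caches` are placeholders supplied by the caller (e.g. a model runner), not names defined by this patch:

```
# Sketch only: mirrors the intended calling pattern, not vLLM's actual code.
hidden_or_intermediate_states, bypass_model_exec, model_input = \
    connector.recv_kv_caches_and_hidden_states(
        model_executable, model_input, kv_caches)

if not bypass_model_exec:
    # At least one request was missing KV cache or hidden states: fall back
    # to a normal forward pass with the (possibly adjusted) model_input.
    hidden_or_intermediate_states = model_executable(
        input_ids=model_input.input_tokens,
        positions=model_input.input_positions,
        kv_caches=kv_caches,
        attn_metadata=model_input.attn_metadata,
    )
```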
+ + """ + + raise NotImplementedError diff --git a/vllm/distributed/kv_transfer/kv_connector/factory.py b/vllm/distributed/kv_transfer/kv_connector/factory.py new file mode 100644 index 0000000000000..015f892cec933 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_connector/factory.py @@ -0,0 +1,19 @@ +from typing import TYPE_CHECKING + +from .base import KVConnectorBase + +if TYPE_CHECKING: + from vllm.config import VllmConfig + + +class KVConnectorFactory: + + @staticmethod + def create_connector(rank: int, local_rank: int, + config: "VllmConfig") -> KVConnectorBase: + if config.kv_transfer_config.kv_connector == 'PyNcclConnector': + from .simple_connector import SimpleConnector + return SimpleConnector(rank, local_rank, config) + else: + raise ValueError(f"Unsupported connector type: " + f"{config.kv_connector}") diff --git a/vllm/distributed/kv_transfer/kv_connector/simple_connector.py b/vllm/distributed/kv_transfer/kv_connector/simple_connector.py new file mode 100644 index 0000000000000..5870070a54c75 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_connector/simple_connector.py @@ -0,0 +1,261 @@ +""" +Simple KV Cache Connector for Distributed Machine Learning Inference + +The SimpleConnector transfers KV caches between prefill vLLM worker (KV cache +producer) and decode vLLM worker (KV cache consumer) using PyNcclPipe. + +But the logic can be extended to support other pipe and lookup buffer. +""" +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +import torch + +from vllm import _custom_ops as ops +from vllm.config import VllmConfig +from vllm.distributed.kv_transfer.kv_connector.base import KVConnectorBase +from vllm.distributed.kv_transfer.kv_lookup_buffer.simple_buffer import ( + SimpleBuffer) +from vllm.distributed.kv_transfer.kv_pipe.pynccl_pipe import PyNcclPipe +from vllm.logger import init_logger +from vllm.sequence import IntermediateTensors + +if TYPE_CHECKING: + from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata + +logger = init_logger(__name__) + + +class SimpleConnector(KVConnectorBase): + + def __init__( + self, + rank: int, + local_rank: int, + config: VllmConfig, + ): + + self.config = config.kv_transfer_config + + logger.info("Initializing PyNcclConfig under kv_transfer_config %s", + self.config) + + self.lookup_buffer_size = self.config.kv_buffer_size + + self.producer_buffer: Optional[SimpleBuffer] = None + self.consumer_buffer: Optional[SimpleBuffer] = None + + # 2 pipes for every rank in the world + port_offset_base = 2 * rank + + # In disaggregated prefill, the prefill vLLM only uses send pipe + # and the decode vLLM only uses recv pipe + if self.config.is_kv_producer: + + self.producer_data_pipe = PyNcclPipe( + local_rank=local_rank, + config=self.config, + port_offset=port_offset_base, + ) + self.producer_signal_pipe = PyNcclPipe( + local_rank=local_rank, + config=self.config, + port_offset=port_offset_base + 1, + device="cpu", + ) + self.producer_buffer = SimpleBuffer(self.producer_signal_pipe, + self.producer_data_pipe, + self.config.kv_buffer_size) + + else: + + # the current vLLM instance is KV consumer, so it needs to connect + # its recv pipe to the send pipe of KV producder + self.consumer_data_pipe = PyNcclPipe( + local_rank=local_rank, + config=self.config, + port_offset=port_offset_base, + ) + self.consumer_signal_pipe = PyNcclPipe( + local_rank=local_rank, + config=self.config, + port_offset=port_offset_base + 1, + device="cpu", + ) + self.consumer_buffer = SimpleBuffer( + self.consumer_signal_pipe, + 
self.consumer_data_pipe, + self.config.kv_buffer_size, + ) + + def select(self, input_tokens: Optional[torch.Tensor], + roi: Optional[torch.Tensor]) -> List[Optional[torch.Tensor]]: + + assert self.consumer_buffer is not None, "Please initialize the "\ + "consumer buffer before calling select." + return self.consumer_buffer.drop_select(input_tokens, roi) + + def insert(self, input_tokens: torch.Tensor, roi: torch.Tensor, + key: torch.Tensor, value: torch.Tensor, + hidden: torch.Tensor) -> None: + + assert self.producer_buffer is not None, "Please initialize the "\ + "producer buffer before calling insert." + + self.producer_buffer.insert(input_tokens, roi, key, value, hidden) + + def send_kv_caches_and_hidden_states( + self, + model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor], + hidden_or_intermediate_states: Union[torch.Tensor, + IntermediateTensors], + ) -> None: + + input_tokens_tensor = model_input.input_tokens + seq_lens = model_input.attn_metadata.seq_lens + slot_mapping_flat = model_input.attn_metadata.slot_mapping.flatten() + start_layer = model_executable.model.start_layer + end_layer = model_executable.model.end_layer + + # query_lens contains new KV caches that are added to vLLM. + # so we will send them to decode instance + # FIXME(Kuntai): This assume that all requests are prefill. + for idx, slen in enumerate(seq_lens): + start_pos = sum(seq_lens[:idx]) + end_pos = start_pos + slen + current_tokens = input_tokens_tensor[start_pos:end_pos] + + keys, values = [], [] + + for layer_id in range(start_layer, end_layer): + kv_cache = kv_caches[layer_id - start_layer] + + _, _, num_heads, head_size = kv_cache[0].shape + + key_cache = kv_cache[0].reshape(-1, num_heads, head_size) + value_cache = kv_cache[1].reshape(-1, num_heads, head_size) + + current_slot_mapping = slot_mapping_flat[start_pos:end_pos] + + keys.append(key_cache[current_slot_mapping].unsqueeze(0)) + values.append(value_cache[current_slot_mapping].unsqueeze(0)) + + keys = torch.cat(keys, dim=0) + values = torch.cat(values, dim=0) + + self.insert(current_tokens, + torch.ones_like(current_tokens, + dtype=bool), keys, values, + hidden_or_intermediate_states[start_pos:end_pos]) + + logger.debug("[rank%d]: KV send DONE.", torch.distributed.get_rank()) + + def recv_kv_caches_and_hidden_states( + self, model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor] + ) -> Tuple[Union[torch.Tensor, IntermediateTensors], bool, + "ModelInputForGPUWithSamplingMetadata"]: + + # When bypass_model_exec is set to False, it means that at least for one + # request its corresponding KV cache or hidden state is missing. + # In this case we need to do prefilling to recompute missing KV cache + # and hidden states. + bypass_model_exec = True + + input_tokens_tensor = model_input.input_tokens + seq_lens = model_input.attn_metadata.seq_lens + slot_mapping = model_input.attn_metadata.slot_mapping.flatten() + + hidden_or_intermediate_states_for_one_req = [] + + input_tokens_list = [] + num_computed_tokens_list = [] + start_pos_list = [] + + # enumerate different requests + # FIXME(Kuntai): This impl assumes that all requests are prefill. 
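The loop below slices the flattened, batch-concatenated tensors per request using `seq_lens`; a small standalone illustration of that indexing (the lengths here are made up):

```
import torch

# Hypothetical batch: three prefill requests of lengths 4, 2 and 3, flattened
# into a single [sum(seq_lens)] tensor, like model_input.input_tokens.
seq_lens = [4, 2, 3]
flat_tokens = torch.arange(sum(seq_lens))

for idx, slen in enumerate(seq_lens):
    start_pos = sum(seq_lens[:idx])
    end_pos = start_pos + slen
    print(idx, flat_tokens[start_pos:end_pos].tolist())
# 0 [0, 1, 2, 3]
# 1 [4, 5]
# 2 [6, 7, 8]
```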
+ for idx, slen in enumerate(seq_lens): + + start_pos = sum(seq_lens[:idx]) + end_pos = start_pos + slen + current_tokens = input_tokens_tensor[start_pos:end_pos] + num_tokens = slen + + # collecting data for rebuilding the input + input_tokens_list.append(current_tokens) + start_pos_list.append(start_pos) + + ret = self.select(current_tokens, + torch.ones_like(current_tokens, dtype=bool)) + if ret[0] is None: + # didn't find any match. + bypass_model_exec = False + num_computed_tokens_list.append(0) + continue + + roi: torch.Tensor = ret[1] + keys: torch.Tensor = ret[2] + values: torch.Tensor = ret[3] + hidden: torch.Tensor = ret[4] + + num_computed_tokens = roi.shape[0] + num_computed_tokens_list.append(num_computed_tokens) + + # check if both KV cache and the hidden states are received + # If not, need to redo the forwarding to compute missing states + if not all([(num_computed_tokens == num_tokens), hidden is not None + ]): + bypass_model_exec = False + + # update the end position based on how many tokens are cached. + end_pos = start_pos + num_computed_tokens + + # put received KV caches into paged memory + for i in range(model_executable.model.start_layer, + model_executable.model.end_layer): + + kv_cache = kv_caches[i - model_executable.model.start_layer] + layer = model_executable.model.layers[i] + + key_cache, value_cache = kv_cache[0], kv_cache[1] + ops.reshape_and_cache_flash( + keys[i - model_executable.model.start_layer].to( + key_cache.device), + values[i - model_executable.model.start_layer].to( + value_cache.device), + key_cache, + value_cache, + slot_mapping[start_pos:end_pos], + layer.self_attn.attn.kv_cache_dtype, + layer.self_attn.attn._k_scale, + layer.self_attn.attn._v_scale, + ) + + hidden_or_intermediate_states_for_one_req.append(hidden) + + if not bypass_model_exec: + # Some of the KV cache is not retrieved + # Here we will fall back to normal model forwarding + # But optionally you can adjust model_input so that you only do + # prefilling on those tokens that are missing KV caches. + logger.debug( + "[rank%d]: Failed to receive all KVs and hidden " + "states, redo model forwarding.", torch.distributed.get_rank()) + hidden_or_intermediate_states = None + + else: + logger.debug( + "[rank%d]: Successfully received all KVs and hidden " + "states, skip model forwarding.", torch.distributed.get_rank()) + hidden_or_intermediate_states = torch.cat( + hidden_or_intermediate_states_for_one_req, dim=0) + + return hidden_or_intermediate_states, bypass_model_exec, model_input + + def close(self): + self.producer_data_pipe.close() + self.producer_signal_pipe.close() + self.consumer_data_pipe.close() + self.consumer_signal_pipe.close() diff --git a/vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py b/vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vllm/distributed/kv_transfer/kv_lookup_buffer/base.py b/vllm/distributed/kv_transfer/kv_lookup_buffer/base.py new file mode 100644 index 0000000000000..bad119a1aa929 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_lookup_buffer/base.py @@ -0,0 +1,108 @@ +""" +This file contains a new class `KVLookupBufferBase` that allows developers to +think of KV cache operations as inserting new KV cache entries (`insert`) +into the lookup buffer and querying existing KV caches (`drop_select`) +from the lookup buffer. + +All distributed communications are abstracted behind this class. 
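A hedged sketch of the intended producer/consumer usage of this interface; the buffer instances and the `key`/`value`/`hidden` tensors below are placeholders, not objects defined in this patch:

```
import torch

tokens = torch.tensor([101, 7592, 2088])
roi = torch.ones_like(tokens, dtype=torch.bool)  # KV covers every token

# Prefill (producer) side: publish KV caches and the final hidden states.
producer_buffer.insert(tokens, roi, key, value, hidden)

# Decode (consumer) side: retrieve the matching entry and remove it.
tokens_out, roi_out, key, value, hidden = consumer_buffer.drop_select(tokens, roi)
```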
+""" + +from abc import ABC, abstractmethod +from typing import List, Optional + +import torch + + +class KVLookupBufferBase(ABC): + """ + Abstract base class for a lookup buffer. + + This class provides an abstraction for a key-value (KV) cache lookup buffer. + + The key of the lookup buffer: + - input_tokens: token IDs of the request + - roi: a binary mask on top of input_tokens. + - Purpose of roi: Since KV cache may only be available for a subset of + tokens in the input (for example, when vLLM is connected to an external + KV cache service), roi specifies the subset of tokens that the KV cache + is associated with. + - NOTE: roi can be further extended to describe which part of KV the + current process is holding (each process may only hold a part of KV + due to TP and PP). This is not implemented for now. + + The value of the lookup buffer: + - key: the key tensor in the KV cache + - value: the value tensor in the KV cache + - hidden: the final hidden state generated by model forwarding. This allows + vLLM to bypass further model forwarding by transmitting the hidden state. + """ + + @abstractmethod + def insert(self, input_tokens: torch.Tensor, roi: torch.Tensor, + key: torch.Tensor, value: torch.Tensor, + hidden: torch.Tensor) -> None: + """Insert into the lookup buffer. + + The functionality is similar to the following python statement + ``` + buffer[input_tokens, roi] = [key, value, hidden] + ``` + + FIXME: in the future, we should only have two arguments, key and value, + where key is a tensor dict and value is a tensor dict. + + FIXME: we should transmit both sampler outputs and the hidden states. + + Args: + input_tokens (torch.Tensor): token IDs. + roi (torch.Tensor): A binary mask on top of the input tokens + key (torch.Tensor): The key tensor in the KV cache. + value (torch.Tensor): The value tensor in the KV cache. + hidden (torch.Tensor): The final hidden state tensor generated + during model forwarding to bypass model + forwarding. + + Raises: + NotImplementedError: This method must be implemented in subclasses. + """ + raise NotImplementedError + + @abstractmethod + def drop_select( + self, input_tokens: Optional[torch.Tensor], + roi: Optional[torch.Tensor]) -> List[Optional[torch.Tensor]]: + """Select and *drop* KV cache entries from the lookup buffer. + + The functionality is similar to the following python statements + ``` + ret = buffer.pop(input_tokens, roi) + return ret + ``` + + If `input_tokens` and `roi` is `None`, it means selecting any of the + KV caches in the buffer, return, and remove it from the buffer, useful + when offloading KV cache to KV cache storage service. + + Args: + input_tokens (torch.Tensor): token IDs. + roi (torch.Tensor): A binary mask on top of the input tokens + + Returns: + List[Optional[torch.Tensor]]: A list of tensors. Can be None. + + Raises: + NotImplementedError: This method must be implemented in subclasses. + """ + raise NotImplementedError + + @abstractmethod + def close(self) -> None: + """Close the buffer and release resources. + + This method is responsible for cleaning up resources related to the + lookup buffer when it is no longer needed. + + Raises: + NotImplementedError: This method must be implemented in subclasses. 
+ """ + raise NotImplementedError diff --git a/vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py b/vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py new file mode 100644 index 0000000000000..fe8d8d7375f36 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py @@ -0,0 +1,242 @@ +""" + Implements a distributed key-value (KV) cache transfer mechanism. + + Key Features: + - Distributed KV cache transmission using PyNccl pipes. + - Non-blocking `insert`, blocking `drop_select`. + - Use CPU signal pipe to avoid racing condition + - Handles buffer size constraints and provide backpressure mechanism to + stop the prefill instance when the decode instance is slow. +""" +import threading +import time +from collections import deque +from typing import Deque, List, Optional, Union + +import torch + +from vllm.distributed.kv_transfer.kv_lookup_buffer.base import ( + KVLookupBufferBase) +from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +class SimpleBuffer(KVLookupBufferBase): + + def __init__(self, signal_pipe: KVPipeBase, data_pipe: KVPipeBase, + buffer_size_thresh: float): + """ + signal_pipe: on CPU + + NOTE: on-device recv will block all threads in the process, making the + KV cache producer unable to listen to new request while transmitting + KV cache. Luckily CPU recv only blocks the current thread so we use + CPU recv to listen to new request. + + data_pipe: on device (e.g. GPU) + """ + + self.buffer: Deque[List[torch.Tensor]] = deque() + + self.buffer_size = 0 + self.buffer_size_threshold = buffer_size_thresh + self.buffer_lock = threading.Lock() + self.signal_pipe = signal_pipe + self.data_pipe = data_pipe + self.request_handling_thread: Optional[threading.Thread] = None + + self.normal_signal = torch.tensor([0], device="cpu") + self.end_signal = None + + def _matches(self, tokens_roi_sender: List[torch.Tensor], + tokens_roi_recver: List[torch.Tensor]): + + # tokens_roi_sender: tokens and roi of the producer (in the buffer) + # tokens_roi_recver: tokens and roi of the consumer (query) + + tokens_sender = tokens_roi_sender[0] + tokens_recver = tokens_roi_recver[0] + roi_sender = tokens_roi_sender[1] + roi_recver = tokens_roi_recver[1] + + if tokens_recver is None: + # consumer sends an empty request + # semantics: DROP SELECT * LIMIT 1 + # so any of the data in the buffer can be drop-selected + return True + + # Assuming that roi is a binary mask on tokens + tokens_sender = tokens_sender[roi_sender] + tokens_recver = tokens_recver[roi_recver] + + # simple common prefix matching + min_length = min(len(tokens_sender), len(tokens_recver)) + if torch.allclose(tokens_sender[:min_length], + tokens_recver[:min_length]): + return min_length + + return 0 + + def _send_tensor_and_dec_size(self, + tensor: Optional[torch.Tensor]) -> None: + + assert tensor is not None, "Use self.data_pipe.send(None) instead" + self.buffer_size -= tensor.element_size() * tensor.numel() + if tensor.dtype == torch.bool: + tensor = tensor.float() + self.data_pipe.send_tensor(tensor) + + def _get_element_size(self, data: Optional[Union[List, torch.Tensor]]): + + if isinstance(data, torch.Tensor): + return data.element_size() * data.numel() + if not data: + # cannot perform `not data` on a tensor + # so this check needs to go after the check above + return 0 + + raise AssertionError(f"Unknown data type {type(data)}") + + def _add_to_buffer(self, input_tokens: torch.Tensor, roi: 
torch.Tensor, + key: torch.Tensor, value: torch.Tensor, + hidden: torch.Tensor): + + if isinstance(input_tokens, torch.Tensor): + input_tokens = input_tokens.clone() + if isinstance(roi, torch.Tensor): + roi = roi.clone() + if isinstance(key, torch.Tensor): + key = key.clone() + if isinstance(value, torch.Tensor): + value = value.clone() + if isinstance(hidden, torch.Tensor): + hidden = hidden.clone() + + buffer_item = [input_tokens, roi, key, value, hidden] + + with self.buffer_lock: + for data in buffer_item: + self.buffer_size += self._get_element_size(data) + self.buffer.append(buffer_item) + + def _is_end_signal(self, signal): + return signal is None + + def drop_select_handler(self): + + try: + + while True: + signal = self.signal_pipe.recv_tensor() + if self._is_end_signal(signal): + logger.info("Received end signal!") + break + + input_tokens = self.data_pipe.recv_tensor() + + roi = self.data_pipe.recv_tensor() + assert roi is not None, "Please provide the roi when sending "\ + "drop-select request" + roi = (roi > 0.5) + tokens_roi_recver = [input_tokens, roi] + + matched_length = 0 + + # perform input tokens and roi matching + # FIXME: this matching is O(n), ideally it should be O(1) + # but this buffer size won't (and shouldn't) be too large so + # the fix is not urgent. + with self.buffer_lock: + + for _ in range(len(self.buffer)): + + temp_length = self._matches(self.buffer[0], + tokens_roi_recver) + if temp_length > 0: + matched_length = temp_length + break + # rotate the element we just accessed to the end + self.buffer.rotate(-1) + + if matched_length > 0: + # need to clone the tensor + # in case the tensor is freed before sending finishes + matched_item = self.buffer.popleft() + for tensor in matched_item: + self._send_tensor_and_dec_size(tensor) + + else: + # no match, just send None + for _ in range(5): + self.data_pipe.send_tensor(None) + + except RuntimeError as e: + if 'Connection closed by peer' not in str(e): + raise e + + logger.debug("Closing drop_select_handler") + + def drop_select( + self, input_tokens: Optional[torch.Tensor], + roi: Optional[torch.Tensor]) -> List[Optional[torch.Tensor]]: + + assert self.request_handling_thread is None, \ + "drop_select should be called by the KV cache consumer "\ + "(e.g. the decode vLLM instance)" + + if isinstance(input_tokens, torch.Tensor): + input_tokens = input_tokens.clone() + if isinstance(roi, torch.Tensor): + roi = roi.clone().float() + + self.signal_pipe.send_tensor(self.normal_signal) + self.data_pipe.send_tensor(input_tokens) + self.data_pipe.send_tensor(roi) + + input_tokens = self.data_pipe.recv_tensor() + roi = self.data_pipe.recv_tensor() + if roi is not None: + # convert from float tensor to bool tensor + # as PyNccl does not support sending bool tensor + roi = (roi > 0.5) + key = self.data_pipe.recv_tensor() + value = self.data_pipe.recv_tensor() + hidden = self.data_pipe.recv_tensor() + + return [input_tokens, roi, key, value, hidden] + + def full_handler(self): + time.sleep(0.001) + + def insert(self, input_tokens: torch.Tensor, roi: torch.Tensor, + key: torch.Tensor, value: torch.Tensor, + hidden: torch.Tensor) -> None: + + if self.buffer_size > self.buffer_size_threshold: + # log outside the while loop to avoid this message being logged + # repeatedly. + logger.debug("KV transfer buffer is full. 
Handling...") + while self.buffer_size > self.buffer_size_threshold: + self.full_handler() + + self._add_to_buffer(input_tokens, roi, key, value, hidden) + + # when calling the insert, the current process is a sender + # need to launch the request handler and start listening to request. + if self.request_handling_thread is None: + self.request_handling_thread = threading.Thread( + target=self.drop_select_handler) + self.request_handling_thread.start() + + def close(self): + + if hasattr(self, "request_handling_thread" + ) and self.request_handling_thread is not None: + self.request_handling_thread.join() + + else: + # TODO: have a explicit close signal and have a explicit way to + # check if it's requester + self.signal_pipe.send_tensor(self.end_signal) diff --git a/vllm/distributed/kv_transfer/kv_pipe/__init__.py b/vllm/distributed/kv_transfer/kv_pipe/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vllm/distributed/kv_transfer/kv_pipe/base.py b/vllm/distributed/kv_transfer/kv_pipe/base.py new file mode 100644 index 0000000000000..4b0cb44cc5b81 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_pipe/base.py @@ -0,0 +1,65 @@ +""" +This file defines an interface `KVPipeBase` +that provides an abstraction for sending and receiving tensors, or None, via +distributed communications. + +All classes instantiated from this interface are assumed to be a FIFO pipe. + +If your distributed communication platform already supports key-value lookup, +you can bypass this interface and directly start from `kv_lookup_buffer`. +""" + +from abc import ABC, abstractmethod +from typing import Optional + +import torch + + +class KVPipeBase(ABC): + """ + This class provides an interface for sending and receiving tensors, or + None, by distributed communications. + """ + + @abstractmethod + def send_tensor(self, tensor: Optional[torch.Tensor]) -> None: + """Send a tensor, or None, via the pipe. + + Need to support sending None -- important for error handling. + + TODO: add a `key` argument so that we can use traditional + key-value database as the distributed communication mechanism behind + the pipe. + + Args: + tensor (Optional[torch.Tensor]): The tensor to be sent. Can be None. + + Raises: + NotImplementedError: This method must be implemented in subclasses. + """ + raise NotImplementedError + + @abstractmethod + def recv_tensor(self) -> Optional[torch.Tensor]: + """Receive a tensor (can be None) from the pipeline. + + Returns: + Optional[torch.Tensor]: The tensor received from the pipeline. Can + be None. + + Raises: + NotImplementedError: This method must be implemented in subclasses. + """ + raise NotImplementedError + + @abstractmethod + def close(self) -> None: + """Close the pipeline and release resources. + + This method is responsible for closing the communication pipeline + and releasing any resources associated with it. + + Raises: + NotImplementedError: This method must be implemented in subclasses. + """ + raise NotImplementedError diff --git a/vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py b/vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py new file mode 100644 index 0000000000000..98222fa67e492 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py @@ -0,0 +1,276 @@ +""" + This module implements a PyNccl pipe for sending and receiving + Optional[torch.Tensor] between distributed ranks with advanced + communication features. 
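Any FIFO transport that satisfies the `KVPipeBase` contract above can back the lookup buffer; a minimal single-process sketch using a thread-safe queue, purely illustrative and not part of vLLM:

```
import queue
from typing import Optional

import torch


class InProcessPipe:
    """Toy KVPipeBase-style pipe backed by an in-memory FIFO queue."""

    def __init__(self) -> None:
        self._q: "queue.Queue[Optional[torch.Tensor]]" = queue.Queue()

    def send_tensor(self, tensor: Optional[torch.Tensor]) -> None:
        # Sending None must be supported (used for control/error signalling).
        self._q.put(None if tensor is None else tensor.clone())

    def recv_tensor(self) -> Optional[torch.Tensor]:
        # Blocking, FIFO-ordered receive.
        return self._q.get()

    def close(self) -> None:
        pass


pipe = InProcessPipe()
pipe.send_tensor(torch.ones(2, 3))
pipe.send_tensor(None)
print(pipe.recv_tensor().shape)  # torch.Size([2, 3])
print(pipe.recv_tensor())        # None
```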
+ + Key Features: + - Supports sending and receiving tensors with metadata + - Handles both CUDA and CPU device communications + - Implements a non-blocking tensor transfer mechanism + - Manages buffer size and provides backpressure control + - Supports distributed process groups with configurable parameters +""" + +import threading +import time +from concurrent.futures import ThreadPoolExecutor +from typing import Callable, Dict, Optional, Tuple + +import torch + +from vllm.config import KVTransferConfig +from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator +from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase +from vllm.distributed.utils import StatelessProcessGroup +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +class BrokenPipeException(Exception): + + def __init__(self, message): + self.message = message + super().__init__(self.message) + + +Metadata = Dict[str, Optional[torch.Tensor]] + + +class PyNcclPipe(KVPipeBase): + + METADATA_LENGTH = 16 + MAX_TENSOR_DIMENSIONS = 14 + METADATA_DTYPE = torch.int64 + + def __init__(self, + local_rank: int, + config: KVTransferConfig, + device: Optional[str] = None, + port_offset: int = 0): + self.config = config + self.local_rank = local_rank + self.kv_rank = self.config.kv_rank + self.kv_parallel_size = self.config.kv_parallel_size + if device is None: + self.device = self._select_device(self.config.kv_buffer_device) + else: + self.device = self._select_device(device) + + # build distributed connection and send/recv implementation + self.group = StatelessProcessGroup.create( + host=self.config.kv_ip, + port=self.config.kv_port + port_offset, + rank=self.kv_rank, + world_size=self.kv_parallel_size, + ) + # add a barrier to make sure the connection is initiated properly + self.group.barrier() + impl = self._get_device_send_recv_impl(self.group) + self.device_send_func, self.device_recv_func = impl + # set target rank + self.target_rank_for_send = (self.kv_rank + 1) % self.kv_parallel_size + self.target_rank_for_recv = (self.kv_rank - 1) % self.kv_parallel_size + + # transportation-related variables + self.transport_thread: Optional[ThreadPoolExecutor] = None + self.buffer_size = 0 + self.buffer_size_lock = threading.Lock() + self.buffer_size_thresh = self.config.kv_buffer_size + + def _get_device_send_recv_impl( + self, group: StatelessProcessGroup + ) -> Tuple[Callable[[torch.Tensor, int], None], Callable[ + [torch.Tensor, int], None]]: + + send: Callable[[torch.Tensor, int], None] + recv: Callable[[torch.Tensor, int], None] + if self.device.type == "cuda": + # use PyNCCL for send / recv + comm = PyNcclCommunicator(group, device=self.local_rank) + comm.disabled = False + send, recv = comm.send, comm.recv # type: ignore + else: + # This send / recv implementation here is NOT intended to transfer + # KV caches (and should NOT be repurposed to transfer KV caches). + # Currently it is only used to transmit control-plane messages + # for PyNcclBuffer. + send = group.send_obj + + def my_recv(x, src): + x[...] = group.recv_obj(src) + + recv = my_recv + + return send, recv + + def _select_device(self, device: str): + logger.info("Selecting device: %s", device) + if device == "cuda": + return torch.device(f"cuda:{self.local_rank}") + else: + return torch.device("cpu") + + def _make_metadata(self, tensor: Optional[torch.Tensor]) -> Metadata: + """ + Create the metadata as a dictionary based on the input tensor. + + Parameters: + - tensor: The input tensor or None if no tensor is provided. 
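Concretely, the metadata produced for a half-precision tensor versus `None` would look like this (illustrative values):

```
import torch

t = torch.zeros(2, 8, dtype=torch.float16)
print({"dtype": t.dtype, "shape": t.shape})
# {'dtype': torch.float16, 'shape': torch.Size([2, 8])}
print({"dtype": None, "shape": None})  # what is sent when the tensor is None
```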
+ + Returns: + - metadata: A dictionary with the following keys: + - "dtype": The data type of the tensor or None. + - "shape": The shape of the tensor or None. + """ + if tensor is None: + return {"dtype": None, "shape": None} + else: + return {"dtype": tensor.dtype, "shape": tensor.shape} + + def _prepare_recv_buffer(self, metadata: Metadata) -> torch.Tensor: + """ + Create a buffer to receive the tensor based on the provided metadata. + + Parameters: + - metadata: A dictionary with keys "dtype" and "shape", describing + the tensor's data type and shape. + + Returns: + - buffer: A tensor of the specified type and shape, allocated on + self.device. + """ + return torch.empty(metadata["shape"], + dtype=metadata["dtype"], + device=self.device) + + def _send_metadata(self, metadata: Metadata): + """ + Send the metadata dictionary to the target rank. + + Parameters: + - metadata: A dictionary with keys "dtype" and "shape". + """ + self.group.send_obj(metadata, self.target_rank_for_send) + + def _recv_metadata(self) -> Metadata: + """ + Receive the metadata dictionary from the target rank. + + Returns: + - metadata: A dictionary with keys "dtype" and "shape" describing + the tensor. + """ + return self.group.recv_obj(self.target_rank_for_recv) + + def _send_impl(self, tensor: Optional[torch.Tensor]) -> None: + """ + The actual implementation of sending the tensor and its metadata to the + target rank. + + Parameters: + - tensor: The input tensor to be sent, or None if no tensor is + being sent. + """ + metadata = self._make_metadata(tensor) + self._send_metadata(metadata) + if tensor is not None: + self.device_send_func(tensor.to(self.device), + self.target_rank_for_send) + + def _recv_impl(self) -> Optional[torch.Tensor]: + """ + The actual implementation of receiving a tensor and its metadata from + the target rank. + + Returns: + - buffer: The received tensor, or None if no tensor is received. + """ + metadata = self._recv_metadata() + if metadata["dtype"] is None: + return None + buffer = self._prepare_recv_buffer(metadata) + self.device_recv_func(buffer, self.target_rank_for_recv) + + return buffer + + def send_tensor_wrapper(self, tensor: Optional[torch.Tensor], + tensor_size: int) -> None: + """ + Wrapper for _send_impl to handle exceptions and update buffer size. + """ + try: + self._send_impl(tensor) + + with self.buffer_size_lock: + self.buffer_size -= tensor_size + except Exception as e: + logger.error("[rank%d]: Exception when trying to send %s, msg: %s", + torch.distributed.get_rank(), str(tensor), str(e)) + import traceback + traceback.print_exc() + + def block_if_full(self): + """ + Block the current thread if the buffer size is larger than the + threshold. + """ + while self.buffer_size > self.buffer_size_thresh: + logger.debug("KV cache transfer pipe is full. Waiting...") + time.sleep(0.05) + + def send_tensor(self, tensor: Optional[torch.Tensor]) -> None: + """ + Sends a tensor and its metadata to the destination rank in a + non-blocking way. + + Parameters: + - tensor: The tensor to send, or None if no tensor is being sent. 
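On the wire, each logical transfer is a metadata message optionally followed by the tensor payload, and sends are serialized through the pipe's single-worker executor, so the receiver sees them in submission order. A rough sketch of the paired calls; the `pipe` objects on each rank are placeholders:

```
import torch

# Rank 0 (sender): both calls return quickly; the actual transfers run on the
# pipe's internal single-worker executor, in submission order.
pipe.send_tensor(torch.randn(4, 16))
pipe.send_tensor(None)  # e.g. an end-of-data marker

# Rank 1 (receiver): blocking calls, received in the same order.
payload = pipe.recv_tensor()   # tensor of shape (4, 16)
marker = pipe.recv_tensor()    # None
```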
+ """ + if self.transport_thread is None: + self.transport_thread = ThreadPoolExecutor(max_workers=1) + + if tensor is not None: + tensor_size = tensor.element_size() * tensor.numel() + else: + tensor_size = 0 + + self.block_if_full() + + with self.buffer_size_lock: + self.buffer_size += tensor_size + + self.transport_thread.submit(self.send_tensor_wrapper, tensor, + tensor_size) + + def recv_tensor(self) -> Optional[torch.Tensor]: + """ + Receives a tensor and its metadata from the source rank. Blocking call. + + Returns: + - tensor: The received tensor, or None if no tensor is received. + """ + if self.transport_thread is None: + self.transport_thread = ThreadPoolExecutor(max_workers=1) + + future = self.transport_thread.submit(self._recv_impl) + + try: + tensor = future.result() + except Exception as e: + logger.error("Encountering exception in KV receiving thread") + logger.error("%s", e) + logger.error("My device: %s", self.device) + import traceback + traceback.print_exc() + raise e + + return tensor + + def close(self): + """ + Close the pipe and release associated resources. + """ + if hasattr(self, + "transport_thread") and self.transport_thread is not None: + self.transport_thread.shutdown() diff --git a/vllm/distributed/kv_transfer/kv_transfer_agent.py b/vllm/distributed/kv_transfer/kv_transfer_agent.py new file mode 100644 index 0000000000000..9ce97851dc849 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_transfer_agent.py @@ -0,0 +1,75 @@ +"""A centralized entrypoint to perform distributed KV cache transfer. + +This implementation is a shim wrapper on two APIs exposed by `kv_connector`: +1. `send_kv_caches_and_hidden_states` +2. `recv_kv_caches_and_hidden_states +""" +from typing import TYPE_CHECKING, List, Tuple, Union + +if TYPE_CHECKING: + from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata + from vllm.config import VllmConfig + +import torch + +from vllm.distributed.kv_transfer.kv_connector.factory import ( + KVConnectorFactory) +from vllm.logger import init_logger +from vllm.sequence import IntermediateTensors + +logger = init_logger(__name__) + + +class KVTransferAgent: + """ + A class designated for distributed KV transfer + + Target use cases: + 1. Disaggregated prefill + 2. Remote KV cache storage + """ + + def __init__( + self, + rank: int, + local_rank: int, + config: "VllmConfig", + ): + + self.config = config + + if config.kv_transfer_config is None: + raise ValueError("KVTransferConfig is not set in the VllmConfig," + " cannot initialize KVConnector.") + + assert self.config.kv_transfer_config.is_kv_transfer_instance, "KV"\ + "TransferAgent should only be used when kv_connector is set." 
+ + self.connector = KVConnectorFactory.create_connector( + rank, local_rank, config) + + def send_kv_caches_and_hidden_states( + self, + model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor], + hidden_or_intermediate_states: Union[torch.Tensor, + IntermediateTensors], + ) -> None: + + self.connector.send_kv_caches_and_hidden_states( + model_executable, model_input, kv_caches, + hidden_or_intermediate_states) + + def close(self) -> None: + self.connector.close() + + def recv_kv_caches_and_hidden_states( + self, model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor] + ) -> Tuple[Union[torch.Tensor, IntermediateTensors], bool, + "ModelInputForGPUWithSamplingMetadata"]: + + return self.connector.recv_kv_caches_and_hidden_states( + model_executable, model_input, kv_caches) diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index ccbe00386c5da..34815d7f0aa78 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -27,18 +27,23 @@ from contextlib import contextmanager, nullcontext from dataclasses import dataclass from multiprocessing import shared_memory -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, + Union) from unittest.mock import patch import torch import torch.distributed from torch.distributed import Backend, ProcessGroup +import vllm.distributed.kv_transfer.kv_transfer_agent as kv_transfer import vllm.envs as envs from vllm.logger import init_logger from vllm.platforms import current_platform from vllm.utils import direct_register_custom_op, supports_custom_op +if TYPE_CHECKING: + from vllm.config import VllmConfig + @dataclass class GraphCaptureContext: @@ -904,6 +909,14 @@ def get_pp_group() -> GroupCoordinator: # kept for backward compatibility get_pipeline_model_parallel_group = get_pp_group +_KV_TRANSFER: Optional[kv_transfer.KVTransferAgent] = None + + +def get_kv_transfer_group() -> kv_transfer.KVTransferAgent: + assert _KV_TRANSFER is not None, ( + "disaggregated KV cache transfer parallel group is not initialized") + return _KV_TRANSFER + @contextmanager def graph_capture(): @@ -1052,6 +1065,26 @@ def initialize_model_parallel( group_name="pp") +def ensure_kv_transfer_initialized(vllm_config: "VllmConfig") -> None: + """ + Initialize KV cache transfer parallel group. 
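The intended wiring, mirroring the worker and model-runner hunks later in this patch; the surrounding objects (`vllm_config`, `model_executable`, `model_input`, `kv_caches`, `hidden_or_intermediate_states`) are placeholders:

```
# Once per worker process, after the usual distributed initialization:
ensure_kv_transfer_initialized(vllm_config)

# Later, e.g. inside the model runner's execute_model:
kv_agent = get_kv_transfer_group()
kv_agent.send_kv_caches_and_hidden_states(
    model_executable, model_input, kv_caches, hidden_or_intermediate_states)
```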
+ """ + + global _KV_TRANSFER + + if vllm_config.kv_transfer_config is None: + return + + if all([ + vllm_config.kv_transfer_config.need_kv_parallel_group, + _KV_TRANSFER is None + ]): + _KV_TRANSFER = kv_transfer.KVTransferAgent( + rank=get_world_group().rank, + local_rank=get_world_group().local_rank, + config=vllm_config) + + def ensure_model_parallel_initialized( tensor_model_parallel_size: int, pipeline_model_parallel_size: int, diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index f0020562c3c3a..4aa0eebd976c9 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -9,10 +9,10 @@ import vllm.envs as envs from vllm.config import (CacheConfig, CompilationConfig, ConfigFormat, - DecodingConfig, DeviceConfig, HfOverrides, LoadConfig, - LoadFormat, LoRAConfig, ModelConfig, - ObservabilityConfig, ParallelConfig, PoolerConfig, - PromptAdapterConfig, SchedulerConfig, + DecodingConfig, DeviceConfig, HfOverrides, + KVTransferConfig, LoadConfig, LoadFormat, LoRAConfig, + ModelConfig, ObservabilityConfig, ParallelConfig, + PoolerConfig, PromptAdapterConfig, SchedulerConfig, SpeculativeConfig, TaskOption, TokenizerPoolConfig, VllmConfig) from vllm.executor.executor_base import ExecutorBase @@ -108,6 +108,7 @@ class EngineArgs: # notice. distributed_executor_backend: Optional[Union[str, Type[ExecutorBase]]] = None + # number of P/D disaggregation (or other disaggregation) workers pipeline_parallel_size: int = 1 tensor_parallel_size: int = 1 max_parallel_loading_workers: Optional[int] = None @@ -194,6 +195,8 @@ class EngineArgs: compilation_config: Optional[CompilationConfig] = None worker_cls: str = "auto" + kv_transfer_config: Optional[KVTransferConfig] = None + def __post_init__(self): if not self.tokenizer: self.tokenizer = self.model @@ -908,6 +911,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'compilers, using -O without space is also ' 'supported. -O3 is equivalent to -O 3.') + parser.add_argument('--kv-transfer-config', + type=KVTransferConfig.from_cli, + default=None, + help='The configurations for distributed KV cache ' + 'transfer. 
Should be a JSON string.') + parser.add_argument( '--worker-cls', type=str, @@ -1201,6 +1210,7 @@ def create_engine_config(self, observability_config=observability_config, prompt_adapter_config=prompt_adapter_config, compilation_config=self.compilation_config, + kv_transfer_config=self.kv_transfer_config, ) if envs.VLLM_USE_V1: diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 1f654a9cce465..c9f06eef3f907 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -21,7 +21,7 @@ from vllm.compilation.compile_context import set_compile_context from vllm.config import CompilationLevel, VllmConfig from vllm.core.scheduler import SchedulerOutputs -from vllm.distributed import get_pp_group +from vllm.distributed import get_kv_transfer_group, get_pp_group from vllm.distributed.parallel_state import graph_capture from vllm.forward_context import set_forward_context from vllm.inputs import INPUT_REGISTRY, InputRegistry @@ -1666,6 +1666,24 @@ def execute_model( else: model_executable = self.model + # Receive KV cache in distributed KV cache transfer setting + # In disagg prefill setting, it will also recv hidden states and bypass + # model forwarding + # In KV cache database setting, it will change the model input so that + # we can skip prefilling on tokens that successfully received KV caches + # NOTE: The receive operation is blocking + bypass_model_exec = False + if self.need_recv_kv(model_input, kv_caches): + hidden_or_intermediate_states, bypass_model_exec, model_input = \ + get_kv_transfer_group().recv_kv_caches_and_hidden_states( + # model is used to know which layer the current worker + # is working on, so that we can receive KV for only those + # layers. + model_executable, + model_input, + kv_caches=kv_caches + ) + multi_modal_kwargs = model_input.multi_modal_kwargs or {} seqlen_agnostic_kwargs = { "finished_requests_ids": model_input.finished_requests_ids, @@ -1677,21 +1695,36 @@ def execute_model( model_forward_end = torch.cuda.Event(enable_timing=True) model_forward_start.record() - with set_forward_context(model_input.attn_metadata, self.vllm_config): - hidden_or_intermediate_states = model_executable( - input_ids=model_input.input_tokens, - positions=model_input.input_positions, - kv_caches=kv_caches, - attn_metadata=model_input.attn_metadata, - intermediate_tensors=intermediate_tensors, - **MultiModalKwargs.as_kwargs(multi_modal_kwargs, - device=self.device), - **seqlen_agnostic_kwargs) + if not bypass_model_exec: + with set_forward_context(model_input.attn_metadata, + self.vllm_config): + hidden_or_intermediate_states = model_executable( + input_ids=model_input.input_tokens, + positions=model_input.input_positions, + kv_caches=kv_caches, + attn_metadata=model_input.attn_metadata, + intermediate_tensors=intermediate_tensors, + **MultiModalKwargs.as_kwargs(multi_modal_kwargs, + device=self.device), + **seqlen_agnostic_kwargs) if (self.observability_config is not None and self.observability_config.collect_model_forward_time): model_forward_end.record() + # Sending KV cache in distributed KV cache transfer setting + # NOTE: the send operation is non-blocking + if self.need_send_kv(model_input, kv_caches): + get_kv_transfer_group().send_kv_caches_and_hidden_states( + # model_executable is used to know which layer the current + # worker is working on, so that we can send KV for only those + # layers. + model_executable, + model_input, + kv_caches, + hidden_or_intermediate_states, + ) + # Compute the logits in the last pipeline stage. 
if not get_pp_group().is_last_rank: if (self.is_driver_worker @@ -1759,6 +1792,56 @@ def execute_model( return [output] + def need_recv_kv(self, model_input, kv_caches) -> bool: + """Check if we need to receive kv-cache from the other worker. + We need to receive KV when + 1. current vLLM instance is KV cache consumer/decode vLLM instance + 2. this batch is not a profiling run + 3. this batch is a prefill run + + Args: + model_input: input to the model executable + kv_caches: vLLM's paged memory + """ + + prefill_meta = model_input.attn_metadata.prefill_metadata + + # check if the current run is profiling + is_profile_run = (kv_caches[0].numel() == 0) + # check if the current run is prefill + is_prefill_run = prefill_meta is not None + + if self.vllm_config.kv_transfer_config is None: + return False + + return self.vllm_config.kv_transfer_config.is_kv_consumer and ( + not is_profile_run) and is_prefill_run + + def need_send_kv(self, model_input, kv_caches) -> bool: + """Check if we need to send kv-cache to the other worker. + We need to send KV when + 1. current vLLM instance is KV cache producer/prefill vLLM instance + 2. this batch is not a profiling run + 3. this batch is a prefill run + + Args: + model_input: input to the model executable + kv_caches: vLLM's paged memory + """ + + prefill_meta = model_input.attn_metadata.prefill_metadata + + # check if the current run is profiling + is_profile_run = (kv_caches[0].numel() == 0) + # check if the current run is prefill + is_prefill_run = prefill_meta is not None + + if self.vllm_config.kv_transfer_config is None: + return False + + return self.vllm_config.kv_transfer_config.is_kv_producer and ( + not is_profile_run) and is_prefill_run + # NOTE: this is nn.Module so the profiler can properly capture/group # kernels calls made within the graph diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index d58cb029618e9..094dd5a5d08b3 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -8,8 +8,9 @@ import torch.distributed import vllm.envs as envs -from vllm.config import ParallelConfig, VllmConfig -from vllm.distributed import (ensure_model_parallel_initialized, +from vllm.config import VllmConfig +from vllm.distributed import (ensure_kv_transfer_initialized, + ensure_model_parallel_initialized, init_distributed_environment, set_custom_all_reduce) from vllm.logger import init_logger @@ -144,7 +145,7 @@ def init_device(self) -> None: raise RuntimeError( f"Not support device type: {self.device_config.device}") # Initialize the distributed environment. - init_worker_distributed_environment(self.parallel_config, self.rank, + init_worker_distributed_environment(self.vllm_config, self.rank, self.distributed_init_method, self.local_rank) # Set random seed. @@ -457,20 +458,22 @@ def get_cache_block_size_bytes(self) -> int: def init_worker_distributed_environment( - parallel_config: ParallelConfig, + vllm_config: VllmConfig, rank: int, distributed_init_method: Optional[str] = None, local_rank: int = -1, ) -> None: """Initialize the distributed environment.""" + parallel_config = vllm_config.parallel_config set_custom_all_reduce(not parallel_config.disable_custom_all_reduce) init_distributed_environment(parallel_config.world_size, rank, distributed_init_method, local_rank) - ensure_model_parallel_initialized(parallel_config.tensor_parallel_size, parallel_config.pipeline_parallel_size) + ensure_kv_transfer_initialized(vllm_config) + def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype): # Check if the GPU supports the dtype. 
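For reference, the producer/consumer roles checked above are driven by the `--kv-transfer-config` JSON added earlier in this patch. A hedged example of what such a config string might look like; the role field name (`kv_role`) does not appear in these hunks and is an assumption, while `kv_connector`, `kv_rank`, `kv_parallel_size` and `kv_buffer_size` are fields referenced by the code above:

```
from vllm.config import KVTransferConfig

# Prefill (KV producer) instance; the decode instance would use the consumer
# role and kv_rank=1. "kv_role" is an assumed field name, not shown in this diff.
ktc = KVTransferConfig.from_cli(
    '{"kv_connector": "PyNcclConnector", "kv_role": "kv_producer", '
    '"kv_rank": 0, "kv_parallel_size": 2, "kv_buffer_size": 5e9}')
```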
diff --git a/vllm/worker/worker_base.py b/vllm/worker/worker_base.py index 7aaa8b453cff1..7c0bc5a678956 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -43,6 +43,7 @@ def __init__( self.speculative_config = vllm_config.speculative_config self.prompt_adapter_config = vllm_config.prompt_adapter_config self.observability_config = vllm_config.observability_config + self.kv_transfer_config = vllm_config.kv_transfer_config @abstractmethod def init_device(self) -> None: From b18c9bbaba6e1c6dfb92fe52e5a6cb22dd6bfa81 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Mon, 2 Dec 2024 09:31:09 +0800 Subject: [PATCH 199/397] [Model] Add BNB support to Llava and Pixtral-HF (#10795) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/models/llava.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 7fd4b32774798..db7fa82ceb9b7 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -287,6 +287,15 @@ def init_vision_tower_for_llava( @INPUT_REGISTRY.register_dummy_data(dummy_data_for_llava) @INPUT_REGISTRY.register_input_processor(input_processor_for_llava) class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): + # BitandBytes specific attributes + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "q_proj": ("qkv_proj", 0), + "k_proj": ("qkv_proj", 1), + "v_proj": ("qkv_proj", 2), + "gate_proj": ("gate_up_proj", 0), + "up_proj": ("gate_up_proj", 1), + } def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() From b7954776fd338cab442a8004d240f7fe74e4e51b Mon Sep 17 00:00:00 2001 From: cduk <19917266+cduk@users.noreply.github.com> Date: Mon, 2 Dec 2024 02:49:48 +0100 Subject: [PATCH 200/397] =?UTF-8?q?[core]=20Avoid=20metrics=20log=20noise?= =?UTF-8?q?=20when=20idle=20-=20include=20speculative=20decodi=E2=80=A6=20?= =?UTF-8?q?(#10809)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- vllm/engine/metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index 5bfd6a9f4b386..4869557ba9b44 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -473,13 +473,13 @@ def log(self, stats: Stats) -> None: ) if (stats.cpu_prefix_cache_hit_rate >= 0 or stats.gpu_prefix_cache_hit_rate >= 0): - logger.info( + log_fn( "Prefix cache hit rate: GPU: %.2f%%, CPU: %.2f%%", stats.gpu_prefix_cache_hit_rate * 100, stats.cpu_prefix_cache_hit_rate * 100, ) if self.spec_decode_metrics is not None: - logger.info( + log_fn( self._format_spec_decode_metrics_str( self.spec_decode_metrics)) From 073a4bd1c04164af29843cb5478740e9839d2d8a Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Sun, 1 Dec 2024 17:55:39 -0800 Subject: [PATCH 201/397] [Kernel] Use `out` arg in flash_attn_varlen_func (#10811) Signed-off-by: Woosuk Kwon --- CMakeLists.txt | 2 +- tests/kernels/test_flash_attn.py | 20 +++++++++++++++++--- vllm/v1/attention/backends/flash_attn.py | 6 +++--- 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index f43bf8143458b..c78cdc77a7e42 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -522,7 +522,7 @@ else() FetchContent_Declare( vllm-flash-attn GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git - GIT_TAG fdf6d72b48aea41f4ae6a89139a453dae554abc8 + GIT_TAG 
04325b6798bcc326c86fb35af62d05a9c8c8eceb GIT_PROGRESS TRUE # Don't share the vllm-flash-attn build between build types BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn diff --git a/tests/kernels/test_flash_attn.py b/tests/kernels/test_flash_attn.py index a20c73345218f..1ae78d7b46c5b 100644 --- a/tests/kernels/test_flash_attn.py +++ b/tests/kernels/test_flash_attn.py @@ -71,6 +71,7 @@ def ref_paged_attn( return torch.cat(outputs, dim=0) +@pytest.mark.parametrize("use_out", [True, False]) @pytest.mark.parametrize("kv_lens", [[1328, 18, 463], [1, 54, 293, 70]]) @pytest.mark.parametrize("num_heads", NUM_HEADS) @pytest.mark.parametrize("head_size", HEAD_SIZES) @@ -81,6 +82,7 @@ def ref_paged_attn( @pytest.mark.parametrize("sliding_window", [None, 256]) @torch.inference_mode() def test_flash_attn_with_paged_kv( + use_out: bool, kv_lens: List[int], num_heads: Tuple[int, int], head_size: int, @@ -116,17 +118,22 @@ def test_flash_attn_with_paged_kv( (num_seqs, max_num_blocks_per_seq), dtype=torch.int32) + q = query.unsqueeze(1) + out = torch.empty_like(q) if use_out else None output = flash_attn_with_kvcache( - q=query.unsqueeze(1), + q=q, k_cache=key_cache, v_cache=value_cache, + out=out, softmax_scale=scale, causal=True, block_table=block_tables, cache_seqlens=kv_lens_tensor, softcap=soft_cap if soft_cap is not None else 0, window_size=window_size, - ).squeeze(1) + ) + output = output if not use_out else out + output = output.squeeze(1) ref_output = ref_paged_attn(query=query, key_cache=key_cache, @@ -141,7 +148,10 @@ def test_flash_attn_with_paged_kv( f"{torch.max(torch.abs(output - ref_output))}" -@pytest.mark.parametrize("seq_lens", [[(1, 1328), (5, 18), (129, 463)]]) +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize("seq_lens", + [[(1, 1328), (5, 18), + (129, 463)], [(1, 523), (1, 37), (1, 2011)]]) @pytest.mark.parametrize("num_heads", NUM_HEADS) @pytest.mark.parametrize("head_size", HEAD_SIZES) @pytest.mark.parametrize("block_size", BLOCK_SIZES) @@ -151,6 +161,7 @@ def test_flash_attn_with_paged_kv( @pytest.mark.parametrize("num_blocks", NUM_BLOCKS) @torch.inference_mode() def test_varlen_with_paged_kv( + use_out: bool, seq_lens: List[Tuple[int, int]], num_heads: Tuple[int, int], head_size: int, @@ -197,10 +208,12 @@ def test_varlen_with_paged_kv( (num_seqs, max_num_blocks_per_seq), dtype=torch.int32) + out = torch.empty_like(query) if use_out else None output = flash_attn_varlen_func( q=query, k=key_cache, v=value_cache, + out=out, cu_seqlens_q=cu_query_lens, cu_seqlens_k=cu_kv_lens, max_seqlen_q=max_query_len, @@ -211,6 +224,7 @@ def test_varlen_with_paged_kv( block_table=block_tables, softcap=soft_cap if soft_cap is not None else 0, ) + output = output if not use_out else out ref_output = ref_paged_attn( query=query, diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index e618edf7d35bf..4aa4b296f0efc 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -205,10 +205,12 @@ def unified_v1_flash_attention( v_scale, ) - attn_output = flash_attn_varlen_func( + # Compute attention and update output up to `num_actual_tokens`. 
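    # With `out=` pointing at a slice of the caller's preallocated buffer, the
    # kernel below writes its result in place; rows past `num_actual_tokens`
    # are simply left untouched, which is what makes the explicit copy_ that
    # used to follow this call unnecessary.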
+ flash_attn_varlen_func( q=query[:num_actual_tokens], k=key_cache, v=value_cache, + out=output[:num_actual_tokens], cu_seqlens_q=attn_metadata.query_start_loc, max_seqlen_q=attn_metadata.max_query_len, cu_seqlens_k=attn_metadata.seq_start_loc, @@ -220,8 +222,6 @@ def unified_v1_flash_attention( block_table=attn_metadata.block_table, softcap=logits_soft_cap, ) - # TODO(woosuk): Remove this unnecessary copy. - output[:num_actual_tokens].copy_(attn_output) def unified_v1_flash_attention_fake( From e25810ae29058299b7bf845c7ed572f2474a1d85 Mon Sep 17 00:00:00 2001 From: Maximilien de Bayser Date: Sun, 1 Dec 2024 23:05:32 -0300 Subject: [PATCH 202/397] Fill TorchSDPAAttentionMetadata seq_lens_field for prefill (#10799) Signed-off-by: Max de Bayser --- vllm/attention/backends/torch_sdpa.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/vllm/attention/backends/torch_sdpa.py b/vllm/attention/backends/torch_sdpa.py index 16e044b618c40..dafa5bb56acda 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -341,7 +341,11 @@ def build(self, seq_lens: List[int], query_lens: List[int], ) else: block_tables = torch.tensor([]) - seq_lens_tensor = torch.tensor([]) + seq_lens_tensor = torch.tensor( + input_data.seq_lens[:input_data.num_prefills], + dtype=torch.int32, + device="cpu", + ) # For multi-modal models placeholder_index_maps = None From 63a164172dbcc43857dbcf6443a7594faa143151 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 1 Dec 2024 19:27:13 -0800 Subject: [PATCH 203/397] [misc] remove xverse modeling file (#10814) Signed-off-by: youkaichao --- vllm/model_executor/models/registry.py | 2 +- vllm/model_executor/models/xverse.py | 423 ------------------------- 2 files changed, 1 insertion(+), 424 deletions(-) delete mode 100644 vllm/model_executor/models/xverse.py diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 2b7b69e8c3a95..c66fbce018a62 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -94,7 +94,7 @@ "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"), "SolarForCausalLM": ("solar", "SolarForCausalLM"), "TeleChat2ForCausalLM": ("telechat2", "TeleChat2ForCausalLM"), - "XverseForCausalLM": ("xverse", "XverseForCausalLM"), + "XverseForCausalLM": ("llama", "LlamaForCausalLM"), # [Encoder-decoder] "BartModel": ("bart", "BartForConditionalGeneration"), "BartForConditionalGeneration": ("bart", "BartForConditionalGeneration"), diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py deleted file mode 100644 index 25a0d474e2863..0000000000000 --- a/vllm/model_executor/models/xverse.py +++ /dev/null @@ -1,423 +0,0 @@ -# Adapted from -# https://huggingface.co/xverse/XVERSE-7B/blob/main/modeling_xverse.py -# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. -# -# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX -# and OPT implementations in this library. It has been modified from its -# original forms to accommodate minor architectural differences compared -# to GPT-NeoX and OPT used by the Meta AI team that trained the model. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Inference-only Xverse model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union - -import torch -from torch import nn -from transformers import PretrainedConfig - -from vllm.attention import Attention, AttentionMetadata -from vllm.compilation.decorators import support_torch_compile -from vllm.config import CacheConfig, VllmConfig -from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size -from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.layernorm import RMSNorm -from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, - QKVParallelLinear, - RowParallelLinear) -from vllm.model_executor.layers.logits_processor import LogitsProcessor -from vllm.model_executor.layers.quantization import QuantizationConfig -from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.layers.vocab_parallel_embedding import ( - ParallelLMHead, VocabParallelEmbedding) -from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.sequence import IntermediateTensors - -from .interfaces import SupportsLoRA, SupportsPP -from .utils import (is_pp_missing_parameter, - make_empty_intermediate_tensors_factory, make_layers, - maybe_prefix) - - -class XverseMLP(nn.Module): - - def __init__( - self, - hidden_size: int, - intermediate_size: int, - hidden_act: str, - quant_config: Optional[QuantizationConfig] = None, - ) -> None: - super().__init__() - self.gate_up_proj = MergedColumnParallelLinear( - hidden_size, [intermediate_size] * 2, - bias=False, - quant_config=quant_config) - self.down_proj = RowParallelLinear(intermediate_size, - hidden_size, - bias=False, - quant_config=quant_config) - if hidden_act != "silu": - raise ValueError(f"Unsupported activation: {hidden_act}. " - "Only silu is supported for now.") - self.act_fn = SiluAndMul() - - def forward(self, x): - gate, _ = self.gate_up_proj(x) - x = self.act_fn(gate) - x, _ = self.down_proj(x) - return x - - -class XverseAttention(nn.Module): - - def __init__( - self, - hidden_size: int, - num_heads: int, - num_kv_heads: int, - rope_theta: float = 10000, - rope_scaling: Optional[Dict[str, Any]] = None, - max_position_embeddings: int = 8192, - quant_config: Optional[QuantizationConfig] = None, - bias: bool = False, - cache_config: Optional[CacheConfig] = None, - prefix: str = "", - ) -> None: - super().__init__() - self.hidden_size = hidden_size - tp_size = get_tensor_model_parallel_world_size() - self.total_num_heads = num_heads - assert self.total_num_heads % tp_size == 0 - self.num_heads = self.total_num_heads // tp_size - self.total_num_kv_heads = num_kv_heads - # partition the KV heads across multiple tensor parallel GPUs. 
- assert self.total_num_kv_heads % tp_size == 0 - self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) - self.head_dim = hidden_size // self.total_num_heads - self.q_size = self.num_heads * self.head_dim - self.kv_size = self.num_kv_heads * self.head_dim - self.scaling = self.head_dim**-0.5 - self.rope_theta = rope_theta - self.max_position_embeddings = max_position_embeddings - - self.qkv_proj = QKVParallelLinear( - hidden_size, - self.head_dim, - self.total_num_heads, - self.total_num_kv_heads, - bias=bias, - quant_config=quant_config, - ) - self.o_proj = RowParallelLinear( - self.total_num_heads * self.head_dim, - hidden_size, - bias=bias, - quant_config=quant_config, - ) - - self.rotary_emb = get_rope( - self.head_dim, - rotary_dim=self.head_dim, - max_position=max_position_embeddings, - base=rope_theta, - rope_scaling=rope_scaling, - ) - self.attn = Attention(self.num_heads, - self.head_dim, - self.scaling, - num_kv_heads=self.num_kv_heads, - cache_config=cache_config, - quant_config=quant_config, - prefix=f"{prefix}.attn") - - def forward( - self, - positions: torch.Tensor, - hidden_states: torch.Tensor, - kv_cache: torch.Tensor, - attn_metadata: AttentionMetadata, - ) -> torch.Tensor: - qkv, _ = self.qkv_proj(hidden_states) - q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) - q, k = self.rotary_emb(positions, q, k) - attn_output = self.attn(q, k, v, kv_cache, attn_metadata) - output, _ = self.o_proj(attn_output) - return output - - -class XverseDecoderLayer(nn.Module): - - def __init__( - self, - config: PretrainedConfig, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - prefix: str = "", - ) -> None: - super().__init__() - self.hidden_size = config.hidden_size - rope_theta = getattr(config, "rope_theta", 10000) - rope_scaling = getattr(config, "rope_scaling", None) - max_position_embeddings = getattr(config, "max_position_embeddings", - 8192) - self.self_attn = XverseAttention( - hidden_size=self.hidden_size, - num_heads=config.num_attention_heads, - num_kv_heads=getattr(config, "num_key_value_heads", - config.num_attention_heads), - rope_theta=rope_theta, - rope_scaling=rope_scaling, - max_position_embeddings=max_position_embeddings, - quant_config=quant_config, - bias=getattr(config, "bias", False), - cache_config=cache_config, - prefix=f"{prefix}.self_attn", - ) - self.mlp = XverseMLP( - hidden_size=self.hidden_size, - intermediate_size=config.intermediate_size, - hidden_act=config.hidden_act, - quant_config=quant_config, - ) - self.input_layernorm = RMSNorm(config.hidden_size, - eps=config.rms_norm_eps) - self.post_attention_layernorm = RMSNorm(config.hidden_size, - eps=config.rms_norm_eps) - - def forward( - self, - positions: torch.Tensor, - hidden_states: torch.Tensor, - kv_cache: torch.Tensor, - attn_metadata: AttentionMetadata, - residual: Optional[torch.Tensor], - ) -> Tuple[torch.Tensor, torch.Tensor]: - # Self Attention - if residual is None: - residual = hidden_states - hidden_states = self.input_layernorm(hidden_states) - else: - hidden_states, residual = self.input_layernorm( - hidden_states, residual) - hidden_states = self.self_attn( - positions=positions, - hidden_states=hidden_states, - kv_cache=kv_cache, - attn_metadata=attn_metadata, - ) - - # Fully Connected - hidden_states, residual = self.post_attention_layernorm( - hidden_states, residual) - hidden_states = self.mlp(hidden_states) - return hidden_states, residual - - -@support_torch_compile -class XverseModel(nn.Module): - - 
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - lora_config = vllm_config.lora_config - self.config = config - self.padding_idx = config.pad_token_id - lora_vocab = (lora_config.lora_extra_vocab_size * - (lora_config.max_loras or 1)) if lora_config else 0 - self.vocab_size = config.vocab_size + lora_vocab - self.org_vocab_size = config.vocab_size - self.embed_tokens = VocabParallelEmbedding( - self.vocab_size, - config.hidden_size, - org_num_embeddings=config.vocab_size, - ) - self.start_layer, self.end_layer, self.layers = make_layers( - config.num_hidden_layers, - lambda prefix: XverseDecoderLayer( - config, cache_config, quant_config, prefix=prefix), - prefix=f"{prefix}.layers", - ) - self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.make_empty_intermediate_tensors = ( - make_empty_intermediate_tensors_factory( - ["hidden_states", "residual"], config.hidden_size)) - - def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: - return self.embed_tokens(input_ids) - - def forward( - self, - input_ids: torch.Tensor, - positions: torch.Tensor, - kv_caches: List[torch.Tensor], - attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[IntermediateTensors], - inputs_embeds: Optional[torch.Tensor] = None, - ) -> Union[torch.Tensor, IntermediateTensors]: - if get_pp_group().is_first_rank: - if inputs_embeds is not None: - hidden_states = inputs_embeds - else: - hidden_states = self.get_input_embeddings(input_ids) - residual = None - else: - hidden_states = intermediate_tensors["hidden_states"] - residual = intermediate_tensors["residual"] - for i in range(self.start_layer, self.end_layer): - layer = self.layers[i] - hidden_states, residual = layer( - positions, - hidden_states, - kv_caches[i - self.start_layer], - attn_metadata, - residual, - ) - if not get_pp_group().is_last_rank: - return IntermediateTensors({ - "hidden_states": hidden_states, - "residual": residual - }) - hidden_states, _ = self.norm(hidden_states, residual) - return hidden_states - - -class XverseForCausalLM(nn.Module, SupportsLoRA, SupportsPP): - packed_modules_mapping = { - "qkv_proj": [ - "q_proj", - "k_proj", - "v_proj", - ], - "gate_up_proj": [ - "gate_proj", - "up_proj", - ], - } - - # LoRA specific attributes - supported_lora_modules = [ - "qkv_proj", - "o_proj", - "gate_up_proj", - "down_proj", - "embed_tokens", - "lm_head", - ] - embedding_modules = { - "embed_tokens": "input_embeddings", - "lm_head": "output_embeddings", - } - embedding_padding_modules = ["lm_head"] - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - - config = vllm_config.model_config.hf_config - quant_config = vllm_config.quant_config - lora_config = vllm_config.lora_config - - self.config = config - self.lora_config = lora_config - - self.quant_config = quant_config - self.model = XverseModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config) - if self.config.tie_word_embeddings: - self.lm_head.weight = self.model.embed_tokens.weight - self.logits_processor = LogitsProcessor(config.vocab_size) - self.sampler = get_sampler() - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - - def get_input_embeddings(self, input_ids: torch.Tensor) -> 
torch.Tensor: - return self.model.get_input_embeddings(input_ids) - - def forward( - self, - input_ids: torch.Tensor, - positions: torch.Tensor, - kv_caches: List[torch.Tensor], - attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[torch.Tensor] = None, - ) -> Union[torch.Tensor, IntermediateTensors]: - hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors, - inputs_embeds) - return hidden_states - - def compute_logits( - self, - hidden_states: torch.Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[torch.Tensor]: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits - - def sample( - self, - logits: torch.Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[SamplerOutput]: - next_tokens = self.sampler(logits, sampling_metadata) - return next_tokens - - def load_weights(self, weights: Iterable[Tuple[str, - torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - if ("rotary_emb.inv_freq" in name - or "rotary_emb.cos_cached" in name - or "rotary_emb.sin_cached" in name): - continue - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params From 995a148575aaacc7889ff0d29a96195c329422ab Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Mon, 2 Dec 2024 12:14:45 +0800 Subject: [PATCH 204/397] [doc]Update config docstring (#10732) Signed-off-by: wangxiyuan --- vllm/config.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/vllm/config.py b/vllm/config.py index 5d9e2766c7faa..510bd81d66217 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -91,6 +91,8 @@ class ModelConfig: the default version. max_model_len: Maximum length of a sequence (including prompt and output). If None, will be derived from the model. + spec_target_max_model_len: Specify the the maximum length for spec + decoding draft models. quantization: Quantization method that was used to quantize the model weights. If None, we assume the model weights are not quantized. quantization_param_path: Path to JSON file containing scaling factors. @@ -107,6 +109,7 @@ class ModelConfig: to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. + max_logprobs: Maximum number of log probabilities. Defaults to 20. disable_sliding_window: Whether to disable sliding window. 
If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is @@ -119,6 +122,8 @@ class ModelConfig: the model name will be the same as `model`. limit_mm_per_prompt: Maximum number of data items per modality per prompt. Only applicable for multimodal models. + use_async_output_proc: Whether to use async output processor. + Defaults to True. config_format: The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. hf_overrides: If a dictionary, contains arguments to be forwarded to the @@ -130,7 +135,7 @@ class ModelConfig: override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. - override_pooling_config: Initialize non default pooling config or + override_pooler_config: Initialize non default pooling config or override default pooling config for the embedding model. """ @@ -734,8 +739,13 @@ class CacheConfig: vLLM execution. swap_space: Size of the CPU swap space per GPU (in GiB). cache_dtype: Data type for kv cache storage. + is_attention_free: Whether the model is attention-free. num_gpu_blocks_override: Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. + sliding_window: Sliding window size for the KV cache. Can not work with + prefix caching enabled. + enable_prefix_caching: Whether to enable prefix caching. + cpu_offload_gb: Size of the CPU offload buffer in GiB. """ def __init__( @@ -904,6 +914,7 @@ class LoadConfig: "tensorizer" will use CoreWeave's tensorizer library for fast weight loading. "bitsandbytes" will load nf4 type weights. + model_loader_extra_config: The extra config for the model loader. ignore_patterns: The list of patterns to ignore when loading the model. Default to "original/**/*" to avoid repeated loading of llama's checkpoints. From ef31eabc68099ff2f64bbe5f42dc06101451a18d Mon Sep 17 00:00:00 2001 From: zhou fan <1247714429@qq.com> Date: Mon, 2 Dec 2024 13:36:36 +0800 Subject: [PATCH 205/397] [Model]: add some tests for aria model (#10770) Signed-off-by: xffxff <1247714429@qq.com> Signed-off-by: Isotr0py <2037008807@qq.com> Co-authored-by: Isotr0py <2037008807@qq.com> --- tests/conftest.py | 6 +++- .../vision_language/test_models.py | 30 +++++++++++++++++++ .../vision_language/vlm_utils/core.py | 11 +++++-- .../vision_language/vlm_utils/types.py | 7 +++++ 4 files changed, 51 insertions(+), 3 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 36f1d477fab59..d6be8f5b00af8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -656,6 +656,7 @@ def __init__( model_name: str, task: TaskOption = "auto", tokenizer_name: Optional[str] = None, + tokenizer_mode: str = "auto", # Use smaller max model length, otherwise bigger model cannot run due # to kv cache size limit. 
max_model_len: int = 1024, @@ -672,6 +673,7 @@ def __init__( model=model_name, task=task, tokenizer=tokenizer_name, + tokenizer_mode=tokenizer_mode, trust_remote_code=True, dtype=dtype, swap_space=swap_space, @@ -842,6 +844,7 @@ def generate_greedy_logprobs( audios: Optional[PromptAudioInput] = None, videos: Optional[PromptVideoInput] = None, stop_token_ids: Optional[List[int]] = None, + stop: Optional[List[str]] = None, ) -> Union[List[TokensTextLogprobs], List[TokensTextLogprobsPromptLogprobs]]: greedy_logprobs_params = SamplingParams( @@ -849,7 +852,8 @@ def generate_greedy_logprobs( max_tokens=max_tokens, logprobs=num_logprobs, prompt_logprobs=num_prompt_logprobs, - stop_token_ids=stop_token_ids) + stop_token_ids=stop_token_ids, + stop=stop) return self.generate_w_logprobs(prompts, greedy_logprobs_params, diff --git a/tests/models/decoder_only/vision_language/test_models.py b/tests/models/decoder_only/vision_language/test_models.py index 3457ec6b8e73b..dbb0b4d350d10 100644 --- a/tests/models/decoder_only/vision_language/test_models.py +++ b/tests/models/decoder_only/vision_language/test_models.py @@ -8,6 +8,7 @@ import pytest import transformers from transformers import AutoModelForVision2Seq +from transformers.utils import is_flash_attn_2_available from vllm.platforms import current_platform from vllm.utils import cuda_device_count_stateless, identity @@ -134,6 +135,35 @@ marks=[pytest.mark.core_model, pytest.mark.cpu_model], ), #### Extended model tests + "aria": VLMTestInfo( + models=["rhymes-ai/Aria"], + tokenizer_mode="slow", + test_type=( + VLMTestType.IMAGE, + VLMTestType.MULTI_IMAGE, + ), + dtype="bfloat16", + prompt_formatter=lambda img_prompt: f"<|im_start|>user\n{img_prompt}<|im_end|>\n<|im_start|>assistant\n ", # noqa: E501 + img_idx_to_prompt=lambda idx: "<|img|>\n", + max_model_len=4096, + max_num_seqs=2, + single_image_prompts=IMAGE_ASSETS.prompts({ + "stop_sign": "Please describe the image shortly.", + "cherry_blossom": "Please infer the season with reason.", + }), + multi_image_prompt="Describe the two images shortly.", # noqa: E501 + postprocess_inputs=model_utils.get_key_type_post_processor("pixel_values"), + stop_str=["<|im_end|>"], + image_size_factors=[(0.10, 0.15)], + max_tokens=64, + marks=[ + pytest.mark.skipif( + not is_flash_attn_2_available(), + reason="Model needs flash-attn for numeric convergence.", + ), + large_gpu_mark(min_gb=64), + ], + ), "blip2": VLMTestInfo( models=["Salesforce/blip2-opt-2.7b"], test_type=VLMTestType.IMAGE, diff --git a/tests/models/decoder_only/vision_language/vlm_utils/core.py b/tests/models/decoder_only/vision_language/vlm_utils/core.py index 7e8c6dabb15af..88349ef9a3a69 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/core.py +++ b/tests/models/decoder_only/vision_language/vlm_utils/core.py @@ -29,6 +29,8 @@ def run_test( postprocess_inputs: Callable[[BatchEncoding], BatchEncoding], comparator: Callable[..., None], get_stop_token_ids: Optional[Callable[[AutoTokenizer], List[int]]], + stop_str: Optional[List[str]], + tokenizer_mode: str, limit_mm_per_prompt: Dict[str, int], model_kwargs: Optional[Dict[str, Any]], patch_hf_runner: Optional[Callable[[HfRunner], HfRunner]], @@ -50,11 +52,14 @@ def run_test( # vLLM needs a fresh new process without cuda initialization. # if we run HF first, the cuda initialization will be done and it # will hurt multiprocessing backend with fork method (the default method). 
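The new stop_str plumbing is matched on decoded text rather than on token ids, which is useful when, as in the Aria test above, the stop marker is not a special token in the tokenizer. A minimal sketch of what the runner-level stop arguments ultimately turn into on the vLLM side (the concrete values here are illustrative only):

from vllm import SamplingParams

# Greedy decoding that stops either on explicit token ids or on a decoded
# string such as Aria's "<|im_end|>" turn terminator.
params = SamplingParams(
    temperature=0.0,
    max_tokens=64,
    logprobs=5,
    stop_token_ids=None,
    stop=["<|im_end|>"],
)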
- vllm_kwargs = {} + vllm_kwargs: Dict[str, Any] = {} if get_stop_token_ids is not None: vllm_kwargs["stop_token_ids"] = get_stop_token_ids(tokenizer) + if stop_str: + vllm_kwargs["stop"] = stop_str with vllm_runner(model, + tokenizer_mode=tokenizer_mode, max_model_len=max_model_len, max_num_seqs=max_num_seqs, dtype=dtype, @@ -85,6 +90,8 @@ def run_test( hf_kwargs = {} if use_tokenizer_eos: hf_kwargs["eos_token_id"] = tokenizer.eos_token_id + if stop_str: + hf_kwargs["stop_strings"] = stop_str with hf_model, torch.no_grad(): for prompts, media in inputs: @@ -138,4 +145,4 @@ def process_runner_outputs( def process_outputs(output_processor, model, outputs_per_image): """Applies a model specific post-processor function to a runner's output""" return [[output_processor(res, model) for res in outputs] - for outputs in outputs_per_image] + for outputs in outputs_per_image] \ No newline at end of file diff --git a/tests/models/decoder_only/vision_language/vlm_utils/types.py b/tests/models/decoder_only/vision_language/vlm_utils/types.py index 8459476dc2d07..d410fa8c653ce 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/types.py +++ b/tests/models/decoder_only/vision_language/vlm_utils/types.py @@ -97,6 +97,9 @@ class VLMTestInfo(NamedTuple): # Optional callable which gets a list of token IDs from the model tokenizer get_stop_token_ids: Optional[Callable[[AutoTokenizer], List[int]]] = None + # Optional list of strings to stop generation, useful when stop tokens are + # not special tokens in the tokenizer + stop_str: Optional[List[str]] = None # Exposed options for HF runner model_kwargs: Optional[Dict[str, Any]] = None @@ -148,6 +151,8 @@ class VLMTestInfo(NamedTuple): marks: Optional[List[MarkDecorator]] = None + tokenizer_mode: str = "auto" + def get_non_parametrized_runner_kwargs(self): """Returns a dictionary of expandable kwargs for items that are used in all test types, which are NOT used when creating the parametrized @@ -166,8 +171,10 @@ def get_non_parametrized_runner_kwargs(self): "postprocess_inputs": self.postprocess_inputs, "comparator": self.comparator, "get_stop_token_ids": self.get_stop_token_ids, + "stop_str": self.stop_str, "model_kwargs": self.model_kwargs, "patch_hf_runner": self.patch_hf_runner, + "tokenizer_mode": self.tokenizer_mode } From e95f275f57bcff44b43e1b4300ae6ea4ee871211 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Mon, 2 Dec 2024 18:26:10 +0800 Subject: [PATCH 206/397] [CI/Build] Update `mistral_common` version for tests and docs (#10825) Signed-off-by: DarkLight1337 --- docs/requirements-docs.txt | 2 +- requirements-test.in | 2 +- requirements-test.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt index e3e35844405ac..8ea240f59c38f 100644 --- a/docs/requirements-docs.txt +++ b/docs/requirements-docs.txt @@ -12,7 +12,7 @@ pydantic >= 2.8 torch py-cpuinfo transformers -mistral_common >= 1.3.4 +mistral_common >= 1.5.0 aiohttp starlette openai # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args diff --git a/requirements-test.in b/requirements-test.in index 76f6de2f77c34..44972866ddc4b 100644 --- a/requirements-test.in +++ b/requirements-test.in @@ -20,7 +20,7 @@ timm # required for internvl test torch==2.5.1 transformers_stream_generator # required for qwen-vl test matplotlib # required for qwen-vl test -mistral_common[opencv] >= 1.4.4 # required for pixtral test +mistral_common[opencv] >= 1.5.0 # required for pixtral test 
datamodel_code_generator # required for minicpm3 test lm-eval[api]==0.4.4 # required for model evaluation test diff --git a/requirements-test.txt b/requirements-test.txt index 65695111e4dc5..a59b85023948b 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -217,7 +217,7 @@ mbstrdecoder==1.1.3 # dataproperty # pytablewriter # typepy -mistral-common[opencv]==1.4.4 +mistral-common[opencv]==1.5.1 # via # -r requirements-test.in # mistral-common From a4c4daf3642ae2629608d5181487739b044fabe8 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 2 Dec 2024 02:50:10 -0800 Subject: [PATCH 207/397] [misc] use out argument for flash attention (#10822) Signed-off-by: youkaichao --- vllm/attention/backends/abstract.py | 1 + vllm/attention/backends/blocksparse_attn.py | 2 + vllm/attention/backends/flash_attn.py | 55 +++---- vllm/attention/backends/flashinfer.py | 4 + vllm/attention/backends/hpu_attn.py | 1 + vllm/attention/backends/ipex_attn.py | 1 + vllm/attention/backends/pallas.py | 1 + vllm/attention/backends/rocm_flash_attn.py | 1 + vllm/attention/backends/torch_sdpa.py | 1 + vllm/attention/backends/xformers.py | 1 + vllm/attention/layer.py | 76 +++++++++- vllm/config.py | 2 +- vllm/v1/attention/backends/flash_attn.py | 155 +++++--------------- 13 files changed, 144 insertions(+), 157 deletions(-) diff --git a/vllm/attention/backends/abstract.py b/vllm/attention/backends/abstract.py index 5be2d83346d00..aed04361e5fb4 100644 --- a/vllm/attention/backends/abstract.py +++ b/vllm/attention/backends/abstract.py @@ -247,5 +247,6 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: raise NotImplementedError diff --git a/vllm/attention/backends/blocksparse_attn.py b/vllm/attention/backends/blocksparse_attn.py index 9e54c3b40c54e..99cb84346d84e 100644 --- a/vllm/attention/backends/blocksparse_attn.py +++ b/vllm/attention/backends/blocksparse_attn.py @@ -360,6 +360,7 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with FlashAttention and PagedAttention. @@ -448,5 +449,6 @@ def forward( blocksparse_head_sliding_step=self.head_sliding_step, ) + assert output is not None # Reshape the output tensor. return output.view(num_tokens, hidden_size) diff --git a/vllm/attention/backends/flash_attn.py b/vllm/attention/backends/flash_attn.py index 32738d1043b1d..c69e12ad78c44 100644 --- a/vllm/attention/backends/flash_attn.py +++ b/vllm/attention/backends/flash_attn.py @@ -638,24 +638,27 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with FlashAttention. Args: - query: shape = [num_tokens, num_heads * head_size] - key: shape = [num_tokens, num_kv_heads * head_size] - value: shape = [num_tokens, num_kv_heads * head_size] + query: shape = [num_tokens, num_heads, head_size] + key: shape = [num_tokens, num_kv_heads, head_size] + value: shape = [num_tokens, num_kv_heads, head_size] + output: shape = [num_tokens, num_heads, head_size] kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size] NOTE: kv_cache will be an empty tensor with shape [0] for profiling run. attn_metadata: Metadata for attention. - Returns: - shape = [num_tokens, num_heads * head_size] + NOTE: It in-place updates the output tensor. 
""" # NOTE(woosuk): FlashAttention does not support FP8 KV cache. assert k_scale == 1.0 and v_scale == 1.0, ( "key/v_scale is not supported in FlashAttention.") + assert output is not None, "Output tensor must be provided." + if (attn_type == AttentionType.ENCODER and (not attn_metadata.is_all_encoder_attn_metadata_set)): raise AttributeError("Encoder attention requires setting " @@ -666,23 +669,12 @@ def forward( "requires setting cross-attention " "metadata attributes.") - num_heads: int = self.num_heads - head_size: int = self.head_size - num_kv_heads: int = self.num_kv_heads kv_cache_dtype: str = self.kv_cache_dtype softmax_scale: float = self.scale window_size = self.sliding_window alibi_slopes: Optional[torch.Tensor] = self.alibi_slopes logits_soft_cap: Optional[float] = self.logits_soft_cap - num_tokens, hidden_size = query.shape - - # Reshape the query, key, and value tensors. - query = query.view(-1, num_heads, head_size) - if (key is not None) and (value is not None): - key = key.view(-1, num_kv_heads, head_size) - value = value.view(-1, num_kv_heads, head_size) - if kv_cache.numel() > 0: key_cache = kv_cache[0] value_cache = kv_cache[1] @@ -721,13 +713,13 @@ def forward( num_decode_query_tokens) = \ get_num_prefill_decode_query_kv_tokens(attn_metadata, attn_type) decode_query = query[num_prefill_query_tokens:] + decode_output = output[num_prefill_query_tokens:] # QKV for prefill. query = query[:num_prefill_query_tokens] + prefill_output = output[:num_prefill_query_tokens] assert query.shape[0] == num_prefill_query_tokens assert decode_query.shape[0] == num_decode_query_tokens - prefill_output: Optional[torch.Tensor] = None - decode_output: Optional[torch.Tensor] = None if prefill_meta := attn_metadata.prefill_metadata: # Prompt run. if (kv_cache.numel() == 0 or prefill_meta.block_tables is None @@ -741,7 +733,7 @@ def forward( key = key[:num_prefill_kv_tokens] value = value[:num_prefill_kv_tokens] - prefill_output = flash_attn_varlen_func( + flash_attn_varlen_func( q=query, k=key, v=value, @@ -754,6 +746,7 @@ def forward( window_size=window_size, alibi_slopes=alibi_slopes, softcap=logits_soft_cap, + out=prefill_output, ) else: # prefix-enabled attention @@ -761,7 +754,7 @@ def forward( "Only decoder-only models support prefix caching") assert prefill_meta.seq_lens is not None max_seq_len = max(prefill_meta.seq_lens) - prefill_output = flash_attn_varlen_func( # noqa + flash_attn_varlen_func( # noqa q=query, k=key_cache, v=value_cache, @@ -775,6 +768,7 @@ def forward( alibi_slopes=alibi_slopes, block_table=prefill_meta.block_tables, softcap=logits_soft_cap, + out=prefill_output, ) if decode_meta := attn_metadata.decode_metadata: @@ -788,7 +782,7 @@ def forward( assert attn_type == AttentionType.DECODER, ( "Only decoder-only models support max_decode_query_len > 1" ) - decode_output = flash_attn_varlen_func( + flash_attn_varlen_func( q=decode_query, k=key_cache, v=value_cache, @@ -802,6 +796,7 @@ def forward( alibi_slopes=alibi_slopes, softcap=logits_soft_cap, block_table=decode_meta.block_tables, + out=decode_output, ) else: # Use flash_attn_with_kvcache for normal decoding. 
@@ -810,7 +805,7 @@ def forward( _, block_tables_arg, ) = get_seq_len_block_table_args(decode_meta, False, attn_type) - decode_output = flash_attn_with_kvcache( + flash_attn_with_kvcache( q=decode_query.unsqueeze(1), k_cache=key_cache, v_cache=value_cache, @@ -821,20 +816,8 @@ def forward( window_size=window_size, alibi_slopes=alibi_slopes, softcap=logits_soft_cap, - ).squeeze(1) - - if prefill_output is None: - assert decode_output is not None - return decode_output.view(num_decode_query_tokens, hidden_size) - if decode_output is None: - assert prefill_output is not None - return prefill_output.view(num_prefill_query_tokens, hidden_size) - - assert decode_meta is not None - decode_output = decode_output.squeeze(1) - output = torch.cat([prefill_output, decode_output], dim=0) - return output.view(num_tokens, hidden_size) - + out=decode_output.unsqueeze(1), + ) return output diff --git a/vllm/attention/backends/flashinfer.py b/vllm/attention/backends/flashinfer.py index 1a2024705eb04..e367468d05d26 100644 --- a/vllm/attention/backends/flashinfer.py +++ b/vllm/attention/backends/flashinfer.py @@ -774,7 +774,11 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: + + # TODO: directly write to output tensor + if attn_type != AttentionType.DECODER: raise NotImplementedError("Encoder self-attention and " "encoder/decoder cross-attention " diff --git a/vllm/attention/backends/hpu_attn.py b/vllm/attention/backends/hpu_attn.py index 5359941d41fde..2c62e565c04c7 100644 --- a/vllm/attention/backends/hpu_attn.py +++ b/vllm/attention/backends/hpu_attn.py @@ -145,6 +145,7 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with xFormers and PagedAttention. diff --git a/vllm/attention/backends/ipex_attn.py b/vllm/attention/backends/ipex_attn.py index 3b0d51ea4a3d8..21949874bea47 100644 --- a/vllm/attention/backends/ipex_attn.py +++ b/vllm/attention/backends/ipex_attn.py @@ -173,6 +173,7 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with IPEX varlen_attention and PagedAttention. diff --git a/vllm/attention/backends/pallas.py b/vllm/attention/backends/pallas.py index 5988be0e6b687..9809aed0e66f9 100644 --- a/vllm/attention/backends/pallas.py +++ b/vllm/attention/backends/pallas.py @@ -151,6 +151,7 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with Pallas attention. diff --git a/vllm/attention/backends/rocm_flash_attn.py b/vllm/attention/backends/rocm_flash_attn.py index 6a494f4e73cb4..9139c3c1314d8 100644 --- a/vllm/attention/backends/rocm_flash_attn.py +++ b/vllm/attention/backends/rocm_flash_attn.py @@ -415,6 +415,7 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with FlashAttention and PagedAttention. 
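For the backends touched in this part of the patch the new output parameter is added only for interface uniformity; the layer-level dispatch later in this patch takes the preallocated-output path only for the FlashAttention backends. A backend that wanted to honour a caller-provided buffer without kernel-level out= support could fall back to an explicit copy — a hypothetical sketch, not taken from any backend here:

from typing import Optional

import torch

def finalize(result: torch.Tensor, output: Optional[torch.Tensor]) -> torch.Tensor:
    # If the caller preallocated a buffer, fill it in place so downstream code
    # keeps using that tensor; otherwise just return the freshly computed result.
    if output is not None:
        output.copy_(result)
        return output
    return result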
diff --git a/vllm/attention/backends/torch_sdpa.py b/vllm/attention/backends/torch_sdpa.py index dafa5bb56acda..86e952a903f36 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -431,6 +431,7 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with torch SDPA and PagedAttention. diff --git a/vllm/attention/backends/xformers.py b/vllm/attention/backends/xformers.py index 292575a8736bc..e2e989efb020c 100644 --- a/vllm/attention/backends/xformers.py +++ b/vllm/attention/backends/xformers.py @@ -417,6 +417,7 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: str = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with xFormers and PagedAttention. diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index 17157617248f7..e024eef286f05 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -4,7 +4,6 @@ import torch import torch.nn as nn -import vllm.envs as envs from vllm.attention import AttentionMetadata, AttentionType from vllm.attention.selector import backend_name_to_enum, get_attn_backend from vllm.config import CacheConfig, get_current_vllm_config @@ -12,7 +11,7 @@ from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) from vllm.model_executor.layers.quantization.kv_cache import BaseKVCacheMethod -from vllm.platforms import current_platform +from vllm.platforms import _Backend, current_platform from vllm.utils import direct_register_custom_op @@ -97,14 +96,23 @@ def __init__( self.impl = impl_cls(num_heads, head_size, scale, num_kv_heads, alibi_slopes, sliding_window, kv_cache_dtype, blocksparse_params, logits_soft_cap) + self.num_heads = num_heads + self.head_size = head_size + self.num_kv_heads = num_kv_heads self.backend = backend_name_to_enum(attn_backend.get_name()) # For cuda-alike (CUDA and ROCM) and cpu platforms, we control how # torch.compile works by registering the attention as one giant # opaque custom op. For other platforms, we directly call them # and let torch.compile handle them. - self.use_direct_call = envs.VLLM_USE_V1 or not ( - current_platform.is_cuda_alike() or current_platform.is_cpu()) + self.use_direct_call = not current_platform.is_cuda_alike( + ) and not current_platform.is_cpu() + + # For some attention backends, we allocate an output tensor before + # calling the custom op. When piecewise cudagraph is enabled, this + # makes sure the output tensor is allocated inside the cudagraph. + self.use_output = self.backend == _Backend.FLASH_ATTN or \ + self.backend == _Backend.FLASH_ATTN_VLLM_V1 compilation_config = get_current_vllm_config().compilation_config if prefix in compilation_config.static_forward_context: raise ValueError(f"Duplicate layer name: {prefix}") @@ -130,6 +138,22 @@ def forward( self._k_scale, self._v_scale, attn_type=attn_type) + elif self.use_output: + output = torch.empty_like(query) + hidden_size = query.size(-1) + # Reshape the query, key, and value tensors. + # NOTE(woosuk): We do this outside the custom op to minimize the + # CPU overheads from the non-CUDA-graph regions. 
+ query = query.view(-1, self.num_heads, self.head_size) + output = output.view(-1, self.num_heads, self.head_size) + if key is not None: + key = key.view(-1, self.num_kv_heads, self.head_size) + if value is not None: + value = value.view(-1, self.num_kv_heads, self.head_size) + torch.ops.vllm.unified_attention_with_output( + query, key, value, output, kv_cache, attn_type, + self.layer_name) + return output.view(-1, hidden_size) else: return torch.ops.vllm.unified_attention(query, key, value, kv_cache, attn_type, @@ -183,3 +207,47 @@ def unified_attention_fake( fake_impl=unified_attention_fake, dispatch_key=current_platform.dispatch_key, ) + + +def unified_attention_with_output( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + output: torch.Tensor, + kv_cache: torch.Tensor, + attn_type: str, + layer_name: str, +) -> None: + forward_context: ForwardContext = get_forward_context() + attn_metadata = forward_context.dynamic_forward_context + self = forward_context.static_forward_context[layer_name] + self.impl.forward(query, + key, + value, + kv_cache, + attn_metadata, + self._k_scale, + self._v_scale, + attn_type=attn_type, + output=output) + + +def unified_attention_with_output_fake( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + output: torch.Tensor, + kv_cache: torch.Tensor, + attn_type: str, + layer_name: str, +) -> None: + return + + +direct_register_custom_op( + op_name="unified_attention_with_output", + op_func=unified_attention_with_output, + mutates_args=["kv_cache", "output"], + fake_impl=unified_attention_with_output_fake, + dispatch_key=current_platform.dispatch_key, +) diff --git a/vllm/config.py b/vllm/config.py index 510bd81d66217..5f50d65ec87e1 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2238,7 +2238,7 @@ class CompilationConfig(BaseModel): custom_ops: List[str] = Field(default_factory=list) splitting_ops: List[str] = Field(default_factory=lambda: [ "vllm.unified_attention", - "vllm.unified_v1_flash_attention", + "vllm.unified_attention_with_output", ]) use_inductor: bool = True diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index 4aa4b296f0efc..d37989055c2e5 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -6,8 +6,6 @@ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, AttentionMetadata, AttentionType) -from vllm.forward_context import get_forward_context -from vllm.utils import direct_register_custom_op from vllm.vllm_flash_attn import flash_attn_varlen_func @@ -113,13 +111,14 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: AttentionType = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with FlashAttention. Args: - query: shape = [num_tokens, num_heads * head_size] - key: shape = [num_tokens, num_kv_heads * head_size] - value: shape = [num_tokens, num_kv_heads * head_size] + query: shape = [num_tokens, num_heads, head_size] + key: shape = [num_tokens, num_kv_heads, head_size] + value: shape = [num_tokens, num_kv_heads, head_size] kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size] attn_metadata: Metadata for attention. Returns: @@ -135,118 +134,42 @@ def forward( assert k_scale == 1.0 and v_scale == 1.0, ( "key/v_scale is not supported in FlashAttention.") - # Reshape the query, key, and value tensors. 
- # NOTE(woosuk): We do this outside the custom op to minimize the CPU - # overheads from the non-CUDA-graph regions. - query = query.view(-1, self.num_heads, self.head_size) - key = key.view(-1, self.num_kv_heads, self.head_size) - value = value.view(-1, self.num_kv_heads, self.head_size) - - output = torch.empty_like(query) - torch.ops.vllm.unified_v1_flash_attention( - output, - query, - key, - value, - self.num_heads, - self.head_size, - self.num_kv_heads, - kv_cache, + if attn_metadata is None: + # Profiling run. + return output + + num_actual_tokens = attn_metadata.num_actual_tokens + + # Reshape the input keys and values and store them in the cache. + key_cache = kv_cache[0] + value_cache = kv_cache[1] + torch.ops._C_cache_ops.reshape_and_cache_flash( + key[:num_actual_tokens], + value[:num_actual_tokens], + key_cache, + value_cache, + attn_metadata.slot_mapping, self.kv_cache_dtype, k_scale, v_scale, - self.scale, - self.sliding_window, - self.alibi_slopes, - self.logits_soft_cap, ) - return output.view(-1, self.num_heads * self.head_size) - - -def unified_v1_flash_attention( - output: torch.Tensor, - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> None: - context = get_forward_context() - current_metadata = context.dynamic_forward_context - if current_metadata is None: - # Profiling run. - return - - assert current_metadata is not None - assert isinstance(current_metadata, FlashAttentionMetadata) - attn_metadata: FlashAttentionMetadata = current_metadata - num_actual_tokens = attn_metadata.num_actual_tokens - - # Reshape the input keys and values and store them in the cache. - key_cache = kv_cache[0] - value_cache = kv_cache[1] - torch.ops._C_cache_ops.reshape_and_cache_flash( - key[:num_actual_tokens], - value[:num_actual_tokens], - key_cache, - value_cache, - attn_metadata.slot_mapping, - kv_cache_dtype, - k_scale, - v_scale, - ) - - # Compute attention and update output up to `num_actual_tokens`. - flash_attn_varlen_func( - q=query[:num_actual_tokens], - k=key_cache, - v=value_cache, - out=output[:num_actual_tokens], - cu_seqlens_q=attn_metadata.query_start_loc, - max_seqlen_q=attn_metadata.max_query_len, - cu_seqlens_k=attn_metadata.seq_start_loc, - max_seqlen_k=attn_metadata.max_seq_len, - softmax_scale=softmax_scale, - causal=True, - alibi_slopes=alibi_slopes, - window_size=window_size, - block_table=attn_metadata.block_table, - softcap=logits_soft_cap, - ) - - -def unified_v1_flash_attention_fake( - output: torch.Tensor, - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> None: - return - - -direct_register_custom_op( - op_name="unified_v1_flash_attention", - op_func=unified_v1_flash_attention, - mutates_args=["kv_cache", "output"], - fake_impl=unified_v1_flash_attention_fake, -) + + # Compute attention and update output up to `num_actual_tokens`. 
+ flash_attn_varlen_func( + q=query[:num_actual_tokens], + k=key_cache, + v=value_cache, + out=output[:num_actual_tokens], + cu_seqlens_q=attn_metadata.query_start_loc, + max_seqlen_q=attn_metadata.max_query_len, + cu_seqlens_k=attn_metadata.seq_start_loc, + max_seqlen_k=attn_metadata.max_seq_len, + softmax_scale=self.scale, + causal=True, + alibi_slopes=self.alibi_slopes, + window_size=self.sliding_window, + block_table=attn_metadata.block_table, + softcap=self.logits_soft_cap, + ) + + return output From b45f0d79469f583736052b80bfc8b3bab29f50d8 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 3 Dec 2024 01:53:36 +0800 Subject: [PATCH 208/397] [Misc][LoRA] Move the implementation of lora bias to punica.py (#10829) Signed-off-by: Jee Jee Li --- tests/lora/test_llama_tp.py | 60 +++++++-------- vllm/lora/fully_sharded_layers.py | 41 +++-------- vllm/lora/layers.py | 113 +++-------------------------- vllm/lora/punica.py | 117 +++++++++++++++++++++++++++--- 4 files changed, 156 insertions(+), 175 deletions(-) diff --git a/tests/lora/test_llama_tp.py b/tests/lora/test_llama_tp.py index aae6310a2a213..d3ca7f878191a 100644 --- a/tests/lora/test_llama_tp.py +++ b/tests/lora/test_llama_tp.py @@ -55,15 +55,7 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> List[str]: return generated_texts -@fork_new_process_for_each_test -def test_llama_lora(sql_lora_files): - - llm = vllm.LLM(MODEL_PATH, - enable_lora=True, - max_num_seqs=16, - max_loras=4, - tensor_parallel_size=1) - +def generate_and_test(llm, sql_lora_files): print("lora adapter created") assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT @@ -79,6 +71,17 @@ def test_llama_lora(sql_lora_files): print("removing lora") +@fork_new_process_for_each_test +def test_llama_lora(sql_lora_files): + + llm = vllm.LLM(MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=1) + generate_and_test(llm, sql_lora_files) + + @fork_new_process_for_each_test def test_llama_lora_warmup(sql_lora_files): """Test that the LLM initialization works with a warmup LORA path and @@ -118,20 +121,7 @@ def test_llama_lora_tp4(sql_lora_files): max_loras=4, tensor_parallel_size=4, ) - - print("lora adapter created") - assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT - - print("lora 1") - assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT - - print("no lora") - assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT - - print("lora 2") - assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT - - print("removing lora") + generate_and_test(llm, sql_lora_files) @multi_gpu_test(num_gpus=4) @@ -146,16 +136,20 @@ def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files): tensor_parallel_size=4, fully_sharded_loras=True, ) - print("lora adapter created") - assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT - - print("lora 1") - assert do_sample(llm, sql_lora_files, lora_id=1) == EXPECTED_LORA_OUTPUT + generate_and_test(llm, sql_lora_files) - print("no lora") - assert do_sample(llm, sql_lora_files, lora_id=0) == EXPECTED_NO_LORA_OUTPUT - print("lora 2") - assert do_sample(llm, sql_lora_files, lora_id=2) == EXPECTED_LORA_OUTPUT +@multi_gpu_test(num_gpus=4) +@fork_new_process_for_each_test +def test_llama_lora_tp4_fully_sharded_enable_bias(sql_lora_files): - print("removing lora") + llm = vllm.LLM( + MODEL_PATH, + enable_lora=True, + max_num_seqs=16, + max_loras=4, + tensor_parallel_size=4, + 
fully_sharded_loras=True, + enable_lora_bias=True, + ) + generate_and_test(llm, sql_lora_files) diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index f5c2eced9d2bb..5f2d32defe030 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -73,6 +73,7 @@ def apply(self, x: torch.Tensor, self.punica_wrapper.add_expand(output, buffer, self.lora_b_stacked, + self.bias_stacked, add_input=True) # now have column partitioned output @@ -131,27 +132,14 @@ def _mcp_apply(x, bias, layer: QKVParallelLinearWithLora): layer.lora_a_stacked[idx], 1.0) buffers = tensor_model_parallel_all_gather(buffers) - left_offset = 0 - for idx in range(n): - shard_size = layer.lora_b_stacked[idx].shape[2] - - if layer.bias_stacked is not None: - bias = layer.bias_stacked[idx] - if bias is not None: - bias = bias.view(-1, bias.shape[-1]) - bias = bias[layer.punica_wrapper.token_lora_indices] - bias[layer.punica_wrapper.token_lora_indices == -1] = 0 - output[:, left_offset:left_offset + shard_size] += bias - - layer.punica_wrapper.add_expand_slice( - output, - buffers[idx], - layer.lora_b_stacked[idx], - left_offset, - shard_size, - add_input=True, - ) - left_offset += shard_size + layer.punica_wrapper.add_expand_packed_nslice( + output, + buffers, + layer.lora_b_stacked, + layer.bias_stacked, + 1.0, + layer.output_slices, + ) output = output.view(*out_orig_shape) # now have column partitioned and packed output @@ -234,6 +222,7 @@ def apply(self, x: torch.Tensor, self.punica_wrapper.add_expand(output, buffer, self.lora_b_stacked, + self.bias_all, add_input=True) # now have column partitioned output output = output.view(*out_orig_shape) @@ -350,15 +339,9 @@ def apply(self, x: torch.Tensor) -> torch.Tensor: # reduced before being used shard_size = self.lora_b_stacked.shape[2] start_idx = self.tp_rank * shard_size - - if self.bias_stacked is not None: - bias = self.bias_stacked.view(-1, self.bias_stacked.shape[-1]) - bias = bias[self.punica_wrapper.token_lora_indices] - bias[self.punica_wrapper.token_lora_indices == -1] = 0 - output += bias - self.punica_wrapper.add_expand_slice(output, buffer, - self.lora_b_stacked, start_idx, + self.lora_b_stacked, + self.bias_stacked, start_idx, shard_size) output = output.view(*out_orig_shape) return output diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 3701988ff692f..73748b5ce511e 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -67,63 +67,6 @@ def dec(*args, **kwargs): return dec -def apply_bias( - indices: torch.Tensor, - output: torch.Tensor, - bias_stacked: torch.Tensor, -): - """Applies bias to output - - Input shapes: - bias_stacked: (num_loras, output_dim) - indices: (batch_size) - output: (batch_size, output_dim) - """ - org_output = output - output = output.view(-1, output.shape[-1]) - indices = indices.view(-1) - - bias_stacked = bias_stacked.view(-1, bias_stacked.shape[-1]) - bias_stacked = bias_stacked[indices] - bias_stacked[indices == -1] = 0 - output += bias_stacked - - return output.view_as(org_output) - - -def apply_bias_packed_nslice( - indices: torch.Tensor, - output: torch.Tensor, - output_slices: Tuple[int, ...], - bias_stacked: Tuple[torch.Tensor, torch.Tensor, torch.Tensor], -): - """Applies bias to output - - Input shapes: - bias_stacked: 3 element tuple of (num_loras, output_dim) - indices: (batch_size) - output: (batch_size, q_slice_size + 2*kv_slice_size) - output_slices: n-1 element tuple of (slice_size...), - where n is number of slices - """ - org_output = output - 
output = output.view(-1, output.shape[-1]) - indices = indices.view(-1) - - offset_left = 0 - for slice_idx, slice in enumerate(output_slices): - bias = bias_stacked[slice_idx] - if bias is not None: - bias = bias.view(-1, bias.shape[-1]) - bias = bias[indices] - bias[indices == -1] = 0 - output[:, offset_left:offset_left + slice] += bias - - offset_left += slice - - return output.view_as(org_output) - - @dataclass class LoRAMapping(AdapterMapping): is_prefill: bool = False @@ -311,6 +254,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: self.punica_wrapper.add_expand(full_output, full_lora_a_embeddings, self.lora_b_stacked, + bias_all=None, add_input=True) return full_output.view_as(full_output_org) @@ -399,15 +343,9 @@ def set_lora( def apply(self, x: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias( - self.indices, - output, - self.bias_stacked, - ) self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, 1.0) + self.lora_b_stacked, self.bias_stacked, + 1.0) return output def forward(self, input_): @@ -576,15 +514,9 @@ def set_lora( def apply(self, x: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias( - self.indices, - output, - self.bias_stacked, - ) self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, 1.0) + self.lora_b_stacked, self.bias_stacked, + 1.0) return output def forward(self, input_): @@ -687,8 +619,8 @@ def create_lora_weights( ) for _ in range(n_slices)) else: self.bias_stacked = None - self.output_dim = self.lora_b_stacked[0].shape[2] + self.output_slices = (self.output_dim, self.output_dim) def reset_lora(self, index: int): self.lora_a_stacked[0][index] = 0 @@ -772,17 +704,9 @@ def set_lora( def apply(self, x: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias_packed_nslice( - self.indices, - output, - (self.output_dim, self.output_dim), - self.bias_stacked, - ) self.punica_wrapper.add_lora_packed_nslice( - output, x, self.lora_a_stacked, self.lora_b_stacked, 1.0, - (self.output_dim, self.output_dim)) + output, x, self.lora_a_stacked, self.lora_b_stacked, + self.bias_stacked, 1.0, (self.output_dim, self.output_dim)) return output @classmethod @@ -1129,17 +1053,10 @@ def set_lora( def apply(self, x: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias_packed_nslice( - self.indices, - output, - self.output_slices, - self.bias_stacked, - ) self.punica_wrapper.add_lora_packed_nslice(output, x, self.lora_a_stacked, - self.lora_b_stacked, 1.0, + self.lora_b_stacked, + self.bias_stacked, 1.0, self.output_slices) return output @@ -1264,15 +1181,9 @@ def set_lora( def apply(self, x: torch.Tensor) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x) - if self.bias_stacked is not None: - self.indices = 
self.punica_wrapper.token_lora_indices - output = apply_bias( - self.indices, - output, - self.bias_stacked, - ) self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, 1.0) + self.lora_b_stacked, self.bias_stacked, + 1.0) return output def forward(self, input_): diff --git a/vllm/lora/punica.py b/vllm/lora/punica.py index 082041f390750..3f775b7ba363e 100644 --- a/vllm/lora/punica.py +++ b/vllm/lora/punica.py @@ -450,6 +450,62 @@ def expand_slice_decode( bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset, y_slice_size, add_input) + def apply_bias( + self, + indices: torch.Tensor, + output: torch.Tensor, + bias_stacked: torch.Tensor, + ): + """Applies bias to output + + Input shapes: + bias_stacked: (num_loras, output_dim) + indices: (batch_size) + output: (batch_size, output_dim) + """ + org_output = output + output = output.view(-1, output.shape[-1]) + indices = indices.view(-1) + + bias_stacked = bias_stacked.view(-1, bias_stacked.shape[-1]) + bias_stacked = bias_stacked[indices] + bias_stacked[indices == -1] = 0 + output += bias_stacked + + return output.view_as(org_output) + + def apply_bias_packed_nslice( + self, + indices: torch.Tensor, + output: torch.Tensor, + output_slices: Tuple[int, ...], + bias_stacked: Tuple[Optional[torch.Tensor], ...], + ): + """Applies bias to output + + Input shapes: + bias_stacked: 3 element tuple of (num_loras, output_dim) + indices: (batch_size) + output: (batch_size, q_slice_size + 2*kv_slice_size) + output_slices: n-1 element tuple of (slice_size...), + where n is number of slices + """ + org_output = output + output = output.view(-1, output.shape[-1]) + indices = indices.view(-1) + + offset_left = 0 + for slice_idx, slice in enumerate(output_slices): + bias = bias_stacked[slice_idx] + if bias is not None: + bias = bias.view(-1, bias.shape[-1]) + bias = bias[indices] + bias[indices == -1] = 0 + output[:, offset_left:offset_left + slice] += bias + offset_left += slice + + return output.view_as(org_output) + def add_shrink( self, y: torch.Tensor, @@ -474,16 +530,19 @@ def add_expand( y: torch.Tensor, x: torch.Tensor, w_t_all: torch.Tensor, + bias_all: Optional[torch.Tensor], add_input: bool = True, ): """ - Perform the ` y+=x@w_t_all` computation, which is suitable for the + Perform the ` y+=x@w_t_all+bias` computation, which is suitable for the GEMM of lora'b. When `is_prefill` is true, it indicates that it is currently the prefill stage, and the `expand_prefill` function should be called. Otherwise, it is the decode stage, and the expand_decode function should be called. 
""" + if bias_all is not None: + y = self.apply_bias(self.token_lora_indices, y, bias_all) expand_fun: Callable = (self.expand_prefill if self.is_prefill else self.expand_decode) @@ -493,23 +552,54 @@ def add_expand_slice(self, y: torch.Tensor, x: torch.Tensor, w_t_all: torch.Tensor, + bias_all: Optional[torch.Tensor], y_offset: Optional[int], y_slice_size: Optional[int], add_input: bool = True): """ Similar to `add_expand` """ + if bias_all is not None: + y = self.apply_bias(self.token_lora_indices, y, bias_all) expand_slice_fun: Callable = (self.expand_slice_prefill if self.is_prefill else self.expand_slice_decode) expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_input) + def add_expand_packed_nslice(self, y: torch.Tensor, x: torch.Tensor, + lora_b_stacked: Tuple[torch.Tensor, ...], + bias_stacked: Optional[Tuple[torch.Tensor, + ...]], + scale: float, + output_slices: Tuple[int, ...]) -> None: + """ + Similar to `add_expand` + """ + y_org = y + y = y.view(-1, y.shape[-1]) + offset_left = 0 + if bias_stacked is not None: + self.apply_bias_packed_nslice(self.token_lora_indices, y, + output_slices, bias_stacked) + for slice_idx in range(len(lora_b_stacked)): + self.add_expand_slice(y, + x[slice_idx], + lora_b_stacked[slice_idx], + None, + offset_left, + output_slices[slice_idx], + add_input=True) + offset_left += output_slices[slice_idx] + + y = y.view_as(y_org) + def add_lora(self, y: torch.Tensor, x: torch.Tensor, wa_t_all: torch.Tensor, wb_t_all: torch.Tensor, + bias_all: Optional[torch.Tensor], scale: float, y_offset: Optional[int] = None, y_slice_size: Optional[int] = None, @@ -522,12 +612,13 @@ def add_lora(self, @ wa_t_all[indices[i], layer_idx, :, :].transpose(-1, -2) @ wb_t_all[indices[i], layer_idx, :, :].transpose(-1, -2) * scale - ).squeeze(0) + ).squeeze(0)+bias[i] Args: y (torch.Tensor): Output tensor. Will be changed in-place. x (torch.Tensor): Input tensor wa_t_all (torch.Tensor): lora_a's weight wb_t_all (torch.Tensor): lora_b's weight + bias_all: (torch.Tensor): lora's bias scale (float): Scaling factor. y_offset (Optional[int], optional): Offset to apply to the starting column of y. @@ -544,27 +635,26 @@ def add_lora(self, buffer = torch.zeros((x.size(0), r), dtype=torch.float32, device=x.device) - + if bias_all is not None: + y = self.apply_bias(self.token_lora_indices, y, bias_all) self.add_shrink(buffer, x, wa_t_all, scale) if y_offset is None and y_slice_size is None: - self.add_expand(y, buffer, wb_t_all, add_input=True) + self.add_expand(y, buffer, wb_t_all, bias_all=None, add_input=True) else: self.add_expand_slice(y, buffer, wb_t_all, + None, y_offset, y_slice_size, add_input=True) y = y.view_as(y_org) def add_lora_packed_nslice(self, y: torch.Tensor, x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, - torch.Tensor, - torch.Tensor], - lora_b_stacked: Tuple[torch.Tensor, - torch.Tensor, - torch.Tensor], - scale: float, + lora_a_stacked: Tuple[torch.Tensor, ...], + lora_b_stacked: Tuple[torch.Tensor, ...], + bias_all: Tuple[Optional[torch.Tensor], + ...], scale: float, output_slices: Tuple[int, ...]) -> None: """ Applies lora to each input. 
Similar to add_lora, This method is @@ -575,10 +665,13 @@ def add_lora_packed_nslice(self, y: torch.Tensor, x: torch.Tensor, x = x.view(-1, x.shape[-1]) y = y.view(-1, y.shape[-1]) offset_left = 0 + if bias_all is not None: + y = self.apply_bias_packed_nslice(self.token_lora_indices, y, + output_slices, bias_all) # TODO fuse these kernels for slice_idx in range(len(output_slices)): self.add_lora(y, x, lora_a_stacked[slice_idx], - lora_b_stacked[slice_idx], scale, offset_left, + lora_b_stacked[slice_idx], None, scale, offset_left, output_slices[slice_idx]) offset_left += output_slices[slice_idx] From 519cc6ca12dc89eec35bc2579494e399da33c31a Mon Sep 17 00:00:00 2001 From: Yan Ma Date: Tue, 3 Dec 2024 01:53:55 +0800 Subject: [PATCH 209/397] [Misc][XPU] Avoid torch compile for XPU platform (#10747) Signed-off-by: yan ma Co-authored-by: youkaichao --- .buildkite/run-xpu-test.sh | 6 ++++-- vllm/plugins/__init__.py | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.buildkite/run-xpu-test.sh b/.buildkite/run-xpu-test.sh index faeac8e2ded36..50f58f7d70430 100644 --- a/.buildkite/run-xpu-test.sh +++ b/.buildkite/run-xpu-test.sh @@ -12,5 +12,7 @@ remove_docker_container() { docker rm -f xpu-test || true; } trap remove_docker_container EXIT remove_docker_container -# Run the image and launch offline inference -docker run --network host --name xpu-test --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path --entrypoint="" xpu-test python3 examples/offline_inference.py +# Run the image and test offline inference/tensor parallel +docker run -it -d --name xpu-test --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path xpu-test /bin/bash +docker exec xpu-test bash -c "python3 examples/offline_inference.py" +docker exec xpu-test bash -c "python3 examples/offline_inference_cli.py -tp 2" diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 3c64726ca3344..81ee9975cdc4a 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -4,6 +4,7 @@ import torch import vllm.envs as envs +from vllm.platforms import current_platform logger = logging.getLogger(__name__) @@ -25,6 +26,9 @@ def load_general_plugins(): os.environ['TORCHINDUCTOR_COMPILE_THREADS'] = '1' # see https://github.com/vllm-project/vllm/issues/10619 torch._inductor.config.compile_threads = 1 + if current_platform.is_xpu(): + # see https://github.com/pytorch/pytorch/blob/8cada5cbe5450e17c26fb8b358116785324537b2/torch/_dynamo/config.py#L158 # noqa + os.environ['TORCH_COMPILE_DISABLE'] = 'True' global plugins_loaded if plugins_loaded: return From 9b14d978aa8c286b738f107fab4626273f4fc088 Mon Sep 17 00:00:00 2001 From: Jani Monoses Date: Mon, 2 Dec 2024 20:52:19 +0200 Subject: [PATCH 210/397] Fix openvino on GPU (#10793) --- vllm/worker/openvino_worker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/worker/openvino_worker.py b/vllm/worker/openvino_worker.py index 205f8a337ce6c..0bf522d5333ed 100644 --- a/vllm/worker/openvino_worker.py +++ b/vllm/worker/openvino_worker.py @@ -489,7 +489,7 @@ def model_profile_run(): block_size = cache_config.block_size seq_num_blocks = (seq_len + block_size - 1) // block_size - seq_data, dummy_multi_modal_data = input_registry \ + dummy_data = input_registry \ .dummy_data_for_profiling(model_config, seq_len, mm_registry) @@ -498,11 +498,11 @@ def model_profile_run(): seq = SequenceGroupMetadata( request_id=str(group_id), is_prompt=True, - seq_data={group_id: seq_data}, + seq_data={group_id: dummy_data.seq_data}, 
sampling_params=sampling_params, block_tables=block_tables, lora_request=None, - multi_modal_data=dummy_multi_modal_data) + multi_modal_data=dummy_data.multi_modal_data) seqs.append(seq) self.model_runner.block_size = tmp_cache_config.block_size From 4c05edb33ae4ae279421ddf981816d070e8ec37a Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Tue, 3 Dec 2024 07:06:09 +0800 Subject: [PATCH 211/397] [Model] Add TP and BNB quantization support to LlavaMultiModalProjector (#10834) Signed-off-by: Isotr0py <2037008807@qq.com> Co-authored-by: Cyrus Leung --- vllm/model_executor/model_loader/loader.py | 14 +++++++-- vllm/model_executor/models/llava.py | 35 ++++++++++++++-------- 2 files changed, 34 insertions(+), 15 deletions(-) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 0e12bc5691538..b4921cc80797f 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -1120,7 +1120,14 @@ def _load_weights(self, model_config: ModelConfig, model_config.revision, pre_quant, load_8bit)) - model.load_weights(qweight_iterator) + weights_to_load = {name for name, _ in model.named_parameters()} + loaded_weights = model.load_weights(qweight_iterator) + # Some models may have weights loading tracker unimplemented. + if loaded_weights is not None: + weights_not_loaded = weights_to_load - loaded_weights + if weights_not_loaded: + raise ValueError("Following weights were not initialized from " + f"checkpoint: {weights_not_loaded}") torch.cuda.empty_cache() @@ -1152,9 +1159,10 @@ def _load_weights(self, model_config: ModelConfig, shard_name, weight_name) break + # Models like Clip/Siglip may skip some layers in initialization, + # causing unused quant_param_name in state_dict. if quant_param_name not in param_dict: - raise ValueError( - f"Parameter {quant_param_name} not found in the model.") + continue if quant_param_name not in stacked_quant_state_dict: stacked_quant_state_dict[quant_param_name] = {} diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index db7fa82ceb9b7..d375c1c9da2a9 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -13,6 +13,8 @@ from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext) from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + RowParallelLinear) from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.sampling_metadata import SamplingMetadata @@ -59,25 +61,32 @@ class LlavaImageEmbeddingInputs(TypedDict): LlavaImageInputs = Union[LlavaImagePixelInputs, LlavaImageEmbeddingInputs] -# TODO(xwjiang): Run benchmark and decide if TP. 
class LlavaMultiModalProjector(nn.Module): - def __init__(self, vision_hidden_size: int, text_hidden_size: int, - projector_hidden_act: str): + def __init__(self, + vision_hidden_size: int, + text_hidden_size: int, + projector_hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() - self.linear_1 = nn.Linear(vision_hidden_size, - text_hidden_size, - bias=True) + self.linear_1 = ColumnParallelLinear(vision_hidden_size, + text_hidden_size, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.linear_1") self.act = get_act_fn(projector_hidden_act) - self.linear_2 = nn.Linear(text_hidden_size, - text_hidden_size, - bias=True) + self.linear_2 = RowParallelLinear(text_hidden_size, + text_hidden_size, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.linear_2") def forward(self, image_features: torch.Tensor) -> torch.Tensor: - hidden_states = self.linear_1(image_features) + hidden_states, _ = self.linear_1(image_features) hidden_states = self.act(hidden_states) - hidden_states = self.linear_2(hidden_states) + hidden_states, _ = self.linear_2(hidden_states) return hidden_states @@ -325,7 +334,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.multi_modal_projector = LlavaMultiModalProjector( vision_hidden_size=config.vision_config.hidden_size, text_hidden_size=config.text_config.hidden_size, - projector_hidden_act=config.projector_hidden_act) + projector_hidden_act=config.projector_hidden_act, + quant_config=quant_config, + prefix=maybe_prefix(prefix, "multi_modal_projector")) self.language_model = init_vllm_registered_model( vllm_config=vllm_config, From 4433195ab75e2bb367303ba5f34c97521c5677ce Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 2 Dec 2024 21:26:15 -0500 Subject: [PATCH 212/397] [Bugfix] Prevent benchmark_throughput.py from using duplicated random prompts (#10753) --- benchmarks/benchmark_throughput.py | 47 +++++++++++++++++++----------- 1 file changed, 30 insertions(+), 17 deletions(-) diff --git a/benchmarks/benchmark_throughput.py b/benchmarks/benchmark_throughput.py index 159cf055737ce..1e5967bd9bf8b 100644 --- a/benchmarks/benchmark_throughput.py +++ b/benchmarks/benchmark_throughput.py @@ -294,23 +294,36 @@ def main(args: argparse.Namespace): tokenizer = AutoTokenizer.from_pretrained( args.tokenizer, trust_remote_code=args.trust_remote_code) if args.dataset is None: - # Synthesize a prompt with the given input length. - # As tokenizer may add additional tokens like BOS, we need to try - # different lengths to get the desired input length. - for i in range(-10, 10): - prompt = "hi " * (args.input_len + i) - tokenized_prompt = tokenizer(prompt).input_ids - if len(tokenized_prompt) == args.input_len: - break - else: - raise ValueError( - f"Failed to synthesize a prompt with {args.input_len} tokens.") - requests = [ - SampleRequest(prompt=prompt, - prompt_len=args.input_len, - expected_output_len=args.output_len) - for _ in range(args.num_prompts) - ] + vocab_size = tokenizer.vocab_size + requests = [] + for _ in range(args.num_prompts): + # Synthesize a prompt with the given input length. + candidate_ids = [ + random.randint(0, vocab_size - 1) + for _ in range(args.input_len) + ] + # As tokenizer may add additional tokens like BOS, we need to try + # different lengths to get the desired input length. 
+ for _ in range(5): # Max attempts to correct + candidate_prompt = tokenizer.decode(candidate_ids) + tokenized_len = len(tokenizer.encode(candidate_prompt)) + + if tokenized_len == args.input_len: + break + + # Adjust length based on difference + diff = args.input_len - tokenized_len + if diff > 0: + candidate_ids.extend([ + random.randint(100, vocab_size - 100) + for _ in range(diff) + ]) + else: + candidate_ids = candidate_ids[:diff] + requests.append( + SampleRequest(prompt=candidate_prompt, + prompt_len=args.input_len, + expected_output_len=args.output_len)) else: requests = sample_requests(tokenizer, args) From d746268e92dc97d3a816c70637e20073eeac5103 Mon Sep 17 00:00:00 2001 From: zixuanzhang226 Date: Mon, 2 Dec 2024 19:06:41 -0800 Subject: [PATCH 213/397] [Model] support bitsandbytes quantization with minicpm model (#10842) Signed-off-by: Ubuntu --- vllm/model_executor/models/minicpm.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index 6254d26c7060d..5a0f202364f26 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -534,6 +534,16 @@ class MiniCPMForCausalLM(nn.Module, SupportsLoRA, SupportsPP): } embedding_padding_modules = ["lm_head"] + # BitandBytes specific attributes + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "q_proj": ("qkv_proj", 0), + "k_proj": ("qkv_proj", 1), + "v_proj": ("qkv_proj", 2), + "gate_proj": ("gate_up_proj", 0), + "up_proj": ("gate_up_proj", 1), + } + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config = vllm_config.model_config.hf_config From a4cf2561599448d4a5c3de4d79c73ca37cb8d647 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 3 Dec 2024 12:10:29 +0800 Subject: [PATCH 214/397] [Bugfix] Fix QKVParallelLinearWithShardedLora bias bug (#10844) Signed-off-by: Jee Jee Li --- .buildkite/test-pipeline.yaml | 1 - vllm/lora/fully_sharded_layers.py | 9 +-------- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index f5591f1098534..455f02a2062f1 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -481,7 +481,6 @@ steps: - label: LoRA TP Test (Distributed) num_gpus: 4 - soft_fail: true source_file_dependencies: - vllm/lora - tests/lora diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index 5f2d32defe030..e25e453201f01 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -77,13 +77,6 @@ def apply(self, x: torch.Tensor, add_input=True) # now have column partitioned output - if self.bias_stacked is not None: - self.bias_stacked = self.bias_stacked.view( - -1, self.bias_stacked.shape[-1]) - self.bias_stacked = self.bias_stacked[ - self.punica_wrapper.token_lora_indices] - output += self.bias_stacked - output = output.view(*out_orig_shape) return output @@ -222,7 +215,7 @@ def apply(self, x: torch.Tensor, self.punica_wrapper.add_expand(output, buffer, self.lora_b_stacked, - self.bias_all, + self.bias_stacked, add_input=True) # now have column partitioned output output = output.view(*out_orig_shape) From 21fe7b481a3a84dc9ebe2497ec89a17002ad52c5 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 2 Dec 2024 20:53:23 -0800 Subject: [PATCH 215/397] [core][distributed] add pynccl broadcast (#10843) Signed-off-by: youkaichao --- tests/distributed/test_pynccl.py | 45 ++++++++++++++++++- 
.../device_communicators/pynccl.py | 19 ++++++++ .../device_communicators/pynccl_wrapper.py | 16 +++++++ 3 files changed, 78 insertions(+), 2 deletions(-) diff --git a/tests/distributed/test_pynccl.py b/tests/distributed/test_pynccl.py index fb24d6bc2c100..4e27babf12cc3 100644 --- a/tests/distributed/test_pynccl.py +++ b/tests/distributed/test_pynccl.py @@ -61,6 +61,7 @@ def worker_fn(): dtype=torch.float32).cuda(pynccl_comm.rank) with pynccl_comm.change_state(enable=True): tensor = pynccl_comm.all_reduce(tensor) + torch.cuda.synchronize() result = tensor.mean().cpu().item() assert result == pynccl_comm.world_size @@ -86,10 +87,12 @@ def multiple_allreduce_worker_fn(): if torch.distributed.get_rank() in [0, 1]: tensor = pynccl_comm.all_reduce(tensor) tensor = pynccl_comm.all_reduce(tensor) + torch.cuda.synchronize() result = tensor.mean().cpu().item() assert result == 4 else: tensor = pynccl_comm.all_reduce(tensor) + torch.cuda.synchronize() result = tensor.mean().cpu().item() assert result == 2 @@ -112,10 +115,12 @@ def multiple_allreduce_with_vllm_worker_fn(): if torch.distributed.get_rank() in [0, 1]: tensor = tensor_model_parallel_all_reduce(tensor) tensor = tensor_model_parallel_all_reduce(tensor) + torch.cuda.synchronize() result = tensor.mean().cpu().item() assert result == 4 else: tensor = tensor_model_parallel_all_reduce(tensor) + torch.cuda.synchronize() result = tensor.mean().cpu().item() assert result == 2 @@ -141,9 +146,9 @@ def worker_fn_with_cudagraph(): graph, stream=pynccl_comm.stream), pynccl_comm.change_state( enable=True): a_out = pynccl_comm.all_reduce(a) - pynccl_comm.stream.synchronize() + torch.cuda.synchronize() graph.replay() - pynccl_comm.stream.synchronize() + torch.cuda.synchronize() assert a_out.mean().cpu().item() == pynccl_comm.world_size**1 @@ -170,6 +175,7 @@ def all_gather_worker_fn(): with pynccl_comm.change_state(enable=True): pynccl_comm.all_gather(result, tensor) + torch.cuda.synchronize() torch.testing.assert_close(result, expected, rtol=1e-5, atol=1e-8) @@ -207,6 +213,7 @@ def reduce_scatter_worker_fn(): with pynccl_comm.change_state(enable=True): pynccl_comm.reduce_scatter(result, tensor) + torch.cuda.synchronize() torch.testing.assert_close(result, expected, rtol=1e-5, atol=1e-8) @@ -241,6 +248,7 @@ def send_recv_worker_fn(): pynccl_comm.recv(tensor, src=(pynccl_comm.rank - 1) % pynccl_comm.world_size) + torch.cuda.synchronize() result = tensor.mean().cpu().item() assert result == 1 @@ -280,6 +288,7 @@ def multiple_send_recv_worker_fn(): pynccl_comm.recv(tensor, src=(pynccl_comm.rank - 1) % pynccl_comm.world_size) + torch.cuda.synchronize() result = tensor.mean().cpu().item() if torch.distributed.get_rank() in [0, 2]: assert result == 1 @@ -293,6 +302,38 @@ def test_pynccl_multiple_send_recv(): distributed_run(multiple_send_recv_worker_fn, 4) +@pytest.mark.skipif(torch.cuda.device_count() < 4, + reason="Need at least 4 GPUs to run the test.") +def test_pynccl_broadcast(): + distributed_run(broadcast_worker_fn, 4) + + +@worker_fn_wrapper +def broadcast_worker_fn(): + # Test broadcast for every root rank. + # Essentially this is an all-gather operation. 
+ pynccl_comm = PyNcclCommunicator(get_world_group().cpu_group, + device=get_world_group().device) + recv_tensors = [ + torch.empty(16, + 1024, + 1024, + dtype=torch.float32, + device=pynccl_comm.device) + for i in range(pynccl_comm.world_size) + ] + recv_tensors[pynccl_comm.rank] = torch.ones( + 16, 1024, 1024, dtype=torch.float32, + device=pynccl_comm.device) * pynccl_comm.rank + + for i in range(pynccl_comm.world_size): + pynccl_comm.broadcast(recv_tensors[i], src=i) + # the broadcast op might be launched in a different stream + # need to synchronize to make sure the tensor is ready + torch.cuda.synchronize() + assert torch.all(recv_tensors[i] == i).cpu().item() + + def test_ncclGetUniqueId(): lib = NCCLLibrary() unique_id = lib.ncclGetUniqueId() diff --git a/vllm/distributed/device_communicators/pynccl.py b/vllm/distributed/device_communicators/pynccl.py index d4e3f81747038..a6800f93f167b 100644 --- a/vllm/distributed/device_communicators/pynccl.py +++ b/vllm/distributed/device_communicators/pynccl.py @@ -197,6 +197,25 @@ def recv(self, tensor: torch.Tensor, src: int, stream=None): ncclDataTypeEnum.from_torch(tensor.dtype), src, self.comm, cudaStream_t(stream.cuda_stream)) + def broadcast(self, tensor: torch.Tensor, src: int, stream=None): + if self.disabled: + return + assert tensor.device == self.device, ( + f"this nccl communicator is created to work on {self.device}, " + f"but the input tensor is on {tensor.device}") + if stream is None: + stream = self.stream + if src == self.rank: + sendbuff = buffer_type(tensor.data_ptr()) + # NCCL requires the sender also to have a receive buffer + recvbuff = buffer_type(tensor.data_ptr()) + else: + sendbuff = buffer_type() + recvbuff = buffer_type(tensor.data_ptr()) + self.nccl.ncclBroadcast(sendbuff, recvbuff, tensor.numel(), + ncclDataTypeEnum.from_torch(tensor.dtype), src, + self.comm, cudaStream_t(stream.cuda_stream)) + @contextmanager def change_state(self, enable: Optional[bool] = None, diff --git a/vllm/distributed/device_communicators/pynccl_wrapper.py b/vllm/distributed/device_communicators/pynccl_wrapper.py index ff88f72470b27..7dea61b6a09f1 100644 --- a/vllm/distributed/device_communicators/pynccl_wrapper.py +++ b/vllm/distributed/device_communicators/pynccl_wrapper.py @@ -189,6 +189,15 @@ class NCCLLibrary: ncclComm_t, cudaStream_t ]), + # ncclResult_t ncclBroadcast( + # const void* sendbuff, void* recvbuff, size_t count, + # ncclDataType_t datatype, int root, ncclComm_t comm, + # cudaStream_t stream); + Function("ncclBroadcast", ncclResult_t, [ + buffer_type, buffer_type, ctypes.c_size_t, ncclDataType_t, + ctypes.c_int, ncclComm_t, cudaStream_t + ]), + # be cautious! this is a collective call, it will block until all # processes in the communicator have called this function. 
# because Python object destruction can happen in random order, @@ -312,6 +321,13 @@ def ncclRecv(self, recvbuff: buffer_type, count: int, datatype: int, self.NCCL_CHECK(self._funcs["ncclRecv"](recvbuff, count, datatype, src, comm, stream)) + def ncclBroadcast(self, sendbuff: buffer_type, recvbuff: buffer_type, + count: int, datatype: int, root: int, comm: ncclComm_t, + stream: cudaStream_t) -> None: + self.NCCL_CHECK(self._funcs["ncclBroadcast"](sendbuff, recvbuff, count, + datatype, root, comm, + stream)) + def ncclCommDestroy(self, comm: ncclComm_t) -> None: self.NCCL_CHECK(self._funcs["ncclCommDestroy"](comm)) From dc5ce861bf0e10fc002384859b93b1eebbd70933 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 2 Dec 2024 22:19:02 -0800 Subject: [PATCH 216/397] [torch.compile] remove compilation_context and simplify code (#10838) Signed-off-by: youkaichao --- tests/compile/piecewise/test_simple.py | 9 +- tests/compile/piecewise/test_toy_llama.py | 33 ++++---- .../decoder_only/language/test_jamba.py | 5 +- .../decoder_only/language/test_mamba.py | 5 +- .../test_encoder_decoder_model_runner.py | 4 +- tests/worker/test_model_runner.py | 5 +- vllm/compilation/backends.py | 4 - vllm/compilation/compile_context.py | 23 ----- vllm/config.py | 83 +++++++++++++++++-- vllm/model_executor/models/jamba.py | 6 +- vllm/model_executor/models/mamba.py | 6 +- vllm/v1/worker/gpu_model_runner.py | 14 ++-- vllm/worker/enc_dec_model_runner.py | 6 +- vllm/worker/model_runner.py | 68 ++------------- 14 files changed, 128 insertions(+), 143 deletions(-) delete mode 100644 vllm/compilation/compile_context.py diff --git a/tests/compile/piecewise/test_simple.py b/tests/compile/piecewise/test_simple.py index 7ef502abee345..aa11524812cdd 100644 --- a/tests/compile/piecewise/test_simple.py +++ b/tests/compile/piecewise/test_simple.py @@ -7,7 +7,6 @@ from torch import nn from torch.library import Library -from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile from vllm.config import (CompilationConfig, CompilationLevel, VllmConfig, @@ -81,6 +80,7 @@ def test_simple_piecewise_compile(): use_cudagraph=True, splitting_ops=["silly.attention"], cudagraph_copy_inputs=True, + cudagraph_capture_sizes=[1, 2], )) with set_current_vllm_config(vllm_config): model = SillyModel(vllm_config=vllm_config, prefix='') @@ -96,11 +96,10 @@ def test_simple_piecewise_compile(): 6, # num_cudagraph_sizes * num_piecewise_capturable_graphs_seen ): - with set_compile_context([1, 2]): - model(inputs) + model(inputs) - model(torch.randn(2).cuda()) - model(torch.randn(1).cuda()) + model(torch.randn(2).cuda()) + model(torch.randn(1).cuda()) input = torch.zeros(2).cuda() global global_counter diff --git a/tests/compile/piecewise/test_toy_llama.py b/tests/compile/piecewise/test_toy_llama.py index dbd5a3bbffeab..07c10a3a18c55 100644 --- a/tests/compile/piecewise/test_toy_llama.py +++ b/tests/compile/piecewise/test_toy_llama.py @@ -13,7 +13,6 @@ from torch import nn from torch.library import Library -from vllm.compilation.compile_context import set_compile_context from vllm.compilation.counter import compilation_counter from vllm.compilation.decorators import support_torch_compile from vllm.config import (CompilationConfig, CompilationLevel, VllmConfig, @@ -256,6 +255,7 @@ def run_model(llama_config, compilation_config = CompilationConfig( level=CompilationLevel.PIECEWISE, use_cudagraph=True, + cudagraph_capture_sizes=[1, 2], ) 
if split_attn: compilation_config.splitting_ops = ["silly.attention"] @@ -273,10 +273,9 @@ def run_model(llama_config, input_ids = torch.randint(0, llama_config.vocab_size, (B, )).cuda() positions = torch.arange(B).cuda() - with set_compile_context([1, 2]): - model(input_ids, positions) - model(input_ids[:2], positions[:2]) - model(input_ids[:1], positions[:1]) + model(input_ids, positions) + model(input_ids[:2], positions[:2]) + model(input_ids[:1], positions[:1]) input_ids[:2].zero_() output = model(input_ids[:2], positions[:2]) @@ -379,10 +378,13 @@ def benchmark(): level=CompilationLevel.PIECEWISE, use_cudagraph=True, splitting_ops=["silly.attention"], + cudagraph_capture_sizes=cudagraph_sizes, ) else: compilation_config = CompilationConfig( - level=CompilationLevel.PIECEWISE, ) + level=CompilationLevel.PIECEWISE, + cudagraph_capture_sizes=cudagraph_sizes, + ) vllm_config = VllmConfig(compilation_config=compilation_config) with set_current_vllm_config(vllm_config): @@ -396,17 +398,16 @@ def benchmark(): graphs = {} - with set_compile_context(cudagraph_sizes): - model(input_ids, positions) - for b in cudagraph_sizes[::-1]: - if not piecewise: - graph = torch.cuda.CUDAGraph() - with torch.cuda.graph(graph, pool=pool): - output = model(input_ids[:b], positions[:b]) - graphs[b] = (graph, output) - else: + model(input_ids, positions) + for b in cudagraph_sizes[::-1]: + if not piecewise: + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, pool=pool): output = model(input_ids[:b], positions[:b]) - graphs[b] = (model, output) + graphs[b] = (graph, output) + else: + output = model(input_ids[:b], positions[:b]) + graphs[b] = (model, output) for b in cudagraph_sizes: if piecewise: # noqa is for `Function definition does not bind loop variable` diff --git a/tests/models/decoder_only/language/test_jamba.py b/tests/models/decoder_only/language/test_jamba.py index 87a05b3011393..cae25ae9fa2c8 100644 --- a/tests/models/decoder_only/language/test_jamba.py +++ b/tests/models/decoder_only/language/test_jamba.py @@ -1,8 +1,8 @@ import pytest from tests.utils import multi_gpu_test +from vllm.config import VllmConfig from vllm.sampling_params import SamplingParams -from vllm.worker.model_runner import _get_graph_batch_size from ...utils import check_outputs_equal @@ -189,7 +189,8 @@ def test_mamba_cache_cg_padding( # This test is for verifying that mamba cache is padded to CG captured # batch size. If it's not, a torch RuntimeError will be raised because # tensor dimensions aren't compatible - while len(example_prompts) == _get_graph_batch_size(len(example_prompts)): + while len(example_prompts) == VllmConfig.get_graph_batch_size( + len(example_prompts)): example_prompts.append(example_prompts[0]) try: diff --git a/tests/models/decoder_only/language/test_mamba.py b/tests/models/decoder_only/language/test_mamba.py index 01e208347bff4..35018c3c14dee 100644 --- a/tests/models/decoder_only/language/test_mamba.py +++ b/tests/models/decoder_only/language/test_mamba.py @@ -5,8 +5,8 @@ import pytest from transformers import AutoModelForCausalLM, AutoTokenizer +from vllm.config import VllmConfig from vllm.sampling_params import SamplingParams -from vllm.worker.model_runner import _get_graph_batch_size from ...utils import check_outputs_equal @@ -200,7 +200,8 @@ def test_mamba_cache_cg_padding( # This test is for verifying that mamba cache is padded to CG captured # batch size. 
If it's not, a torch RuntimeError will be raised because # tensor dimensions aren't compatible - while len(example_prompts) == _get_graph_batch_size(len(example_prompts)): + while len(example_prompts) == VllmConfig.get_graph_batch_size( + len(example_prompts)): example_prompts.append(example_prompts[0]) try: diff --git a/tests/worker/test_encoder_decoder_model_runner.py b/tests/worker/test_encoder_decoder_model_runner.py index 9e166ae64dbfb..5289c91f201cd 100644 --- a/tests/worker/test_encoder_decoder_model_runner.py +++ b/tests/worker/test_encoder_decoder_model_runner.py @@ -4,12 +4,12 @@ import pytest import torch +from vllm.config import VllmConfig from vllm.engine.arg_utils import EngineArgs from vllm.platforms import current_platform from vllm.sequence import SamplingParams, SequenceData, SequenceGroupMetadata from vllm.utils import make_tensor_with_pad from vllm.worker.enc_dec_model_runner import EncoderDecoderModelRunner -from vllm.worker.model_runner import _get_graph_batch_size BATCH_SIZES = [1, 4, 16, 64, 256] @@ -548,7 +548,7 @@ def test_prepare_decode_cuda_graph(batch_size, multiple_seqs_per_seq_group): # With CUDA Graph capture and replay enabled, the decoder and encoder # input sequences will be padded. Create the expected padded tensors # accordingly. - graph_batch_size = _get_graph_batch_size(expanded_batch_size) + graph_batch_size = VllmConfig.get_graph_batch_size(expanded_batch_size) cuda_graph_pad_size = graph_batch_size - expanded_batch_size padded_seq_lens = seq_lens + list(itertools.repeat(1, cuda_graph_pad_size)) padded_encoder_seq_lens = encoder_seq_lens + list( diff --git a/tests/worker/test_model_runner.py b/tests/worker/test_model_runner.py index 433a9b30ba57a..4055524f3e0c7 100644 --- a/tests/worker/test_model_runner.py +++ b/tests/worker/test_model_runner.py @@ -3,13 +3,14 @@ import pytest import torch +from vllm.config import VllmConfig from vllm.distributed.parallel_state import (ensure_model_parallel_initialized, init_distributed_environment) from vllm.engine.arg_utils import EngineArgs from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import SamplingParams, SequenceData, SequenceGroupMetadata from vllm.utils import get_open_port -from vllm.worker.model_runner import ModelRunner, _get_graph_batch_size +from vllm.worker.model_runner import ModelRunner def _create_model_runner(model: str, *args, **kwargs) -> ModelRunner: @@ -176,7 +177,7 @@ def test_prepare_decode_cuda_graph(batch_size): model_input.attn_metadata, model_input.attn_metadata.slot_mapping) assert len(slot_mapping) == len(input_tokens) - expected_bs = _get_graph_batch_size(len(seq_group_metadata_list)) + expected_bs = VllmConfig.get_graph_batch_size(len(seq_group_metadata_list)) # Verify input metadata is correct for prompts. 
device = model_runner.device assert attn_metadata.num_prefills == 0 diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 464bc2af8fd6d..d49a83fe3981f 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -242,10 +242,6 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: assert not self._called, "VllmBackend can only be called once" self.graph = graph - # config is updated now, because only here can - # we get the sizes to capture for cudagraph - # from compilation context - self.compilation_configs.init_during_runtime() self.configure_post_pass() self.split_gm, self.piecewise_graphs = split_graph( diff --git a/vllm/compilation/compile_context.py b/vllm/compilation/compile_context.py deleted file mode 100644 index 29db3d4c637b9..0000000000000 --- a/vllm/compilation/compile_context.py +++ /dev/null @@ -1,23 +0,0 @@ -from contextlib import contextmanager -from typing import Any - -_compile_context: Any = None - - -def get_compile_context() -> Any: - """Get the current compile context.""" - return _compile_context - - -@contextmanager -def set_compile_context(context: Any): - """A context manager that stores the current compile context, - usually it is a list of sizes to specialize. - """ - global _compile_context - prev_context = _compile_context - _compile_context = context - try: - yield - finally: - _compile_context = prev_context diff --git a/vllm/config.py b/vllm/config.py index 5f50d65ec87e1..326340d3fa655 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2357,15 +2357,10 @@ def init_backend(self) -> Union[str, Callable]: from vllm.compilation.backends import VllmBackend return VllmBackend(self) - def init_during_runtime(self): + def init_with_cudagraph_sizes(self, sizes_to_specialize: List[int]): """To complete the initialization of config, - we need to know the compile context, which is only available - during the first run of the model. - """ - from vllm.compilation.compile_context import get_compile_context - context = get_compile_context() - context = copy.deepcopy(context) if context is not None else [] - sizes_to_specialize: List[int] = context + we need to know the cudagraph sizes.""" + if self.cudagraph_capture_sizes is None: self.capture_sizes = sizes_to_specialize else: @@ -2386,6 +2381,21 @@ def init_during_runtime(self): self.inductor_compile_sizes = [] self.compile_sizes = self.inductor_compile_sizes + # sort to make sure cudagraph capture sizes are in descending order + self.capture_sizes.sort(reverse=True) + + +_BATCH_SIZE_ALIGNMENT = 8 +# all the token sizes that **can** be captured by cudagraph. +# they can be arbitrarily large. +# currently it includes: 1, 2, 4, 8, 16, 24, 32, 40, ..., 8192. +# the actual sizes to capture will be determined by the model, +# depending on the model's max_num_seqs. +# NOTE: get_graph_batch_size needs to be updated if this list is changed. +_BATCH_SIZES_TO_CAPTURE = [1, 2, 4] + [ + _BATCH_SIZE_ALIGNMENT * i for i in range(1, 1025) +] + @dataclass class VllmConfig: @@ -2413,6 +2423,41 @@ class VllmConfig: kv_transfer_config: KVTransferConfig = field(default=None, init=True) # type: ignore + @staticmethod + def get_graph_batch_size(batch_size: int) -> int: + """Returns the padded batch size given actual batch size. + + Batch sizes are 1, 2, 4, _BATCH_SIZE_ALIGNMENT, + 2*_BATCH_SIZE_ALIGNMENT, 3*_BATCH_SIZE_ALIGNMENT... 
+ """ + if batch_size <= 2: + return batch_size + elif batch_size <= 4: + return 4 + else: + return ((batch_size + _BATCH_SIZE_ALIGNMENT - 1) // + _BATCH_SIZE_ALIGNMENT * _BATCH_SIZE_ALIGNMENT) + + @staticmethod + def get_max_graph_batch_size(max_num_seqs: int) -> int: + """ + max_num_seqs: Maximum number of sequences in a batch. + _BATCH_SIZES_TO_CAPTURE: all the sizes that we want to capture. + + pad the max_num_seqs if necessary by calling get_graph_batch_size, + which will deal with some edge cases like 1, 2, 4. + + if the padded size is in _BATCH_SIZES_TO_CAPTURE, return the padded + size. if not, it means the padded size is larger than the largest size + in _BATCH_SIZES_TO_CAPTURE, return the largest size in + _BATCH_SIZES_TO_CAPTURE. + """ + padded_size = VllmConfig.get_graph_batch_size(max_num_seqs) + if padded_size in _BATCH_SIZES_TO_CAPTURE: + return padded_size + assert padded_size > _BATCH_SIZES_TO_CAPTURE[-1] + return _BATCH_SIZES_TO_CAPTURE[-1] + @staticmethod def _get_quantization_config( model_config: ModelConfig, @@ -2496,6 +2541,28 @@ def __post_init__(self): self.compilation_config.pass_config.enable_reshape = False self.compilation_config.level = CompilationLevel.PIECEWISE + if not envs.VLLM_USE_V1: + max_batchsize_to_capture = 0 + if self.scheduler_config is not None and \ + self.model_config is not None and \ + not self.model_config.enforce_eager: + max_batchsize_to_capture = \ + self.get_max_graph_batch_size( + self.scheduler_config.max_num_seqs) + batch_size_capture_list = [ + size for size in _BATCH_SIZES_TO_CAPTURE + if size <= max_batchsize_to_capture + ] + else: + batch_size_capture_list = [] + if self.model_config is not None and \ + not self.model_config.enforce_eager: + batch_size_capture_list = [1, 2, 4 + ] + [i for i in range(8, 513, 8)] + + self.compilation_config.init_with_cudagraph_sizes( + batch_size_capture_list) + if self.cache_config is not None and \ self.cache_config.cpu_offload_gb > 0 and \ self.compilation_config.level != CompilationLevel.NO_COMPILATION: diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py index 099ca7e12b288..5d5e8ae1ee532 100644 --- a/vllm/model_executor/models/jamba.py +++ b/vllm/model_executor/models/jamba.py @@ -7,7 +7,7 @@ from vllm.attention.backends.abstract import AttentionMetadata from vllm.attention.layer import Attention -from vllm.config import CacheConfig, VllmConfig +from vllm.config import _BATCH_SIZES_TO_CAPTURE, CacheConfig, VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size from vllm.model_executor.layers.fused_moe import FusedMoE from vllm.model_executor.layers.layernorm import RMSNorm @@ -25,8 +25,6 @@ MambaCacheParams) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -from vllm.worker.model_runner import (_BATCH_SIZES_TO_CAPTURE, - _get_graph_batch_size) from .interfaces import HasInnerState, SupportsLoRA from .utils import maybe_prefix @@ -404,7 +402,7 @@ def forward(self, inputs_embeds: Optional[torch.Tensor] = None, **kwargs): if self.mamba_cache is None: - max_batch_size = (_get_graph_batch_size( + max_batch_size = (VllmConfig.get_graph_batch_size( self.scheduler_config.max_num_seqs) if self.scheduler_config else max(_BATCH_SIZES_TO_CAPTURE) + 2) diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index ac0d265a961f0..b32032e411b0a 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -6,7 +6,7 @@ from 
transformers import MambaConfig from vllm.attention.backends.abstract import AttentionMetadata -from vllm.config import CacheConfig, VllmConfig +from vllm.config import _BATCH_SIZES_TO_CAPTURE, CacheConfig, VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.logits_processor import LogitsProcessor @@ -23,8 +23,6 @@ MambaCacheParams) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -from vllm.worker.model_runner import (_BATCH_SIZES_TO_CAPTURE, - _get_graph_batch_size) from .utils import maybe_prefix @@ -187,7 +185,7 @@ def forward(self, inputs_embeds: Optional[torch.Tensor] = None, **kwargs): if self.mamba_cache is None: - max_batch_size = (_get_graph_batch_size( + max_batch_size = (VllmConfig.get_graph_batch_size( self.scheduler_config.max_num_seqs) if self.scheduler_config else max(_BATCH_SIZES_TO_CAPTURE) + 2) self.mamba_cache = MambaCacheManager( diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 1fa47f553dfd6..4692762493f00 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -8,7 +8,6 @@ import torch.distributed import torch.nn as nn -from vllm.compilation.compile_context import set_compile_context from vllm.config import CompilationLevel, VllmConfig from vllm.distributed.parallel_state import graph_capture from vllm.forward_context import set_forward_context @@ -100,7 +99,11 @@ def __init__( == CompilationLevel.PIECEWISE and not self.model_config.enforce_eager) # TODO(woosuk): Provide an option to tune the max cudagraph batch size. - self.cudagraph_batch_sizes = [1, 2, 4] + [i for i in range(8, 513, 8)] + # The convention is different. + # self.cudagraph_batch_sizes sorts in ascending order. + # The batch sizes in the config are in descending order. + self.cudagraph_batch_sizes = list( + reversed(self.vllm_config.compilation_config.capture_sizes)) self.positions = torch.zeros(self.max_num_tokens, dtype=torch.int64, device=self.device) @@ -548,10 +551,9 @@ def profile_run(self) -> None: torch.tensor([], dtype=torch.float32, device=self.device) for _ in range(self.num_attn_layers) ] - with set_compile_context(self.cudagraph_batch_sizes): - # Trigger compilation for general shape. - hidden_states = self._dummy_run(self.model, self.max_num_tokens, - dummy_kv_caches) + # Trigger compilation for general shape. + hidden_states = self._dummy_run(self.model, self.max_num_tokens, + dummy_kv_caches) logits = self.model.compute_logits(hidden_states, None) logits = logits[:self.max_num_tokens] # TODO(woosuk): Consider the memory usage of the sampler. diff --git a/vllm/worker/enc_dec_model_runner.py b/vllm/worker/enc_dec_model_runner.py index ae18c79c980c8..5697fbbaa2041 100644 --- a/vllm/worker/enc_dec_model_runner.py +++ b/vllm/worker/enc_dec_model_runner.py @@ -25,8 +25,7 @@ from vllm.utils import STR_NOT_IMPL_ENC_DEC_BACKEND, make_tensor_with_pad from vllm.worker.model_runner import (GPUModelRunnerBase, ModelInputForGPUBuilder, - ModelInputForGPUWithSamplingMetadata, - _get_graph_batch_size) + ModelInputForGPUWithSamplingMetadata) from vllm.worker.model_runner_base import ( _add_attn_metadata_broadcastable_dict, _add_sampling_metadata_broadcastable_dict) @@ -465,7 +464,8 @@ def _prepare_encoder_model_input_tensors( # We will be using CUDA graph replay for this decode. 
max_len_of_block_table = self.get_max_block_per_batch() batch_size = len(encoder_seq_lens) - graph_batch_size = _get_graph_batch_size(batch_size) + graph_batch_size = self.vllm_config.get_graph_batch_size( + batch_size) assert graph_batch_size >= batch_size cuda_graph_pad_size = graph_batch_size - batch_size # extend the cross_block_tables and encoder_seq_lens to match diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index c9f06eef3f907..4388b3c1ee164 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -18,7 +18,6 @@ from vllm.attention import AttentionMetadata, get_attn_backend from vllm.attention.backends.abstract import AttentionState from vllm.attention.backends.utils import CommonAttentionState -from vllm.compilation.compile_context import set_compile_context from vllm.config import CompilationLevel, VllmConfig from vllm.core.scheduler import SchedulerOutputs from vllm.distributed import get_kv_transfer_group, get_pp_group @@ -63,16 +62,7 @@ logger = init_logger(__name__) LORA_WARMUP_RANK = 8 -_BATCH_SIZE_ALIGNMENT = 8 -# all the token sizes that **can** be captured by cudagraph. -# they can be arbitrarily large. -# currently it includes: 1, 2, 4, 8, 16, 24, 32, 40, ..., 8192. -# the actual sizes to capture will be determined by the model, -# depending on the model's max_num_seqs. -# NOTE: _get_graph_batch_size needs to be updated if this list is changed. -_BATCH_SIZES_TO_CAPTURE = [1, 2, 4] + [ - _BATCH_SIZE_ALIGNMENT * i for i in range(1, 1025) -] + _NUM_WARMUP_ITERS = 2 TModelInputForGPU = TypeVar('TModelInputForGPU', bound="ModelInputForGPU") @@ -763,7 +753,6 @@ def _use_captured_graph(self, max_decode_seq_len: int, max_encoder_seq_len: int = 0) -> bool: return (decode_only and not self.runner.model_config.enforce_eager - and batch_size <= _BATCH_SIZES_TO_CAPTURE[-1] and max_decode_seq_len <= self.runner.max_seq_len_to_capture and max_encoder_seq_len <= self.runner.max_seq_len_to_capture and batch_size <= self.runner.max_batchsize_to_capture) @@ -811,7 +800,7 @@ def _get_cuda_graph_pad_size(self, max_encoder_seq_len): return -1 - graph_batch_size = _get_graph_batch_size(batch_size) + graph_batch_size = VllmConfig.get_graph_batch_size(batch_size) assert graph_batch_size >= batch_size return graph_batch_size - batch_size @@ -1023,7 +1012,7 @@ def __init__( self.sliding_window = model_config.get_sliding_window() self.block_size = cache_config.block_size self.max_seq_len_to_capture = self.model_config.max_seq_len_to_capture - self.max_batchsize_to_capture = _get_max_graph_batch_size( + self.max_batchsize_to_capture = VllmConfig.get_max_graph_batch_size( self.scheduler_config.max_num_seqs) self.graph_runners: List[Dict[int, CUDAGraphRunner]] = [ @@ -1333,14 +1322,7 @@ def profile_run(self) -> None: dtype=self.model_config.dtype, device=self.device) - graph_batch_size = self.max_batchsize_to_capture - batch_size_capture_list = [ - bs for bs in _BATCH_SIZES_TO_CAPTURE if bs <= graph_batch_size - ] - if self.model_config.enforce_eager: - batch_size_capture_list = [] - with set_compile_context(batch_size_capture_list): - self.execute_model(model_input, kv_caches, intermediate_tensors) + self.execute_model(model_input, kv_caches, intermediate_tensors) torch.cuda.synchronize() return @@ -1459,18 +1441,14 @@ def capture_model(self, kv_caches: List[List[torch.Tensor]]) -> None: dtype=self.model_config.dtype, device=self.device) - graph_batch_size = self.max_batchsize_to_capture - batch_size_capture_list = [ - bs for bs in _BATCH_SIZES_TO_CAPTURE 
if bs <= graph_batch_size - ] - with self.attn_state.graph_capture( max_batch_size), graph_capture() as graph_capture_context: # NOTE: Capturing the largest batch size first may help reduce the # memory usage of CUDA graph. for virtual_engine in range( self.parallel_config.pipeline_parallel_size): - for batch_size in reversed(batch_size_capture_list): + for batch_size in \ + self.vllm_config.compilation_config.capture_sizes: attn_metadata = ( self.attn_state.graph_capture_get_metadata_for_batch( batch_size, @@ -1993,37 +1971,3 @@ def forward( return self.output_buffers["hidden_states"] return self.output_buffers - - -def _get_graph_batch_size(batch_size: int) -> int: - """Returns the padded batch size given actual batch size. - - Batch sizes are 1, 2, 4, _BATCH_SIZE_ALIGNMENT, - 2*_BATCH_SIZE_ALIGNMENT, 3*_BATCH_SIZE_ALIGNMENT... - """ - if batch_size <= 2: - return batch_size - elif batch_size <= 4: - return 4 - else: - return ((batch_size + _BATCH_SIZE_ALIGNMENT - 1) // - _BATCH_SIZE_ALIGNMENT * _BATCH_SIZE_ALIGNMENT) - - -def _get_max_graph_batch_size(max_num_seqs: int) -> int: - """ - max_num_seqs: Maximum number of sequences in a batch. - _BATCH_SIZES_TO_CAPTURE: all the sizes that we want to capture. - - pad the max_num_seqs if necessary by calling _get_graph_batch_size, - which will deal with some edge cases like 1, 2, 4. - - if the padded size is in _BATCH_SIZES_TO_CAPTURE, return the padded size. - if not, it means the padded size is larger than the largest size in - _BATCH_SIZES_TO_CAPTURE, return the largest size in _BATCH_SIZES_TO_CAPTURE. - """ - padded_size = _get_graph_batch_size(max_num_seqs) - if padded_size in _BATCH_SIZES_TO_CAPTURE: - return padded_size - assert padded_size > _BATCH_SIZES_TO_CAPTURE[-1] - return _BATCH_SIZES_TO_CAPTURE[-1] From ef51831ee8dbd64833b25e042d4e984d169202f9 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 3 Dec 2024 01:46:07 -0500 Subject: [PATCH 217/397] [Doc] Add github links for source code references (#10672) Signed-off-by: Russell Bryant Signed-off-by: DarkLight1337 Co-authored-by: DarkLight1337 --- docs/requirements-docs.txt | 3 +- docs/source/conf.py | 66 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt index 8ea240f59c38f..5c80645b405ae 100644 --- a/docs/requirements-docs.txt +++ b/docs/requirements-docs.txt @@ -16,4 +16,5 @@ mistral_common >= 1.5.0 aiohttp starlette openai # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args -partial-json-parser # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args \ No newline at end of file +partial-json-parser # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args +requests diff --git a/docs/source/conf.py b/docs/source/conf.py index 96ad9a4c26b09..4a1a5fb455ff3 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -10,11 +10,13 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
+import inspect import logging import os import sys from typing import List +import requests from sphinx.ext import autodoc logger = logging.getLogger(__name__) @@ -34,6 +36,7 @@ extensions = [ "sphinx.ext.napoleon", "sphinx.ext.viewcode", + "sphinx.ext.linkcode", "sphinx.ext.intersphinx", "sphinx_copybutton", "sphinx.ext.autodoc", @@ -94,6 +97,69 @@ def setup(app): generate_examples() +_cached_base: str = "" +_cached_branch: str = "" + + +def get_repo_base_and_branch(pr_number): + global _cached_base, _cached_branch + if _cached_base and _cached_branch: + return _cached_base, _cached_branch + + url = f"https://api.github.com/repos/vllm-project/vllm/pulls/{pr_number}" + response = requests.get(url) + if response.status_code == 200: + data = response.json() + _cached_base = data['head']['repo']['full_name'] + _cached_branch = data['head']['ref'] + return _cached_base, _cached_branch + else: + logger.error("Failed to fetch PR details: %s", response) + return None, None + + +def linkcode_resolve(domain, info): + if domain != 'py': + return None + if not info['module']: + return None + filename = info['module'].replace('.', '/') + module = info['module'] + + # try to determine the correct file and line number to link to + obj = sys.modules[module] + + # get as specific as we can + lineno: int = 0 + filename: str = "" + try: + for part in info['fullname'].split('.'): + obj = getattr(obj, part) + + if not (inspect.isclass(obj) or inspect.isfunction(obj) + or inspect.ismethod(obj)): + obj = obj.__class__ # Get the class of the instance + + lineno = inspect.getsourcelines(obj)[1] + filename = (inspect.getsourcefile(obj) + or f"{filename}.py").split("vllm/", 1)[1] + except Exception: + # For some things, like a class member, won't work, so + # we'll use the line number of the parent (the class) + pass + + if filename.startswith("checkouts/"): + # a PR build on readthedocs + pr_number = filename.split("/")[1] + filename = filename.split("/", 2)[2] + base, branch = get_repo_base_and_branch(pr_number) + if base and branch: + return f"https://github.com/{base}/blob/{branch}/{filename}#L{lineno}" + + # Otherwise, link to the source file on the main branch + return f"https://github.com/vllm-project/vllm/blob/main/{filename}#L{lineno}" + + # Mock out external dependencies here, otherwise the autodoc pages may be blank. 
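
# Editor's note: a stripped-down sketch of what sphinx.ext.linkcode needs from
# conf.py, to make the hook added above easier to follow. Sphinx calls a
# function named linkcode_resolve(domain, info); for the "py" domain, info
# carries "module" and "fullname", and returning None simply omits the link.
# The repository path below is a placeholder, and the real hook above also
# trims absolute paths and handles PR builds on readthedocs.

import importlib
import inspect

def linkcode_resolve_sketch(domain, info):
    if domain != "py" or not info.get("module"):
        return None
    obj = importlib.import_module(info["module"])
    for part in info["fullname"].split("."):
        obj = getattr(obj, part, None)
        if obj is None:
            return None
    try:
        filename = inspect.getsourcefile(obj)
        lineno = inspect.getsourcelines(obj)[1]
    except (TypeError, OSError):
        return None
    return f"https://github.com/<org>/<repo>/blob/main/{filename}#L{lineno}"
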
autodoc_mock_imports = [ "compressed_tensors", From 3257d449fa0fd3e05aa20cc8c5fff79ad101984f Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 3 Dec 2024 14:52:57 +0800 Subject: [PATCH 218/397] [Misc] Remove deprecated names (#10817) Signed-off-by: DarkLight1337 --- vllm/engine/async_llm_engine.py | 8 +++++-- vllm/engine/llm_engine.py | 5 ++-- vllm/engine/multiprocessing/__init__.py | 5 +++- vllm/engine/multiprocessing/client.py | 7 ++++-- vllm/entrypoints/llm.py | 11 +++++++++ vllm/inputs/__init__.py | 31 ------------------------- vllm/inputs/data.py | 31 ------------------------- vllm/model_executor/models/aria.py | 5 ++-- vllm/multimodal/__init__.py | 15 ------------ vllm/multimodal/base.py | 15 ------------ 10 files changed, 31 insertions(+), 102 deletions(-) diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 7b1bb7b05708d..4395588d29cda 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -6,6 +6,8 @@ List, Mapping, Optional, Set, Tuple, Type, Union, overload) from weakref import ReferenceType +from typing_extensions import deprecated + import vllm.envs as envs from vllm.config import (DecodingConfig, LoRAConfig, ModelConfig, ParallelConfig, SchedulerConfig, VllmConfig) @@ -422,7 +424,8 @@ async def get_tokenizer_async(self, return await ( self.get_tokenizer_group().get_lora_tokenizer_async(lora_request)) - @overload # DEPRECATED + @overload + @deprecated("'inputs' will be renamed to 'prompt") async def add_request_async( self, request_id: str, @@ -894,7 +897,8 @@ async def run_engine_loop(engine_ref: ReferenceType): # This method does not need to be async, but kept that way # for backwards compatibility. - @overload # DEPRECATED + @overload + @deprecated("'inputs' will be renamed to 'prompt") def add_request( self, request_id: str, diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 7911dc8d04500..dd55aa2818621 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -10,7 +10,7 @@ from typing import Set, Type, Union, cast, overload import torch -from typing_extensions import TypeVar +from typing_extensions import TypeVar, deprecated import vllm.envs as envs from vllm.config import (DecodingConfig, LoRAConfig, ModelConfig, @@ -719,7 +719,8 @@ def _add_processed_request( def stop_remote_worker_execution_loop(self) -> None: self.model_executor.stop_remote_worker_execution_loop() - @overload # DEPRECATED + @overload + @deprecated("'inputs' will be renamed to 'prompt") def add_request( self, request_id: str, diff --git a/vllm/engine/multiprocessing/__init__.py b/vllm/engine/multiprocessing/__init__.py index 34c161e9395ae..7020012e8bb86 100644 --- a/vllm/engine/multiprocessing/__init__.py +++ b/vllm/engine/multiprocessing/__init__.py @@ -2,6 +2,8 @@ from enum import Enum from typing import List, Mapping, Optional, Union, overload +from typing_extensions import deprecated + from vllm import PoolingParams from vllm.inputs import PromptType from vllm.lora.request import LoRARequest @@ -32,7 +34,8 @@ class RPCProcessRequest: prompt_adapter_request: Optional[PromptAdapterRequest] = None priority: int = 0 - @overload # DEPRECATED + @overload + @deprecated("'inputs' will be renamed to 'prompt") def __init__( self, *, diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index d26728e8c6e67..8383e774db20f 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -9,6 +9,7 @@ import psutil import zmq import 
zmq.asyncio +from typing_extensions import deprecated from zmq import Frame # type: ignore[attr-defined] from zmq.asyncio import Socket @@ -414,7 +415,8 @@ def errored(self) -> bool: def dead_error(self) -> BaseException: return ENGINE_DEAD_ERROR(self._errored_with) - @overload # DEPRECATED + @overload + @deprecated("'inputs' will be renamed to 'prompt") def generate( self, *, @@ -485,7 +487,8 @@ def generate( lora_request, trace_headers, prompt_adapter_request, priority) - @overload # DEPRECATED + @overload + @deprecated("'inputs' will be renamed to 'prompt") def encode( self, *, diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index a25c401b4ea10..65fa9873df28c 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -6,6 +6,7 @@ Union, cast, overload) from tqdm import tqdm +from typing_extensions import deprecated from vllm import envs from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput, @@ -256,6 +257,7 @@ def set_tokenizer(self, tokenizer: AnyTokenizer) -> None: tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer) @overload # LEGACY: single (prompt + optional token ids) + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: str, @@ -268,6 +270,7 @@ def generate( ... @overload # LEGACY: multi (prompt + optional token ids) + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: List[str], @@ -280,6 +283,7 @@ def generate( ... @overload # LEGACY: single (token ids + optional prompt) + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: Optional[str] = None, @@ -293,6 +297,7 @@ def generate( ... @overload # LEGACY: multi (token ids + optional prompt) + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: Optional[List[str]] = None, @@ -306,6 +311,7 @@ def generate( ... @overload # LEGACY: single or multi token ids [pos-only] + @deprecated("'prompt_token_ids' will become part of 'prompts") def generate( self, prompts: None, @@ -671,6 +677,7 @@ def chat( ) @overload # LEGACY: single (prompt + optional token ids) + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: str, @@ -683,6 +690,7 @@ def encode( ... @overload # LEGACY: multi (prompt + optional token ids) + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: List[str], @@ -695,6 +703,7 @@ def encode( ... @overload # LEGACY: single (token ids + optional prompt) + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: Optional[str] = None, @@ -708,6 +717,7 @@ def encode( ... @overload # LEGACY: multi (token ids + optional prompt) + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: Optional[List[str]] = None, @@ -721,6 +731,7 @@ def encode( ... @overload # LEGACY: single or multi token ids [pos-only] + @deprecated("'prompt_token_ids' will become part of 'prompts") def encode( self, prompts: None, diff --git a/vllm/inputs/__init__.py b/vllm/inputs/__init__.py index 54fbd7a321a6f..d4402e77a3886 100644 --- a/vllm/inputs/__init__.py +++ b/vllm/inputs/__init__.py @@ -38,34 +38,3 @@ "InputProcessingContext", "InputRegistry", ] - - -def __getattr__(name: str): - import warnings - - if name == "PromptInput": - msg = ("PromptInput has been renamed to PromptType. 
" - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return PromptType - - if name == "LLMInputs": - msg = ("LLMInputs has been renamed to DecoderOnlyInputs. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return DecoderOnlyInputs - - if name == "EncoderDecoderLLMInputs": - msg = ( - "EncoderDecoderLLMInputs has been renamed to EncoderDecoderInputs. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return EncoderDecoderInputs - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/inputs/data.py b/vllm/inputs/data.py index fb7dbbebd7b90..e8fc78f1a66f6 100644 --- a/vllm/inputs/data.py +++ b/vllm/inputs/data.py @@ -358,34 +358,3 @@ def to_enc_dec_tuple_list( return [(enc_dec_prompt["encoder_prompt"], enc_dec_prompt["decoder_prompt"]) for enc_dec_prompt in enc_dec_prompts] - - -def __getattr__(name: str): - import warnings - - if name == "PromptInput": - msg = ("PromptInput has been renamed to PromptType. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return PromptType - - if name == "LLMInputs": - msg = ("LLMInputs has been renamed to DecoderOnlyInputs. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return DecoderOnlyInputs - - if name == "EncoderDecoderLLMInputs": - msg = ( - "EncoderDecoderLLMInputs has been renamed to EncoderDecoderInputs. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return EncoderDecoderInputs - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index fa6b95f5481ad..dd4b0c75cb84d 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -32,9 +32,8 @@ maybe_prefix, merge_multimodal_embeddings) from vllm.multimodal import MULTIMODAL_REGISTRY -from vllm.multimodal.base import MultiModalInputs from vllm.multimodal.image import cached_get_image_processor -from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.inputs import MultiModalKwargs, NestedTensors from vllm.multimodal.utils import (cached_get_tokenizer, repeat_and_pad_placeholder_tokens) from vllm.sequence import IntermediateTensors @@ -451,7 +450,7 @@ def get_max_multimodal_tokens(ctx): def input_mapper_for_aria(ctx, data): - return MultiModalInputs(data) + return MultiModalKwargs(data) def input_processor(ctx, llm_inputs): diff --git a/vllm/multimodal/__init__.py b/vllm/multimodal/__init__.py index 03a5f3a91f7a1..928c31a2f2843 100644 --- a/vllm/multimodal/__init__.py +++ b/vllm/multimodal/__init__.py @@ -27,18 +27,3 @@ "MULTIMODAL_REGISTRY", "MultiModalRegistry", ] - - -def __getattr__(name: str): - import warnings - - if name == "MultiModalInputs": - msg = ("MultiModalInputs has been renamed to MultiModalKwargs. 
" - "The original name will take another meaning in an upcoming " - "version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return MultiModalKwargs - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/multimodal/base.py b/vllm/multimodal/base.py index bbb8fb4bc1cd1..f93722523728d 100644 --- a/vllm/multimodal/base.py +++ b/vllm/multimodal/base.py @@ -433,18 +433,3 @@ def index_map(self) -> "IndexMap": return MultiModalPlaceholderMap.IndexMap(src=src_indices, dest=dest_indices) - - -def __getattr__(name: str): - import warnings - - if name == "MultiModalInputs": - msg = ("MultiModalInputs has been renamed to MultiModalKwargs. " - "The original name will take another meaning in an upcoming " - "version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return MultiModalKwargs - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") From 9323a3153b20d4a2ca7ac04a2784609d6ce656e0 Mon Sep 17 00:00:00 2001 From: Aaron Pham Date: Tue, 3 Dec 2024 02:17:00 -0500 Subject: [PATCH 219/397] [Core][Performance] Add XGrammar support for guided decoding and set it as default (#10785) Signed-off-by: Aaron Pham Signed-off-by: mgoin Co-authored-by: mgoin --- docs/source/conf.py | 1 + requirements-common.txt | 1 + tests/entrypoints/llm/test_guided_generate.py | 27 ++ .../model_executor/test_guided_processors.py | 3 +- vllm/config.py | 15 +- vllm/engine/arg_utils.py | 9 +- vllm/engine/async_llm_engine.py | 18 +- vllm/engine/llm_engine.py | 15 +- vllm/engine/multiprocessing/client.py | 5 +- .../guided_decoding/__init__.py | 73 ++++- .../guided_decoding/xgrammar_decoding.py | 251 ++++++++++++++++++ 11 files changed, 385 insertions(+), 33 deletions(-) create mode 100644 vllm/model_executor/guided_decoding/xgrammar_decoding.py diff --git a/docs/source/conf.py b/docs/source/conf.py index 4a1a5fb455ff3..e9d9ac68c9560 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -178,6 +178,7 @@ def linkcode_resolve(domain, info): "tensorizer", "pynvml", "outlines", + "xgrammar," "librosa", "soundfile", "gguf", diff --git a/requirements-common.txt b/requirements-common.txt index 02e3d65fb774c..818f72e14be96 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -19,6 +19,7 @@ prometheus-fastapi-instrumentator >= 7.0.0 tiktoken >= 0.6.0 # Required for DBRX tokenizer lm-format-enforcer >= 0.10.9, < 0.11 outlines >= 0.0.43, < 0.1 +xgrammar typing_extensions >= 4.10 filelock >= 3.16.1 # need to contain https://github.com/tox-dev/filelock/pull/317 partial-json-parser # used for parsing partial JSON outputs diff --git a/tests/entrypoints/llm/test_guided_generate.py b/tests/entrypoints/llm/test_guided_generate.py index 67c79415f322a..c3706f696b264 100644 --- a/tests/entrypoints/llm/test_guided_generate.py +++ b/tests/entrypoints/llm/test_guided_generate.py @@ -159,3 +159,30 @@ def test_validation_against_both_guided_decoding_options(sample_regex, llm): sampling_params=sampling_params, use_tqdm=True, guided_options_request=dict(guided_regex=sample_regex)) + + +@pytest.mark.skip_global_cleanup +def test_guided_json_object(llm): + sampling_params = SamplingParams( + temperature=1.0, + max_tokens=100, + guided_decoding=GuidedDecodingParams(json_object=True)) + + outputs = llm.generate( + prompts=("Generate a JSON object describing a person with name " + "and age for John Smith who is 31 years old."), + sampling_params=sampling_params, + use_tqdm=True) + + assert outputs is not None + for output in outputs: + assert output is not 
None + assert isinstance(output, RequestOutput) + + generated_text = output.outputs[0].text + print(generated_text) + assert generated_text is not None + + # Parse to verify it is valid JSON + parsed_json = json.loads(generated_text) + assert isinstance(parsed_json, dict) diff --git a/tests/model_executor/test_guided_processors.py b/tests/model_executor/test_guided_processors.py index 45fab8e96b968..9f4d81b583141 100644 --- a/tests/model_executor/test_guided_processors.py +++ b/tests/model_executor/test_guided_processors.py @@ -36,7 +36,8 @@ def test_guided_logits_processors(sample_regex, sample_json_schema): @pytest.mark.asyncio -@pytest.mark.parametrize("backend", ["outlines", "lm-format-enforcer"]) +@pytest.mark.parametrize("backend", + ["outlines", "lm-format-enforcer", "xgrammar"]) async def test_guided_logits_processor_black_box(backend: str, sample_regex, sample_json_schema): tokenizer = AutoTokenizer.from_pretrained('HuggingFaceH4/zephyr-7b-beta') diff --git a/vllm/config.py b/vllm/config.py index 326340d3fa655..971eb36d677b8 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1789,15 +1789,15 @@ class PoolerConfig: step_tag_id: Optional[int] = None """ - If set, only the score corresponding to the ``step_tag_id`` in the + If set, only the score corresponding to the ``step_tag_id`` in the generated sentence should be returned. Otherwise, the scores for all tokens are returned. """ returned_token_ids: Optional[List[int]] = None """ - A list of indices for the vocabulary dimensions to be extracted, - such as the token IDs of ``good_token`` and ``bad_token`` in the + A list of indices for the vocabulary dimensions to be extracted, + such as the token IDs of ``good_token`` and ``bad_token`` in the ``math-shepherd-mistral-7b-prm`` model. """ @@ -2031,11 +2031,12 @@ def get_served_model_name(model: str, class DecodingConfig: """Dataclass which contains the decoding strategy of the engine""" - # Which guided decoding algo to use. 'outlines' / 'lm-format-enforcer' - guided_decoding_backend: str = 'outlines' + # Which guided decoding algo to use. + # 'outlines' / 'lm-format-enforcer' / 'xgrammar' + guided_decoding_backend: str = 'xgrammar' def __post_init__(self): - valid_guided_backends = ['outlines', 'lm-format-enforcer'] + valid_guided_backends = ['outlines', 'lm-format-enforcer', 'xgrammar'] backend = self.guided_decoding_backend if backend not in valid_guided_backends: raise ValueError(f"Invalid guided_decoding_backend '{backend}," @@ -2222,7 +2223,7 @@ class CompilationConfig(BaseModel): from Python, functions can also be passed directly via Python object constructor, e.g. `CompilationConfig(inductor_passes={"a": func})` - custom inductor passes: see PassConfig for more details - + Why we have different sizes for cudagraph and inductor: - cudagraph: a cudagraph captured for a specific size can only be used for the same size. We need to capture all the sizes we want to use. diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 4aa0eebd976c9..3b776c1d9d39f 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -168,7 +168,7 @@ class EngineArgs: scheduler_delay_factor: float = 0.0 enable_chunked_prefill: Optional[bool] = None - guided_decoding_backend: str = 'outlines' + guided_decoding_backend: str = 'xgrammar' # Speculative decoding configuration. 
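
# Editor's note: an offline usage sketch mirroring the test_guided_json_object
# test added above, with the backend made explicit (it is now the default).
# The model name is a placeholder; any vLLM-served model should behave the same.

from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams

llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct",      # placeholder model
          guided_decoding_backend="xgrammar")      # same as the new default

params = SamplingParams(
    temperature=1.0,
    max_tokens=100,
    guided_decoding=GuidedDecodingParams(json_object=True),
)
outputs = llm.generate(
    "Generate a JSON object describing a person with name and age "
    "for John Smith who is 31 years old.",
    params)
print(outputs[0].outputs[0].text)

# Note: later in this patch, guided_decoding/__init__.py routes regex and
# choice constraints back to outlines, so only json / json_object / EBNF
# grammar requests are actually handled by xgrammar.
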
speculative_model: Optional[str] = None speculative_model_quantization: Optional[str] = None @@ -364,11 +364,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: parser.add_argument( '--guided-decoding-backend', type=str, - default='outlines', - choices=['outlines', 'lm-format-enforcer'], + default='xgrammar', + choices=['outlines', 'lm-format-enforcer', 'xgrammar'], help='Which engine will be used for guided decoding' ' (JSON schema / regex etc) by default. Currently support ' - 'https://github.com/outlines-dev/outlines and ' + 'https://github.com/outlines-dev/outlines,' + 'https://github.com/mlc-ai/xgrammar, and ' 'https://github.com/noamgat/lm-format-enforcer.' ' Can be overridden per request via guided_decoding_backend' ' parameter.') diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 4395588d29cda..60dccd7a0812c 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -1,4 +1,5 @@ import asyncio +import copy import time import weakref from functools import partial @@ -507,7 +508,8 @@ async def add_request_async( sampling_params=params, tokenizer=await self.get_tokenizer_async(lora_request), default_guided_backend=self.decoding_config. - guided_decoding_backend) + guided_decoding_backend, + model_config=self.model_config) self._add_processed_request( request_id=request_id, @@ -528,22 +530,30 @@ async def check_health_async(self) -> None: async def build_guided_decoding_logits_processor_async( sampling_params: SamplingParams, tokenizer: AnyTokenizer, - default_guided_backend: str) -> SamplingParams: + default_guided_backend: str, + model_config: ModelConfig) -> SamplingParams: """Constructs logits processors based on the guided_decoding, logits_bias, and allowed_token_ids fields in sampling_params. Deletes those fields and adds the constructed logits processors to the logits_processors field. Modifies sampling params in-place and returns the modified sampling params.""" - if (guided_decoding := sampling_params.guided_decoding) is None: + if sampling_params.guided_decoding is None: return sampling_params + # Defensively copy sampling params since guided decoding logits + # processors can have different state for each request + sampling_params = copy.copy(sampling_params) + guided_decoding = sampling_params.guided_decoding + logger.debug("Building guided decoding logits processor. " "Params: %s", guided_decoding) guided_decoding.backend = guided_decoding.backend or default_guided_backend processor = await get_guided_decoding_logits_processor( - guided_params=guided_decoding, tokenizer=tokenizer) + guided_params=guided_decoding, + tokenizer=tokenizer, + model_config=model_config) if processor: if sampling_params.logits_processors is None: diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index dd55aa2818621..af66b307028cf 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -1,3 +1,4 @@ +import copy import time from collections import Counter as collectionsCounter from collections import deque @@ -1024,9 +1025,9 @@ def _update_num_computed_tokens_for_multi_step_prefill( This function updates num_computed_tokens for prompt sequences when Multi-Step is enabled. - seq_group: SequenceGroup to update the num_computed_tokens for. + seq_group: SequenceGroup to update the num_computed_tokens for. seq_group_meta: Metadata of the given SequenceGroup. 
- is_first_step_output: Optional[bool] - + is_first_step_output: Optional[bool] - When available, is_first_step_output indicates if the appended output token is the output of the first-step in multi-step. A value of None indicates that outputs from all steps in @@ -2036,7 +2037,11 @@ def _build_logits_processors( logits_processors = [] - if (guided_decoding := sampling_params.guided_decoding) is not None: + if sampling_params.guided_decoding is not None: + # Defensively copy sampling params since guided decoding logits + # processors can have different state for each request + sampling_params = copy.copy(sampling_params) + guided_decoding = sampling_params.guided_decoding logger.debug( "Building guided decoding logits processor in " @@ -2047,7 +2052,9 @@ def _build_logits_processors( self.decoding_config.guided_decoding_backend processor = get_local_guided_decoding_logits_processor( - guided_params=guided_decoding, tokenizer=tokenizer) + guided_params=guided_decoding, + tokenizer=tokenizer, + model_config=self.model_config) if processor: logits_processors.append(processor) diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index 8383e774db20f..d21136c03d7d2 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -474,8 +474,8 @@ def generate( trace_headers: OpenTelemetry trace headers. prompt_adapter_request: Prompt Adapter request to use for generation, if any. - priority: Priority of the request (lower means earlier handling). - Any priority other than 0 will lead to an error if the + priority: Priority of the request (lower means earlier handling). + Any priority other than 0 will lead to an error if the scheduling policy is not "priority". """ if inputs is not None: @@ -589,6 +589,7 @@ async def _process_request( default_guided_backend=(self.decoding_config.guided_decoding_backend if self.decoding_config else DecodingConfig.guided_decoding_backend), + model_config=self.model_config ) # 1) Create output queue for this requests. diff --git a/vllm/model_executor/guided_decoding/__init__.py b/vllm/model_executor/guided_decoding/__init__.py index d7b67425fcbc0..23c31fcfd7f05 100644 --- a/vllm/model_executor/guided_decoding/__init__.py +++ b/vllm/model_executor/guided_decoding/__init__.py @@ -1,14 +1,54 @@ -from typing import Optional +from __future__ import annotations -from vllm.logits_process import LogitsProcessor -from vllm.sampling_params import GuidedDecodingParams +from typing import TYPE_CHECKING + +from vllm.logger import init_logger + +if TYPE_CHECKING: + from transformers import PreTrainedTokenizer + + from vllm.config import ModelConfig + from vllm.logits_process import LogitsProcessor + from vllm.sampling_params import GuidedDecodingParams + +logger = init_logger(__name__) + + +def maybe_backend_fallback( + guided_params: GuidedDecodingParams) -> GuidedDecodingParams: + # lm-format-enforce doesn't support grammar, fallback to xgrammar + if (guided_params.backend == "lm-format-enforcer" + and guided_params.grammar is not None): + logger.warning( + "lm-format-enforcer does not support grammar guided decoding. " + "Falling back to use xgrammar instead.") + guided_params.backend = "xgrammar" + + if guided_params.backend == "xgrammar": + # xgrammar doesn't support regex or choice, fallback to outlines + if guided_params.regex is not None or guided_params.choice is not None: + logger.warning( + "xgrammar only supports json or grammar guided decoding. 
" + "Falling back to use outlines instead.") + guided_params.backend = "outlines" + + # xgrammar only supports EBNF grammars and uses the GBNF format + # https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md + elif (guided_params.grammar is not None + and "::=" not in guided_params.grammar): + logger.warning("xgrammar only supports EBNF grammars. " + "Falling back to use outlines instead.") + guided_params.backend = "outlines" + + return guided_params async def get_guided_decoding_logits_processor( - guided_params: GuidedDecodingParams, - tokenizer) -> Optional[LogitsProcessor]: + guided_params: GuidedDecodingParams, tokenizer: PreTrainedTokenizer, + model_config: ModelConfig) -> LogitsProcessor | None: + guided_params = maybe_backend_fallback(guided_params) # CFG grammar not supported by LMFE, so we use outlines instead - if guided_params.backend == 'outlines' or guided_params.grammar: + if guided_params.backend == 'outlines': # NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193 from vllm.model_executor.guided_decoding.outlines_decoding import ( # noqa get_outlines_guided_decoding_logits_processor) @@ -19,17 +59,23 @@ async def get_guided_decoding_logits_processor( get_local_lm_format_enforcer_guided_decoding_logits_processor) return get_local_lm_format_enforcer_guided_decoding_logits_processor( guided_params, tokenizer) + if guided_params.backend == 'xgrammar': + from vllm.model_executor.guided_decoding.xgrammar_decoding import ( # noqa + get_local_xgrammar_guided_decoding_logits_processor) + return get_local_xgrammar_guided_decoding_logits_processor( + guided_params, tokenizer, model_config) raise ValueError( f"Unknown guided decoding backend '{guided_params.backend}'. " - "Must be one of 'outlines, 'lm-format-enforcer'") + "Must be one of 'outlines, 'lm-format-enforcer', 'xgrammar'") def get_local_guided_decoding_logits_processor( - guided_params: GuidedDecodingParams, - tokenizer) -> Optional[LogitsProcessor]: + guided_params: GuidedDecodingParams, tokenizer: PreTrainedTokenizer, + model_config: ModelConfig) -> LogitsProcessor | None: + guided_params = maybe_backend_fallback(guided_params) # CFG grammar not supported by LMFE, so we use outlines instead - if guided_params.backend == 'outlines' or guided_params.grammar: + if guided_params.backend == 'outlines': # NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193 from vllm.model_executor.guided_decoding.outlines_decoding import ( # noqa get_local_outlines_guided_decoding_logits_processor) @@ -40,7 +86,12 @@ def get_local_guided_decoding_logits_processor( get_local_lm_format_enforcer_guided_decoding_logits_processor) return get_local_lm_format_enforcer_guided_decoding_logits_processor( guided_params, tokenizer) + if guided_params.backend == 'xgrammar': + from vllm.model_executor.guided_decoding.xgrammar_decoding import ( # noqa + get_local_xgrammar_guided_decoding_logits_processor) + return get_local_xgrammar_guided_decoding_logits_processor( + guided_params, tokenizer, model_config) raise ValueError( f"Unknown guided decoding backend '{guided_params.backend}'. 
" - "Must be one of 'outlines, 'lm-format-enforcer'") + "Must be one of 'outlines, 'lm-format-enforcer', 'xgrammar'") diff --git a/vllm/model_executor/guided_decoding/xgrammar_decoding.py b/vllm/model_executor/guided_decoding/xgrammar_decoding.py new file mode 100644 index 0000000000000..8287cd6cf3aa0 --- /dev/null +++ b/vllm/model_executor/guided_decoding/xgrammar_decoding.py @@ -0,0 +1,251 @@ +# noqa: UP007 +from __future__ import annotations + +import json +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, NamedTuple + +import torch +from transformers import PreTrainedTokenizerFast + +try: + import xgrammar as xgr + from xgrammar.base import _core as xgr_core +except ImportError: + pass + +if TYPE_CHECKING: + from transformers import PreTrainedTokenizer + + from vllm.config import ModelConfig + from vllm.sampling_params import GuidedDecodingParams + + +# TODO: passing batch size to max threads here +def get_local_xgrammar_guided_decoding_logits_processor( + guided_params: GuidedDecodingParams, + tokenizer: PreTrainedTokenizer, + model_config: ModelConfig, + max_threads: int = 8): + config = GrammarConfig.from_guided_params(guided_params=guided_params, + model_config=model_config, + tokenizer=tokenizer, + max_threads=max_threads) + return XGrammarLogitsProcessor(config) + + +class TokenizerData(NamedTuple): + """Immutable container for cached tokenizer data.""" + encoded_vocab: list[str] + stop_token_ids: list[int] | None + backend_str: str + + +class TokenizerDataCache: + """Cache manager for tokenizer data to avoid repeated processing.""" + _cache: dict[int, TokenizerData] = {} + + @classmethod + def get_tokenizer_data(cls, + tokenizer: PreTrainedTokenizer) -> TokenizerData: + tokenizer_hash = hash(tokenizer) + + if tokenizer_hash not in cls._cache: + # Vendored from xgrammar logic since we cannot pickle the tokenizer + # https://github.com/mlc-ai/xgrammar/blob/d77c0a0173ef14779c918e3be7966ba852f7910f/python/xgrammar/tokenizer_info.py#L98 # noqa: E501 + try: + encoded_vocab = [ + token for token, _ in sorted(tokenizer.get_vocab().items(), + key=lambda x: x[1]) + ] + except AttributeError as e: + raise ValueError( + f"Cannot get the vocabulary of the tokenizer " + f"{type(tokenizer)}. The tokenizer should have a " + "get_vocab method.") from e + + stop_token_ids = None + backend_str = xgr.VocabType.RAW + if isinstance(tokenizer, PreTrainedTokenizerFast): + backend_str = tokenizer.backend_tokenizer.to_str() + if stop_token_ids is None and hasattr( + tokenizer, + "eos_token_id") and tokenizer.eos_token_id is not None: + stop_token_ids = [tokenizer.eos_token_id] + + cls._cache[tokenizer_hash] = TokenizerData( + encoded_vocab=encoded_vocab, + stop_token_ids=stop_token_ids, + backend_str=backend_str) + + return cls._cache[tokenizer_hash] + + +class GrammarCompilerCache: + """ + Cache for GrammarCompiler instances based on tokenizer. + + This cache reduces the overhead of creating new compiler instances when + using the same tokenizer configuration. 
+ """ + _cache: dict[str, xgr.GrammarCompiler] = {} + + @classmethod + def get_compiler(cls, config: GrammarConfig) -> xgr.GrammarCompiler: + cache_key = str(config.tokenizer_hash) + + if cache_key not in cls._cache: + assert config.encoded_vocab is not None + tokenizer_info = xgr.TokenizerInfo._create_from_handle( + xgr_core.TokenizerInfo.from_huggingface( + config.encoded_vocab, config.backend_str, + config.vocab_size, config.stop_token_ids)) + cls._cache[cache_key] = xgr.GrammarCompiler( + tokenizer_info, max_threads=config.max_threads) + + return cls._cache[cache_key] + + +@dataclass +class GrammarConfig: + """Serializable configuration for grammar compilation""" + tokenizer_hash: int + vocab_size: int + json_str: str | None = None + grammar_str: str | None = None + json_object: bool | None = None + max_threads: int = 8 + # Only populated if tokenizer_hash not in cache + encoded_vocab: list[str] | None = None + stop_token_ids: list[int] | None = None + backend_str: str | None = None + + @classmethod + def from_guided_params(cls, + guided_params: GuidedDecodingParams, + model_config: ModelConfig, + tokenizer: PreTrainedTokenizer, + max_threads: int = 8) -> GrammarConfig: + + tokenizer_hash = hash(tokenizer) + # Only get tokenizer data if not already cached + if tokenizer_hash in TokenizerDataCache._cache: + encoded_vocab = None + stop_token_ids = None + backend_str = None + else: + tokenizer_data = TokenizerDataCache.get_tokenizer_data(tokenizer) + encoded_vocab = tokenizer_data.encoded_vocab + stop_token_ids = tokenizer_data.stop_token_ids + backend_str = tokenizer_data.backend_str + + if guided_params.json: + if not isinstance(guided_params.json, str): + json_str = json.dumps(guided_params.json) + else: + json_str = guided_params.json + return cls(json_str=json_str, + vocab_size=model_config.hf_config.vocab_size, + encoded_vocab=encoded_vocab, + stop_token_ids=stop_token_ids, + backend_str=backend_str, + tokenizer_hash=tokenizer_hash, + max_threads=max_threads) + elif guided_params.grammar: + return cls(grammar_str=guided_params.grammar, + vocab_size=model_config.hf_config.vocab_size, + encoded_vocab=encoded_vocab, + stop_token_ids=stop_token_ids, + backend_str=backend_str, + tokenizer_hash=tokenizer_hash, + max_threads=max_threads) + elif guided_params.json_object: + return cls(json_object=True, + vocab_size=model_config.hf_config.vocab_size, + encoded_vocab=encoded_vocab, + stop_token_ids=stop_token_ids, + backend_str=backend_str, + tokenizer_hash=tokenizer_hash, + max_threads=max_threads) + else: + raise ValueError( + "Currently only support JSON and EBNF grammar mode for xgrammar" + ) + + +@dataclass +class XGrammarLogitsProcessor: + """Wrapper class to support pickle protocol""" + config: GrammarConfig + + ctx: xgr.CompiledGrammar | None = None + token_bitmask: torch.Tensor = None # type: ignore[assignment] + matchers: list[xgr.GrammarMatcher] = field(default_factory=list) + batch_size: int = field(default=1) + prefilled: bool = field(default=False) + + def __getstate__(self) -> dict[str, Any]: + return {'config': self.config} + + def __setstate__(self, state: dict[str, Any]): + self.config = state['config'] + + self.ctx = None + self.matchers = [] + self.batch_size = 1 + self.token_bitmask = None # type: ignore[assignment] + self.prefilled = False + + def _ensure_ctx(self): + """Lazily initialize the processor in the worker process""" + if self.ctx is None: + compiler = GrammarCompilerCache.get_compiler(self.config) + if self.config.json_str is not None: + self.ctx = 
compiler.compile_json_schema(self.config.json_str) + elif self.config.grammar_str is not None: + self.ctx = compiler.compile_grammar(self.config.grammar_str) + elif self.config.json_object: + self.ctx = compiler.compile_builtin_json_grammar() + else: + raise ValueError( + "Invalid configuration for xgrammar logits processor") + + def __call__(self, input_ids: list[int], + scores: torch.Tensor) -> torch.Tensor: + if self.ctx is None: + self._ensure_ctx() + + if len(self.matchers) == 0: + self.matchers = [ + xgr.GrammarMatcher(self.ctx) for _ in range(self.batch_size) + ] + self.token_bitmask = xgr.allocate_token_bitmask( + self.batch_size, self.config.vocab_size) + + if not self.prefilled: + # Have not sampled a token yet + self.prefilled = True + else: + for i, matcher in enumerate(self.matchers): + if not matcher.is_terminated(): + sampled_token = input_ids[-1] + assert self.matchers[i].accept_token(sampled_token) + + for i, matcher in enumerate(self.matchers): + if not matcher.is_terminated(): + # @ubospica: ideally, fill_next_token_bitmask should be + # parallelized with model decoding + # See https://github.com/vllm-project/vllm/pull/10785/files#r1864278303 + matcher.fill_next_token_bitmask(self.token_bitmask, i) + + # token_bitmask is a CPU tensor for use with accept_token and + # fill_next_token_bitmask so we move it to the device of scores + device_type = scores.device.type + if device_type != "cuda": + scores = scores.to("cpu") + xgr.apply_token_bitmask_inplace(scores, + self.token_bitmask.to(scores.device)) + if device_type != "cuda": + scores = scores.to(device_type) + + return scores From f6084f63248a89df52bed9d9c24d6604f87e51f3 Mon Sep 17 00:00:00 2001 From: Yang Zheng <50227060+zhengy001@users.noreply.github.com> Date: Tue, 3 Dec 2024 17:01:39 +0800 Subject: [PATCH 220/397] [Speculative Decoding] Move indices to device before filtering output (#10850) Co-authored-by: Yang Zheng(SW)(Alex) --- vllm/spec_decode/multi_step_worker.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/vllm/spec_decode/multi_step_worker.py b/vllm/spec_decode/multi_step_worker.py index d249b37c780e4..676ac5eb3609d 100644 --- a/vllm/spec_decode/multi_step_worker.py +++ b/vllm/spec_decode/multi_step_worker.py @@ -120,6 +120,9 @@ def sampler_output( indices_of_seq_with_bonus_tokens) model_outputs.append(model_output) + # move indices to device to avoid stream sync + indices_of_seq_with_bonus_tokens = torch.tensor( + indices_of_seq_with_bonus_tokens, device=self.device) filtered_model_outputs = self._filter_model_output( model_outputs, indices_of_seq_with_bonus_tokens) return filtered_model_outputs, True @@ -189,7 +192,7 @@ def _expand_execute_model_request( @staticmethod def _filter_model_output( expanded_batch_outputs: List[SamplerOutput], - output_indices_to_retain: List[int]) -> List[SamplerOutput]: + output_indices_to_retain: torch.Tensor) -> List[SamplerOutput]: """ Filters the model output to include only the specified sequence outputs. This method contracts the expanded batch output from the @@ -199,8 +202,8 @@ def _filter_model_output( Args: expanded_batch_output (List[SamplerOutput]): The expanded output batch from the model. - output_indices_to_retain (List[int]): Indices of the model outputs - to retain. + output_indices_to_retain (torch.Tensor): Indices of the model + outputs to retain. 
Returns: List[SamplerOutput]: A list containing the filtered model From 3bc94cab695387eb16be90b6368029f56ce5dbc7 Mon Sep 17 00:00:00 2001 From: Alexander Matveev <59768536+alexm-neuralmagic@users.noreply.github.com> Date: Tue, 3 Dec 2024 05:33:10 -0500 Subject: [PATCH 221/397] [V1] VLM - Run the mm_mapper preprocessor in the frontend process (#10640) Signed-off-by: Roger Wang Co-authored-by: Michael Goin Co-authored-by: Roger Wang --- tests/v1/engine/test_engine_core.py | 3 +-- tests/v1/engine/test_engine_core_client.py | 3 +-- vllm/inputs/data.py | 24 +++++++++++++++++++++- vllm/v1/engine/__init__.py | 7 +++---- vllm/v1/engine/core.py | 7 ------- vllm/v1/engine/processor.py | 13 ++++++++++-- vllm/v1/request.py | 15 +++++++------- 7 files changed, 47 insertions(+), 25 deletions(-) diff --git a/tests/v1/engine/test_engine_core.py b/tests/v1/engine/test_engine_core.py index bd11ff1877064..fef44ac29c41f 100644 --- a/tests/v1/engine/test_engine_core.py +++ b/tests/v1/engine/test_engine_core.py @@ -27,9 +27,8 @@ def make_request() -> EngineCoreRequest: request_id=uuid.uuid4(), prompt=PROMPT, prompt_token_ids=PROMPT_TOKENS, - mm_data=None, + mm_inputs=None, mm_placeholders=None, - mm_processor_kwargs=None, sampling_params=SamplingParams(), eos_token_id=None, arrival_time=time.time(), diff --git a/tests/v1/engine/test_engine_core_client.py b/tests/v1/engine/test_engine_core_client.py index 582192196aaf9..4e003a25e91d2 100644 --- a/tests/v1/engine/test_engine_core_client.py +++ b/tests/v1/engine/test_engine_core_client.py @@ -29,9 +29,8 @@ def make_request(params: SamplingParams) -> EngineCoreRequest: request_id=str(uuid.uuid4()), prompt=PROMPT, prompt_token_ids=PROMPT_TOKENS, - mm_data=None, + mm_inputs=None, mm_placeholders=None, - mm_processor_kwargs=None, sampling_params=params, eos_token_id=None, arrival_time=time.time(), diff --git a/vllm/inputs/data.py b/vllm/inputs/data.py index e8fc78f1a66f6..85aaaa776907f 100644 --- a/vllm/inputs/data.py +++ b/vllm/inputs/data.py @@ -7,7 +7,8 @@ from typing_extensions import NotRequired, TypedDict, TypeVar, assert_never if TYPE_CHECKING: - from vllm.multimodal import MultiModalDataDict, MultiModalPlaceholderDict + from vllm.multimodal import (MultiModalDataDict, MultiModalKwargs, + MultiModalPlaceholderDict) from vllm.multimodal.inputs import MultiModalInputsV2 @@ -150,6 +151,12 @@ class TokenInputs(TypedDict): if the model supports it. """ + multi_modal_inputs: NotRequired["MultiModalKwargs"] + """ + Optional multi-modal inputs to pass to the model, + if the model supports it. + """ + multi_modal_placeholders: NotRequired["MultiModalPlaceholderDict"] """ Placeholder ranges for the multi-modal data. 
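
# Editor's note: the effect of xgr.apply_token_bitmask_inplace() in the
# XGrammarLogitsProcessor added a couple of patches above, sketched with a
# plain boolean mask instead of xgrammar's packed bitmask (illustrative only,
# not the xgrammar API).

import torch

vocab_size = 8
scores = torch.randn(vocab_size)            # logits for one sequence
allowed = torch.zeros(vocab_size, dtype=torch.bool)
allowed[[2, 5]] = True                      # tokens the grammar currently permits

masked = scores.clone()
masked[~allowed] = float("-inf")            # disallowed tokens can never be sampled
assert int(torch.argmax(masked)) in (2, 5)
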
@@ -169,6 +176,7 @@ def token_inputs( token_type_ids: Optional[List[int]] = None, prompt: Optional[str] = None, multi_modal_data: Optional["MultiModalDataDict"] = None, + multi_modal_inputs: Optional["MultiModalKwargs"] = None, multi_modal_placeholders: Optional["MultiModalPlaceholderDict"] = None, mm_processor_kwargs: Optional[Dict[str, Any]] = None, ) -> TokenInputs: @@ -181,6 +189,8 @@ def token_inputs( inputs["token_type_ids"] = token_type_ids if multi_modal_data is not None: inputs["multi_modal_data"] = multi_modal_data + if multi_modal_inputs is not None: + inputs["multi_modal_inputs"] = multi_modal_inputs if multi_modal_placeholders is not None: inputs["multi_modal_placeholders"] = multi_modal_placeholders if mm_processor_kwargs is not None: @@ -273,6 +283,18 @@ def multi_modal_data(self) -> "MultiModalDataDict": assert_never(inputs) + @cached_property + def multi_modal_inputs(self) -> Union[Dict, "MultiModalKwargs"]: + inputs = self.inputs + + if inputs["type"] == "token": + return inputs.get("multi_modal_inputs", {}) + + if inputs["type"] == "multimodal": + return inputs.get("mm_kwargs", {}) + + assert_never(inputs) + @cached_property def multi_modal_placeholders(self) -> "MultiModalPlaceholderDict": inputs = self.inputs diff --git a/vllm/v1/engine/__init__.py b/vllm/v1/engine/__init__.py index 967124fd850ea..3cf0e610ae7af 100644 --- a/vllm/v1/engine/__init__.py +++ b/vllm/v1/engine/__init__.py @@ -1,11 +1,11 @@ import enum from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Union +from typing import List, Optional, Union import msgspec from vllm.lora.request import LoRARequest -from vllm.multimodal import MultiModalDataDict, MultiModalPlaceholderDict +from vllm.multimodal import MultiModalKwargs, MultiModalPlaceholderDict from vllm.sampling_params import RequestOutputKind, SamplingParams @@ -35,9 +35,8 @@ class EngineCoreRequest: # always be tokenized? prompt: Optional[str] prompt_token_ids: List[int] - mm_data: Optional[MultiModalDataDict] + mm_inputs: Optional[List[MultiModalKwargs]] mm_placeholders: Optional[MultiModalPlaceholderDict] - mm_processor_kwargs: Optional[Dict[str, Any]] sampling_params: SamplingParams eos_token_id: Optional[int] arrival_time: float diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 34f99dd30ef2e..397a33eed3896 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -84,14 +84,7 @@ def _initialize_kv_caches(self, def add_request(self, request: EngineCoreRequest): """Add request to the scheduler.""" - req = Request.from_engine_core_request(request) - # FIXME(woosuk): The input mapping (e.g., PIL images to tensors) may - # take 10-50 ms, which can cause a spike in the latency. We should - # consider moving this to a separate thread. 
- if req.mm_data: - req.mm_inputs = self.mm_input_mapper.process_inputs( - req.mm_data, req.mm_processor_kwargs) self.scheduler.add_request(req) def abort_requests(self, request_ids: List[str]): diff --git a/vllm/v1/engine/processor.py b/vllm/v1/engine/processor.py index 5c1577190c75a..7a1ea2530abda 100644 --- a/vllm/v1/engine/processor.py +++ b/vllm/v1/engine/processor.py @@ -14,6 +14,7 @@ from vllm.transformers_utils.config import try_get_generation_config from vllm.transformers_utils.tokenizer_group import BaseTokenizerGroup from vllm.v1.engine import DetokenizerRequest, EngineCoreRequest +from vllm.v1.engine.mm_input_mapper import MMInputMapper class Processor: @@ -39,6 +40,9 @@ def __init__( self.input_processor = input_registry.create_input_processor( model_config) + # Multi-modal (huggingface) input mapper + self.mm_input_mapper = MMInputMapper(model_config) + # TODO: run in an ThreadpoolExecutor or BackgroundProcess. # This ideally should releases the GIL, so we should not block the # asyncio loop while this is running. @@ -96,6 +100,12 @@ def process_inputs( sampling_params.update_from_generation_config( self.generation_config_fields, eos_token_id) + # Preprocess multi-modal data + mm_inputs = self.mm_input_mapper.process_inputs( + decoder_inputs.multi_modal_data, + decoder_inputs.mm_processor_kwargs) if len( + decoder_inputs.multi_modal_data) > 0 else None + # Make Request for Detokenizer. detokenizer_request = DetokenizerRequest( request_id, @@ -113,9 +123,8 @@ def process_inputs( request_id, decoder_inputs.prompt, decoder_inputs.prompt_token_ids, - decoder_inputs.multi_modal_data, + mm_inputs, decoder_inputs.multi_modal_placeholders, - decoder_inputs.mm_processor_kwargs, sampling_params, eos_token_id, arrival_time, diff --git a/vllm/v1/request.py b/vllm/v1/request.py index 51fb4003e5fe0..6bc1e4d5c769f 100644 --- a/vllm/v1/request.py +++ b/vllm/v1/request.py @@ -45,9 +45,6 @@ def __init__( self._all_token_ids: List[int] = self.prompt_token_ids.copy() self.num_computed_tokens = 0 - # Raw multimodal data before the mm input mapper (e.g., PIL images). - self.mm_data = self.inputs.multi_modal_data - self.mm_processor_kwargs = self.inputs.mm_processor_kwargs mm_positions = self.inputs.multi_modal_placeholders if mm_positions: # FIXME(woosuk): Support other modalities. @@ -55,7 +52,10 @@ def __init__( else: self.mm_positions = [] # Output of the mm input mapper (e.g., image tensors). 
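
# Editor's note (illustrative sketch, not vLLM API): the shape of this patch's
# change reduced to a toy pipeline. Previously EngineCore.add_request ran the
# image-to-tensor mapping itself (the removed FIXME above notes it can take
# 10-50 ms); now the frontend Processor runs MMInputMapper.process_inputs once
# and ships ready-made kwargs, so the core only schedules.

from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional

@dataclass
class ToyCoreRequest:                          # stand-in for EngineCoreRequest
    prompt_token_ids: List[int]
    mm_inputs: Optional[List[Dict[str, Any]]]  # preprocessed, never raw images

def toy_frontend(token_ids: List[int],
                 raw_mm_data: Optional[Dict[str, Any]],
                 mapper: Callable[[Dict[str, Any]], List[Dict[str, Any]]],
                 ) -> ToyCoreRequest:
    # The potentially slow HF input mapper runs here, in the frontend process.
    mm_inputs = mapper(raw_mm_data) if raw_mm_data else None
    return ToyCoreRequest(token_ids, mm_inputs)

def toy_core_add_request(queue: List[ToyCoreRequest],
                         req: ToyCoreRequest) -> None:
    queue.append(req)   # no preprocessing in the latency-critical core loop

queue: List[ToyCoreRequest] = []
req = toy_frontend([1, 2, 3], {"image": object()},
                   mapper=lambda data: [{"pixel_values": "tensor..."}])
toy_core_add_request(queue, req)
assert queue[0].mm_inputs is not None
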
- self.mm_inputs: List[MultiModalKwargs] = [] + if self.inputs.multi_modal_inputs: + self.mm_inputs = self.inputs.multi_modal_inputs + else: + self.mm_inputs: List[MultiModalKwargs] = [] @classmethod def from_engine_core_request(cls, request: EngineCoreRequest) -> "Request": @@ -64,9 +64,10 @@ def from_engine_core_request(cls, request: EngineCoreRequest) -> "Request": inputs=token_inputs( prompt_token_ids=request.prompt_token_ids, prompt=request.prompt, - multi_modal_data=request.mm_data, + multi_modal_data=None, + multi_modal_inputs=request.mm_inputs, multi_modal_placeholders=request.mm_placeholders, - mm_processor_kwargs=request.mm_processor_kwargs, + mm_processor_kwargs=None, ), sampling_params=request.sampling_params, eos_token_id=request.eos_token_id, @@ -110,7 +111,7 @@ def get_finished_reason(self) -> Union[str, None]: return RequestStatus.get_finished_reason(self.status) def has_encoder_inputs(self) -> bool: - return len(self.mm_data) > 0 + return len(self.mm_inputs) > 0 @property def num_encoder_inputs(self) -> int: From 2f2cdc745a7a569637c58cfd5f6789c1d0741c84 Mon Sep 17 00:00:00 2001 From: Yan Ma Date: Wed, 4 Dec 2024 01:16:31 +0800 Subject: [PATCH 222/397] [MISC][XPU] quick fix for XPU CI (#10859) Signed-off-by: yan ma --- .buildkite/run-xpu-test.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.buildkite/run-xpu-test.sh b/.buildkite/run-xpu-test.sh index 50f58f7d70430..e0a12afbe7320 100644 --- a/.buildkite/run-xpu-test.sh +++ b/.buildkite/run-xpu-test.sh @@ -13,6 +13,7 @@ trap remove_docker_container EXIT remove_docker_container # Run the image and test offline inference/tensor parallel -docker run -it -d --name xpu-test --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path xpu-test /bin/bash -docker exec xpu-test bash -c "python3 examples/offline_inference.py" -docker exec xpu-test bash -c "python3 examples/offline_inference_cli.py -tp 2" +docker run --name xpu-test --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path --entrypoint="" xpu-test sh -c ' + python3 examples/offline_inference.py + python3 examples/offline_inference_cli.py -tp 2 +' From 7090c27bb2cb0d9c4e0acd644e484291df3aff2a Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Tue, 3 Dec 2024 13:32:21 -0500 Subject: [PATCH 223/397] [Bugfix] Only require XGrammar on x86 (#10865) Signed-off-by: mgoin --- requirements-common.txt | 2 +- .../guided_decoding/__init__.py | 7 +++++ vllm/platforms/__init__.py | 4 +-- vllm/platforms/interface.py | 26 +++++++++++++++++++ 4 files changed, 36 insertions(+), 3 deletions(-) diff --git a/requirements-common.txt b/requirements-common.txt index 818f72e14be96..72fb020a82c4e 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -19,7 +19,7 @@ prometheus-fastapi-instrumentator >= 7.0.0 tiktoken >= 0.6.0 # Required for DBRX tokenizer lm-format-enforcer >= 0.10.9, < 0.11 outlines >= 0.0.43, < 0.1 -xgrammar +xgrammar >= 0.1.5; platform_machine == "x86_64" typing_extensions >= 4.10 filelock >= 3.16.1 # need to contain https://github.com/tox-dev/filelock/pull/317 partial-json-parser # used for parsing partial JSON outputs diff --git a/vllm/model_executor/guided_decoding/__init__.py b/vllm/model_executor/guided_decoding/__init__.py index 23c31fcfd7f05..3340bad38ab73 100644 --- a/vllm/model_executor/guided_decoding/__init__.py +++ b/vllm/model_executor/guided_decoding/__init__.py @@ -3,6 +3,7 @@ from typing import TYPE_CHECKING from vllm.logger import init_logger +from vllm.platforms import CpuArchEnum, current_platform if TYPE_CHECKING: from 
transformers import PreTrainedTokenizer @@ -25,6 +26,12 @@ def maybe_backend_fallback( guided_params.backend = "xgrammar" if guided_params.backend == "xgrammar": + # xgrammar only has x86 wheels for linux, fallback to outlines + if current_platform.get_cpu_architecture() is not CpuArchEnum.X86: + logger.warning("xgrammar is only supported on x86 CPUs. " + "Falling back to use outlines instead.") + guided_params.backend = "outlines" + # xgrammar doesn't support regex or choice, fallback to outlines if guided_params.regex is not None or guided_params.choice is not None: logger.warning( diff --git a/vllm/platforms/__init__.py b/vllm/platforms/__init__.py index 7cb8ac4b0a1e0..419237c252ffd 100644 --- a/vllm/platforms/__init__.py +++ b/vllm/platforms/__init__.py @@ -1,5 +1,5 @@ from .interface import _Backend # noqa: F401 -from .interface import Platform, PlatformEnum, UnspecifiedPlatform +from .interface import CpuArchEnum, Platform, PlatformEnum, UnspecifiedPlatform current_platform: Platform @@ -120,4 +120,4 @@ def cuda_is_jetson() -> bool: else: current_platform = UnspecifiedPlatform() -__all__ = ['Platform', 'PlatformEnum', 'current_platform'] +__all__ = ['Platform', 'PlatformEnum', 'current_platform', 'CpuArchEnum'] diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index eac2b413f9271..0be7df7941b8b 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -1,4 +1,5 @@ import enum +import platform import random from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple, Union @@ -37,6 +38,14 @@ class PlatformEnum(enum.Enum): UNSPECIFIED = enum.auto() +class CpuArchEnum(enum.Enum): + X86 = enum.auto() + ARM = enum.auto() + POWERPC = enum.auto() + OTHER = enum.auto() + UNKNOWN = enum.auto() + + class DeviceCapability(NamedTuple): major: int minor: int @@ -184,6 +193,23 @@ def verify_quantization(cls, quant: str) -> None: f"{quant} quantization is currently not supported in " f"{cls.device_name}.") + @classmethod + def get_cpu_architecture(cls) -> CpuArchEnum: + """ + Determine the CPU architecture of the current system. + Returns CpuArchEnum indicating the architecture type. 
+ """ + machine = platform.machine().lower() + + if machine in ("x86_64", "amd64", "i386", "i686"): + return CpuArchEnum.X86 + elif machine.startswith("arm") or machine.startswith("aarch"): + return CpuArchEnum.ARM + elif machine.startswith("ppc"): + return CpuArchEnum.POWERPC + + return CpuArchEnum.OTHER if machine else CpuArchEnum.UNKNOWN + class UnspecifiedPlatform(Platform): _enum = PlatformEnum.UNSPECIFIED From 7c32b6861e20b6521959b6cc1ce7ccc84614974d Mon Sep 17 00:00:00 2001 From: tomeras91 <57313761+tomeras91@users.noreply.github.com> Date: Tue, 3 Dec 2024 21:13:31 +0200 Subject: [PATCH 224/397] [Frontend] correctly record prefill and decode time metrics (#10853) Signed-off-by: Tomer Asida --- vllm/engine/metrics.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index 4869557ba9b44..a5ae21c3966a7 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -599,9 +599,9 @@ def _log_prometheus(self, stats: Stats) -> None: stats.time_queue_requests) self._log_histogram(self.metrics.histogram_inference_time_request, stats.time_inference_requests) - self._log_histogram(self.metrics.histogram_decode_time_request, - stats.time_prefill_requests) self._log_histogram(self.metrics.histogram_prefill_time_request, + stats.time_prefill_requests) + self._log_histogram(self.metrics.histogram_decode_time_request, stats.time_decode_requests) self._log_histogram(self.metrics.histogram_time_in_queue_request, stats.time_in_queue_requests) From a061fe601eb165f11a4808b3ab1ac57d99e0d84e Mon Sep 17 00:00:00 2001 From: Gregory Shtrasberg <156009573+gshtras@users.noreply.github.com> Date: Tue, 3 Dec 2024 15:47:55 -0500 Subject: [PATCH 225/397] [Build][Bugfix] Using the correct type hint (#10866) Signed-off-by: Gregory Shtrasberg --- vllm/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/utils.py b/vllm/utils.py index 0165a22582e7b..07bf82e24cbe6 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1540,9 +1540,9 @@ def __len__(self): return len(self._factory) -class ClassRegistry(UserDict[type[T], _V]): +class ClassRegistry(UserDict[Type[T], _V]): - def __getitem__(self, key: type[T]) -> _V: + def __getitem__(self, key: Type[T]) -> _V: for cls in key.mro(): if cls in self.data: return self.data[cls] From 381ac93bb5a41347a025367bc58119cb45357095 Mon Sep 17 00:00:00 2001 From: "Chendi.Xue" Date: Tue, 3 Dec 2024 18:21:06 -0600 Subject: [PATCH 226/397] [Benchmark] Benchmark structured output with datasets (#10557) Signed-off-by: Aaron Pham Signed-off-by: Chendi Xue Co-authored-by: Aaron Pham --- benchmarks/benchmark_guided.py | 494 ++++++++++++++++++ .../structured_schema_1.json | 113 ++++ 2 files changed, 607 insertions(+) create mode 100644 benchmarks/benchmark_guided.py create mode 100644 benchmarks/structured_schemas/structured_schema_1.json diff --git a/benchmarks/benchmark_guided.py b/benchmarks/benchmark_guided.py new file mode 100644 index 0000000000000..1a0e62598bfcb --- /dev/null +++ b/benchmarks/benchmark_guided.py @@ -0,0 +1,494 @@ +"""Benchmark guided decoding throughput.""" +import argparse +import dataclasses +import json +import os +import random +import time +from typing import List + +import datasets +import pandas as pd +import uvloop +from transformers import AutoTokenizer, PreTrainedTokenizerBase + +from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs +from vllm.entrypoints.openai.api_server import ( + build_async_engine_client_from_engine_args) +from vllm.sampling_params 
import GuidedDecodingParams +from vllm.utils import FlexibleArgumentParser, merge_async_iterators + + +@dataclasses.dataclass +class SampleRequest: + """A class representing a single inference request for benchmarking. + + Attributes: + prompt: The input text prompt for the model. + multi_modal_data: Optional dictionary containing multi-modal data (e.g. + images). + prompt_len: The length of the prompt in tokens. + expected_output_len: The expected length of the output in tokens. + """ + prompt: str + prompt_len: int + expected_output_len: int + schema: dict + structure_type: str = 'json' + completion: str = None + + +def run_vllm(requests: List[SampleRequest], + engine_args: EngineArgs, + n: int, + guided_decoding_rate: float = 1.0, + warmup: bool = False) -> float: + from vllm import LLM, SamplingParams + llm = LLM(**vars(engine_args)) + + # Add the requests to the engine. + prompts: List[str] = [] + sampling_params: List[SamplingParams] = [] + # create a list containing random selected true or false + guided_decoding_req_idx = random.sample( + range(len(requests)), int(len(requests) * guided_decoding_rate)) + + if warmup: + print(">>>>> Running warmup prompt, for the first 5") + # We setup the first 5 requests to warmup FSM + # if using xgrammar dataset, we will skip warmup + warmup_requests = requests[:5] + for i, request in enumerate(warmup_requests): + prompts.append(request.prompt) + sampling_params.append( + SamplingParams( + n=n, + temperature=1.0, + top_p=1.0, + ignore_eos=True, + max_tokens=request.expected_output_len, + guided_decoding=GuidedDecodingParams(json=request.schema) + if guided_decoding_rate > 0 else None, + )) + llm.generate(prompts, sampling_params, use_tqdm=False) + + print(">>>>> Benchmark started...") + prompts = [] + sampling_params = [] + for i, request in enumerate(requests): + prompts.append(request.prompt) + sampling_params.append( + SamplingParams( + n=n, + temperature=1.0, + top_p=1.0, + ignore_eos=True, + max_tokens=request.expected_output_len, + guided_decoding=GuidedDecodingParams( + **{request.structure_type: request.schema}) + if i in guided_decoding_req_idx else None, + )) + + start = time.perf_counter() + outputs = llm.generate(prompts, sampling_params, use_tqdm=False) + ret = [] + for output, request in zip(outputs, requests): + generated_text = output.outputs[0].text + ret.append({ + "generated": generated_text, + "expected": request.completion + }) + end = time.perf_counter() + return end - start, ret + + +async def run_vllm_async( + requests: List[SampleRequest], + engine_args: AsyncEngineArgs, + n: int, + guided_decoding_rate: float = 1.0, + warmup: bool = False, + disable_frontend_multiprocessing: bool = False) -> float: + from vllm import SamplingParams + + async with build_async_engine_client_from_engine_args( + engine_args, disable_frontend_multiprocessing) as llm: + + # Add the requests to the engine. 
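
# Editor's note: run_vllm_async fans out one llm.generate() stream per prompt
# and then merges them (see the loop a few lines below); here is a tiny
# standalone model of that pattern with toy async generators.
# merge_async_iterators yields (index, item) pairs, which is what the
# benchmark's `async for i, res in all_gens` loop relies on.

import asyncio
from vllm.utils import merge_async_iterators

async def toy_stream(tag: str, n: int):
    for step in range(n):
        await asyncio.sleep(0)
        yield f"{tag}-{step}"

async def demo():
    gens = [toy_stream("a", 2), toy_stream("b", 3)]
    async for idx, item in merge_async_iterators(*gens):
        print(idx, item)    # e.g. 0 a-0, 1 b-0, ...

asyncio.run(demo())
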
+ prompts: List[str] = [] + sampling_params: List[SamplingParams] = [] + guided_decoding_req_idx = random.sample( + range(len(requests)), int(len(requests) * guided_decoding_rate)) + + if warmup: + print(">>>>>> Running warmup prompt, for the first 5") + # We setup the first 5 requests to warmup FSM + # if using xgrammar dataset, we will skip warmup + warmup_requests = requests[:5] + for i, request in enumerate(warmup_requests): + prompts.append(request.prompt) + sampling_params.append( + SamplingParams( + n=n, + temperature=1.0, + top_p=1.0, + ignore_eos=True, + max_tokens=request.expected_output_len, + guided_decoding=GuidedDecodingParams( + json=request.schema) + if guided_decoding_rate > 0 else None, + )) + generators = [] + for i, (prompt, sp) in enumerate(zip(prompts, sampling_params)): + generator = llm.generate(prompt, sp, request_id=f"test{i}") + generators.append(generator) + all_gens = merge_async_iterators(*generators) + async for i, res in all_gens: + pass + + print(">>>>> Benchmark started...") + prompts = [] + sampling_params = [] + for i, request in enumerate(requests): + prompts.append(request.prompt) + sampling_params.append( + SamplingParams( + n=n, + temperature=1.0, + top_p=1.0, + ignore_eos=True, + max_tokens=request.expected_output_len, + guided_decoding=GuidedDecodingParams(json=request.schema) + if i in guided_decoding_req_idx else None, + )) + + generators = [] + start_time = [] + latencies = [] + start = time.perf_counter() + for i, (prompt, sp) in enumerate(zip(prompts, sampling_params)): + generator = llm.generate(prompt, sp, request_id=f"test{i}") + generators.append(generator) + start_time.append(time.perf_counter()) + latencies.append([]) + all_gens = merge_async_iterators(*generators) + generated_texts = [''] * len(requests) + async for i, res in all_gens: + generated_texts[i] = res.outputs[0].text + lat = time.perf_counter() - start_time[i] + latencies[i].append(lat) + ret = [{ + 'generated': gt, + 'expected': req.completion + } for gt, req in zip(generated_texts, requests)] + end = time.perf_counter() + first_latency = pd.Series([lat[0] * 1000 for lat in latencies]) + next_latency = pd.Series([(lat[-1] - lat[0]) / len(lat[1:]) * 1000 + for lat in latencies]) + return end - start, ret, (first_latency, next_latency) + + +def sample_requests(tokenizer: PreTrainedTokenizerBase, + args: argparse.Namespace) -> List[SampleRequest]: + if args.dataset == 'json': + if args.json_schema_path is None: + dir_path = os.path.dirname(os.path.realpath(__file__)) + args.json_schema_path = os.path.join(dir_path, + "structured_schemas", + "structured_schema_1.json") + with open(args.json_schema_path) as f: + schema = json.load(f) + prompt = f"Generate an example of a user profile given the following schema: {json.dumps(schema)}" # noqa: E501 + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "grammar": + schema = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ + """ + prompt = "Generate an SQL query to show the 'username' \ + and 'email' from the 'users' table." 
+ + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "regex": + regex = r"\w+@\w+\.com\n" + args.regex = regex + prompt = "Generate an email address for Alan Turing, \ + who works in Enigma. End in .com and new line. \ + Example result: alan.turing@enigma.com\n" + + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=regex, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "choice": + choice = ["Positive", "Negative"] + args.choice = choice + prompt = "Classify this sentiment: vLLM is wonderful!" + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=choice, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "xgrammar_bench": + args.warmup = False + requests: List[SampleRequest] = [] + dataset = datasets.load_dataset("NousResearch/json-mode-eval", + split="train") + print(f"dataset has {len(dataset)} entries") + len_dataset = len(dataset) + for data_point_idx in range(args.num_prompts): + idx = data_point_idx + while idx >= len_dataset: + idx -= len_dataset + schema = dataset["schema"][idx] + prompt = tokenizer.apply_chat_template(dataset["prompt"][idx], + tokenize=False) + input_len = len(tokenizer(prompt).input_ids) + completion = dataset["completion"][idx] + + requests.append( + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + completion=completion)) + + return requests + + +def evaluate(ret, args): + + def _eval_correctness_json(expected, actual): + # extract json string from string using regex + import re + actual = actual.replace('\n', '').replace(' ', '').strip() + try: + actual = re.search(r'\{.*\}', actual).group() + actual = json.loads(actual) + except Exception: + return False + + return True + + def _eval_correctness_choice(expected, actual): + return actual in args.choice + + def _eval_correctness_regex(expected, actual): + import re + return re.match(args.regex, actual) is not None + + def _eval_correctness(expected, actual): + if args.structure_type == 'json': + return _eval_correctness_json(expected, actual) + elif args.structure_type == 'regex': + return _eval_correctness_regex(expected, actual) + elif args.structure_type == 'choice': + return _eval_correctness_choice(expected, actual) + else: + return None + + scores = [] + for res in ret: + score = _eval_correctness(res['expected'], res['generated']) + res['correctness'] = score + scores.append(score) + + not_none_scores = [score for score in scores if score is not None] + + return (sum(not_none_scores) / len(not_none_scores) * + 100) if len(not_none_scores) > 0 else None + + +def main(args: argparse.Namespace): + print(args) + random.seed(args.seed) + + # async engine is working for 'regex', 'choice' and 'grammar' + if args.dataset == 'grammar': + args.structure_type = 'grammar' + args.async_engine = False + elif args.dataset == 'regex': + 
args.structure_type = 'regex' + args.async_engine = False + elif args.dataset == 'choice': + args.structure_type = 'choice' + args.async_engine = False + else: + args.structure_type = 'json' + + if args.no_guided_decoding: + args.guided_decoding_ratio = 0 + if args.save_results: + result_file_name = f'{args.guided_decoding_ratio}guided' + result_file_name += f"_{args.model.split('/')[-1]}" + result_file_name += f"_{args.dataset}" + result_file_name += f"_{args.num_prompts}" + result_file_name += f"_out{args.output_len}" + result_file_name += f"_async{args.async_engine}" + result_file_name += f"_warmup{args.warmup}" + result_file_name += f"_chunkedprefill{args.enable_chunked_prefill}" + result_file_name += ".txt" + else: + result_file_name = None + + # Synthesize a prompt with the given input length. + tokenizer = AutoTokenizer.from_pretrained( + args.tokenizer, trust_remote_code=args.trust_remote_code) + requests = sample_requests(tokenizer, args) + + if args.async_engine: + engine_args = AsyncEngineArgs.from_cli_args(args) + elapsed_time, ret, (first_latency, next_latency) = uvloop.run( + run_vllm_async(requests, engine_args, args.n, + args.guided_decoding_ratio, args.warmup, + args.disable_frontend_multiprocessing)) + else: + engine_args = EngineArgs.from_cli_args(args) + elapsed_time, ret = run_vllm(requests, engine_args, args.n, + args.guided_decoding_ratio, args.warmup) + first_latency, next_latency = None, None + + score = evaluate(ret, args) + total_num_tokens = sum(request.prompt_len + request.expected_output_len + for request in requests) + total_output_tokens = sum(request.expected_output_len + for request in requests) + if first_latency is not None: + latency_breakdown = "\nFirst token latency(msecs):\n" + latency_breakdown += f"{first_latency.describe()}" + latency_breakdown += "\nNext token latency(msecs):\n" + latency_breakdown += f"{next_latency.describe()}" + print( + f"Throughput: {len(requests) / elapsed_time:.2f} requests/s, " + f"{total_num_tokens / elapsed_time:.2f} total tokens/s, " + f"{total_output_tokens / elapsed_time:.2f} output tokens/s", + f"Correct rate is {score} %", + f"{latency_breakdown if first_latency is not None else ''}") + + # Output JSON results if specified + if args.output_json or result_file_name: + results = { + "elapsed_time": elapsed_time, + "num_requests": len(requests), + "total_num_tokens": total_num_tokens, + "total_output_tokens": total_output_tokens, + "requests_per_second": len(requests) / elapsed_time, + "tokens_per_second": f"{total_num_tokens / elapsed_time:.2f}", + "output_tokens_per_second": + f"{total_output_tokens / elapsed_time:.2f}", + "correct_rate(%)": score + } + results = {"outputs": ret, **results} + if first_latency is not None: + results["first_token_latency(msecs)"] = first_latency.describe( + ).to_dict() + results["next_token_latency(msecs)"] = next_latency.describe( + ).to_dict() + if args.output_json: + with open(args.output_json, "w") as f: + json.dump(results, f, indent=4) + elif result_file_name: + with open(result_file_name, "w") as f: + json.dump(results, f, indent=4) + + +if __name__ == "__main__": + parser = FlexibleArgumentParser(description="Benchmark guided decoding.") + parser = AsyncEngineArgs.add_cli_args(parser) + + parser.add_argument("--output-len", + type=int, + default=512, + help="Output length for each request. 
Overrides the " + "output length from the dataset.") + parser.add_argument( + "--dataset", + default='json', + choices=['json', 'grammar', 'regex', 'choice', 'xgrammar_bench']) + parser.add_argument("--json_schema_path", + type=str, + default=None, + help="Path to json schema.") + parser.add_argument("--n", + type=int, + default=1, + help="Number of generated sequences per prompt.") + parser.add_argument("--num-prompts", + type=int, + default=10, + help="Number of prompts to process.") + parser.add_argument( + '--output-json', + type=str, + default=None, + help='Path to save the throughput results in JSON format.') + parser.add_argument("--async-engine", + action='store_true', + default=False, + help="Use vLLM async engine rather than LLM class.") + parser.add_argument("--no-guided-decoding", + action='store_true', + default=False, + help="Whether to disable JSON decoding or not.") + parser.add_argument("--guided-decoding-ratio", + type=float, + default=1.0, + help="Ratio of Guided Decoding requests") + parser.add_argument("--disable-frontend-multiprocessing", + action='store_true', + default=False, + help="Disable decoupled async engine frontend.") + parser.add_argument("--warmup", + action="store_true", + default=False, + help="Run warmup prompts before benchmark.") + parser.add_argument("--save-results", + action="store_true", + default=False, + help="save output results.") + args = parser.parse_args() + if args.tokenizer is None: + args.tokenizer = args.model + main(args) diff --git a/benchmarks/structured_schemas/structured_schema_1.json b/benchmarks/structured_schemas/structured_schema_1.json new file mode 100644 index 0000000000000..6003698469e8d --- /dev/null +++ b/benchmarks/structured_schemas/structured_schema_1.json @@ -0,0 +1,113 @@ +{ + "$schema": + "https://json-schema.org/draft/2020-12/schema", + "title": + "User Profile", + "type": + "object", + "properties": { + "userId": { + "type": "string", + "description": "Unique identifier for the user." + }, + "personalInfo": { + "type": "object", + "properties": { + "firstName": { + "type": "string", + "description": "The user's first name." + }, + "lastName": { + "type": "string", + "description": "The user's last name." + }, + "age": { + "type": "integer", + "minimum": 0, + "description": "The user's age." + }, + "phoneNumbers": { + "type": + "array", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["home", "work", "mobile"], + "description": "Type of phone number." + }, + "number": { + "type": "string", + "pattern": "^\\+?[1-9]\\d{1,14}$", + "description": "Phone number in E.164 format." + } + }, + "required": ["type", "number"] + }, + "description": + "List of phone numbers associated with the user." + } + }, + "required": ["firstName", "lastName"] + }, + "address": { + "type": "object", + "properties": { + "street": { + "type": "string", + "description": "Street address." + }, + "city": { + "type": "string", + "description": "City name." + }, + "state": { + "type": "string", + "description": "State or province." + }, + "postalCode": { + "type": "string", + "pattern": "^\\d{5}(-\\d{4})?$", + "description": "Postal code." + }, + "country": { + "type": "string", + "description": "Country name." + } + }, + "required": ["street", "city", "state", "postalCode", "country"] + }, + "preferences": { + "type": "object", + "properties": { + "newsletterSubscribed": { + "type": + "boolean", + "description": + "Indicates if the user is subscribed to the newsletter." 
+ }, + "favoriteCategories": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of user's favorite categories." + } + }, + "required": ["newsletterSubscribed"] + }, + "accountStatus": { + "type": "string", + "enum": ["active", "inactive", "suspended"], + "description": "Current status of the user's account." + }, + "registrationDate": { + "type": "string", + "format": "date-time", + "description": "ISO 8601 formatted date-time of user registration." + } + }, + "required": + ["userId", "personalInfo", "address", "accountStatus", "registrationDate"] +} \ No newline at end of file From d2bd88b1226fc93ba42cdcba51daff5e026343f0 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Tue, 3 Dec 2024 22:23:21 -0500 Subject: [PATCH 227/397] [CI/Build] Replace mean with torch.all in test_pynccl.py (#10876) Signed-off-by: Tyler Michael Smith --- tests/distributed/test_pynccl.py | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/tests/distributed/test_pynccl.py b/tests/distributed/test_pynccl.py index 4e27babf12cc3..3e9b0e10a11d8 100644 --- a/tests/distributed/test_pynccl.py +++ b/tests/distributed/test_pynccl.py @@ -62,8 +62,7 @@ def worker_fn(): with pynccl_comm.change_state(enable=True): tensor = pynccl_comm.all_reduce(tensor) torch.cuda.synchronize() - result = tensor.mean().cpu().item() - assert result == pynccl_comm.world_size + assert torch.all(tensor == pynccl_comm.world_size).cpu().item() @pytest.mark.skipif(torch.cuda.device_count() < 2, @@ -88,13 +87,11 @@ def multiple_allreduce_worker_fn(): tensor = pynccl_comm.all_reduce(tensor) tensor = pynccl_comm.all_reduce(tensor) torch.cuda.synchronize() - result = tensor.mean().cpu().item() - assert result == 4 + assert torch.all(tensor == 4).cpu().item() else: tensor = pynccl_comm.all_reduce(tensor) torch.cuda.synchronize() - result = tensor.mean().cpu().item() - assert result == 2 + assert torch.all(tensor == 2).cpu().item() @pytest.mark.skipif(torch.cuda.device_count() < 4, @@ -116,13 +113,11 @@ def multiple_allreduce_with_vllm_worker_fn(): tensor = tensor_model_parallel_all_reduce(tensor) tensor = tensor_model_parallel_all_reduce(tensor) torch.cuda.synchronize() - result = tensor.mean().cpu().item() - assert result == 4 + assert torch.all(tensor == 4).cpu().item() else: tensor = tensor_model_parallel_all_reduce(tensor) torch.cuda.synchronize() - result = tensor.mean().cpu().item() - assert result == 2 + assert torch.all(tensor == 2).cpu().item() @pytest.mark.skipif(torch.cuda.device_count() < 4, @@ -149,7 +144,7 @@ def worker_fn_with_cudagraph(): torch.cuda.synchronize() graph.replay() torch.cuda.synchronize() - assert a_out.mean().cpu().item() == pynccl_comm.world_size**1 + assert torch.all(a_out == pynccl_comm.world_size).cpu().item() @worker_fn_wrapper @@ -249,8 +244,7 @@ def send_recv_worker_fn(): src=(pynccl_comm.rank - 1) % pynccl_comm.world_size) torch.cuda.synchronize() - result = tensor.mean().cpu().item() - assert result == 1 + assert torch.all(tensor == 1).cpu().item() @pytest.mark.skipif(torch.cuda.device_count() < 2, @@ -289,11 +283,10 @@ def multiple_send_recv_worker_fn(): src=(pynccl_comm.rank - 1) % pynccl_comm.world_size) torch.cuda.synchronize() - result = tensor.mean().cpu().item() if torch.distributed.get_rank() in [0, 2]: - assert result == 1 + assert torch.all(tensor == 1).cpu().item() else: - assert result == 2 + assert torch.all(tensor == 2).cpu().item() @pytest.mark.skipif(torch.cuda.device_count() < 4, From b5b647b084de3a5a29d35ca527c9901f8e6a4e7e 
Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Wed, 4 Dec 2024 12:32:21 +0800 Subject: [PATCH 228/397] Drop ROCm load format check (#10767) Signed-off-by: wangxiyuan --- vllm/config.py | 23 +++-------------------- 1 file changed, 3 insertions(+), 20 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 971eb36d677b8..1cbab8ea30249 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -931,7 +931,9 @@ def __post_init__(self): if isinstance(model_loader_extra_config, str): self.model_loader_extra_config = json.loads( model_loader_extra_config) - self._verify_load_format() + if isinstance(self.load_format, str): + load_format = self.load_format.lower() + self.load_format = LoadFormat(load_format) if self.ignore_patterns is not None and len(self.ignore_patterns) > 0: logger.info( @@ -940,25 +942,6 @@ def __post_init__(self): else: self.ignore_patterns = ["original/**/*"] - def _verify_load_format(self) -> None: - if not isinstance(self.load_format, str): - return - - load_format = self.load_format.lower() - self.load_format = LoadFormat(load_format) - - rocm_not_supported_load_format: List[str] = [] - if current_platform.is_rocm( - ) and load_format in rocm_not_supported_load_format: - rocm_supported_load_format = [ - f for f in LoadFormat.__members__ - if (f not in rocm_not_supported_load_format) - ] - raise ValueError( - f"load format '{load_format}' is not supported in ROCm. " - f"Supported load formats are " - f"{rocm_supported_load_format}") - @dataclass class ParallelConfig: From fa2dea61df9bb3fa3dbd081f42f464c45e3db5b2 Mon Sep 17 00:00:00 2001 From: "Kevin H. Luu" Date: Tue, 3 Dec 2024 23:02:16 -0800 Subject: [PATCH 229/397] [ci/build] Change queue name for Release jobs (#10875) --- .buildkite/release-pipeline.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/release-pipeline.yaml b/.buildkite/release-pipeline.yaml index f78e360b7afd3..173b52f072502 100644 --- a/.buildkite/release-pipeline.yaml +++ b/.buildkite/release-pipeline.yaml @@ -1,7 +1,7 @@ steps: - label: "Build wheel - CUDA 12.1" agents: - queue: cpu_queue + queue: cpu_queue_postmerge commands: - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.1.0 --tag vllm-ci:build-image --target build --progress plain ." - "mkdir artifacts" @@ -18,7 +18,7 @@ steps: - label: "Build wheel - CUDA 11.8" # depends_on: block-build-cu118-wheel agents: - queue: cpu_queue + queue: cpu_queue_postmerge commands: - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=11.8.0 --tag vllm-ci:build-image --target build --progress plain ." - "mkdir artifacts" From c9ca4fce3f48e27801e1bad03d4bc0b963567d24 Mon Sep 17 00:00:00 2001 From: "Kevin H. 
Luu" Date: Tue, 3 Dec 2024 23:02:40 -0800 Subject: [PATCH 230/397] [ci/build] Job to build and push release image (#10877) --- .buildkite/release-pipeline.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.buildkite/release-pipeline.yaml b/.buildkite/release-pipeline.yaml index 173b52f072502..93e118fb3eab8 100644 --- a/.buildkite/release-pipeline.yaml +++ b/.buildkite/release-pipeline.yaml @@ -26,3 +26,16 @@ steps: - "bash .buildkite/upload-wheels.sh" env: DOCKER_BUILDKIT: "1" + + - block: "Build release image" + depends_on: ~ + key: block-release-image-build + + - label: "Build release image" + depends_on: block-release-image-build + agents: + queue: cpu_queue_postmerge + commands: + - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7" + - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.1.0 --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT --target vllm-openai --progress plain ." + - "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT" From 8db957ee3a8234574430d9e570e520501d8539e9 Mon Sep 17 00:00:00 2001 From: jianzheng <57654625+o2363286@users.noreply.github.com> Date: Wed, 4 Dec 2024 16:48:22 +0800 Subject: [PATCH 231/397] =?UTF-8?q?[bugfix]=20fixed=20parameter=20?= =?UTF-8?q?=E2=80=9Cn=E2=80=9D=20when=20set=20parameter=20=E2=80=9Cbestof?= =?UTF-8?q?=E2=80=9D=20>=201=20(#10854)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: jianzheng <57654625+o2363286@users.noreply.github.com> --- vllm/sampling_params.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/vllm/sampling_params.py b/vllm/sampling_params.py index 5c6df5aaf5446..fc77f3ca529b2 100644 --- a/vllm/sampling_params.py +++ b/vllm/sampling_params.py @@ -293,8 +293,9 @@ def __post_init__(self) -> None: raise ValueError( f"best_of must be greater than or equal to n, " f"got n={self.n} and best_of={self.best_of}.") - self._real_n = self.n - self.n = self.best_of + if not self._real_n: + self._real_n = self.n + self.n = self.best_of if 0 < self.temperature < _MAX_TEMP: logger.warning( From c92acb9693c0504d7dabed2a0251b9f5d4ddaebb Mon Sep 17 00:00:00 2001 From: "Kevin H. 
Luu" Date: Wed, 4 Dec 2024 01:01:20 -0800 Subject: [PATCH 232/397] [ci/build] Update vLLM postmerge ECR repo (#10887) --- .buildkite/nightly-benchmarks/benchmark-pipeline.yaml | 6 +++--- .buildkite/nightly-benchmarks/scripts/wait-for-image.sh | 4 ++-- docs/source/getting_started/installation.rst | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml index 3db77d5f16022..dd2ce454ecb2d 100644 --- a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml +++ b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml @@ -21,7 +21,7 @@ steps: podSpec: priorityClassName: perf-benchmark containers: - - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT + - image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT command: - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh resources: @@ -51,7 +51,7 @@ steps: queue: H200 plugins: - docker#v5.12.0: - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT + image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT command: - bash - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh @@ -71,7 +71,7 @@ steps: queue: H100 plugins: - docker#v5.12.0: - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT + image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT command: - bash - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh diff --git a/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh b/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh index 19f7160e68a4d..aa0f7ade808e0 100644 --- a/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh +++ b/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh @@ -1,6 +1,6 @@ #!/bin/sh -TOKEN=$(curl -s -L "https://public.ecr.aws/token?service=public.ecr.aws&scope=repository:q9t5s3a7/vllm-ci-test-repo:pull" | jq -r .token) -URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-test-repo/manifests/$BUILDKITE_COMMIT" +TOKEN=$(curl -s -L "https://public.ecr.aws/token?service=public.ecr.aws&scope=repository:q9t5s3a7/vllm-ci-postmerge-repo:pull" | jq -r .token) +URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-postmerge-repo/manifests/$BUILDKITE_COMMIT" TIMEOUT_SECONDS=10 diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst index e3dbbc9affe66..52412fa8437b9 100644 --- a/docs/source/getting_started/installation.rst +++ b/docs/source/getting_started/installation.rst @@ -73,7 +73,7 @@ Another way to access the latest code is to use the docker images: .. code-block:: console $ export VLLM_COMMIT=33f460b17a54acb3b6cc0b03f4a17876cff5eafd # use full commit hash from the main branch - $ docker pull public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:${VLLM_COMMIT} + $ docker pull public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:${VLLM_COMMIT} These docker images are used for CI and testing only, and they are not intended for production use. They will be expired after several days. 
From 01d079fd8e65ed9a243ebbf6b771393607942907 Mon Sep 17 00:00:00 2001 From: Xin Yang <105740670+xyang16@users.noreply.github.com> Date: Wed, 4 Dec 2024 09:40:16 -0800 Subject: [PATCH 233/397] [LoRA] Change lora_tokenizers capacity (#10796) Signed-off-by: Xin Yang --- tests/lora/test_tokenizer_group.py | 20 +++++++++++++++++++ vllm/engine/llm_engine.py | 2 +- vllm/engine/multiprocessing/client.py | 3 +-- .../tokenizer_group/__init__.py | 9 +++++---- .../tokenizer_group/tokenizer_group.py | 3 ++- vllm/v1/engine/async_llm.py | 2 +- vllm/v1/engine/llm_engine.py | 2 +- 7 files changed, 31 insertions(+), 10 deletions(-) diff --git a/tests/lora/test_tokenizer_group.py b/tests/lora/test_tokenizer_group.py index daa39b2a3dba1..d225a3f7d6c06 100644 --- a/tests/lora/test_tokenizer_group.py +++ b/tests/lora/test_tokenizer_group.py @@ -17,6 +17,7 @@ async def test_tokenizer_group_lora(sql_lora_files, tokenizer_group_type): tokenizer_id="gpt2", enable_lora=True, max_num_seqs=1, + max_loras=1, max_input_length=None, ) lora_request = LoRARequest("1", 1, sql_lora_files) @@ -53,3 +54,22 @@ def test_get_lora_tokenizer(sql_lora_files, tmp_path): lora_request = LoRARequest("1", 1, str(tmp_path)) tokenizer = get_lora_tokenizer(lora_request) assert not tokenizer + + +@pytest.mark.parametrize("enable_lora", [True, False]) +@pytest.mark.parametrize("max_num_seqs", [1, 2]) +@pytest.mark.parametrize("max_loras", [1, 2]) +def test_lora_tokenizers(enable_lora, max_num_seqs, max_loras): + tokenizer_group = get_tokenizer_group( + get_tokenizer_pool_config(None), + tokenizer_id="gpt2", + enable_lora=enable_lora, + max_num_seqs=max_num_seqs, + max_loras=max_loras, + max_input_length=None, + ) + if enable_lora: + assert tokenizer_group.lora_tokenizers.capacity == max( + max_num_seqs, max_loras) + else: + assert tokenizer_group.lora_tokenizers.capacity == 0 diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index af66b307028cf..1f3c6197ba1a8 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -620,7 +620,7 @@ def _init_tokenizer(self) -> BaseTokenizerGroup: model_config=self.model_config, scheduler_config=self.scheduler_config, parallel_config=self.parallel_config, - enable_lora=bool(self.lora_config)) + lora_config=self.lora_config) def _verify_args(self) -> None: self.model_config.verify_with_parallel_config(self.parallel_config) diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index d21136c03d7d2..7e4f81b2cf8e2 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -94,8 +94,7 @@ def __init__(self, ipc_path: str, engine_config: VllmConfig, model_config=self.model_config, scheduler_config=engine_config.scheduler_config, parallel_config=engine_config.parallel_config, - enable_lora=bool(engine_config.lora_config), - ) + lora_config=engine_config.lora_config) self.input_preprocessor = InputPreprocessor(self.model_config, self.tokenizer) diff --git a/vllm/transformers_utils/tokenizer_group/__init__.py b/vllm/transformers_utils/tokenizer_group/__init__.py index 6a114b513f382..c0b3d2585a962 100644 --- a/vllm/transformers_utils/tokenizer_group/__init__.py +++ b/vllm/transformers_utils/tokenizer_group/__init__.py @@ -1,7 +1,7 @@ from typing import Optional, Type -from vllm.config import (ModelConfig, ParallelConfig, SchedulerConfig, - TokenizerPoolConfig) +from vllm.config import (LoRAConfig, ModelConfig, ParallelConfig, + SchedulerConfig, TokenizerPoolConfig) from vllm.executor.ray_utils import ray from 
.base_tokenizer_group import AnyTokenizer, BaseTokenizerGroup @@ -16,10 +16,11 @@ def init_tokenizer_from_configs(model_config: ModelConfig, scheduler_config: SchedulerConfig, parallel_config: ParallelConfig, - enable_lora: bool): + lora_config: LoRAConfig): init_kwargs = dict(tokenizer_id=model_config.tokenizer, - enable_lora=enable_lora, + enable_lora=bool(lora_config), max_num_seqs=scheduler_config.max_num_seqs, + max_loras=lora_config.max_loras if lora_config else 0, max_input_length=None, tokenizer_mode=model_config.tokenizer_mode, trust_remote_code=model_config.trust_remote_code, diff --git a/vllm/transformers_utils/tokenizer_group/tokenizer_group.py b/vllm/transformers_utils/tokenizer_group/tokenizer_group.py index e516eeabaadef..761b07f34d2f9 100644 --- a/vllm/transformers_utils/tokenizer_group/tokenizer_group.py +++ b/vllm/transformers_utils/tokenizer_group/tokenizer_group.py @@ -21,8 +21,9 @@ def __init__(self, tokenizer_id: str, enable_lora: bool, max_num_seqs: int, self.enable_lora = enable_lora self.max_input_length = max_input_length self.tokenizer = get_tokenizer(self.tokenizer_id, **tokenizer_config) + max_loras = tokenizer_config.get("max_loras", 0) self.lora_tokenizers = LRUCache[AnyTokenizer]( - capacity=max_num_seqs if enable_lora else 0) + capacity=max(max_loras, max_num_seqs) if enable_lora else 0) @classmethod def from_config(cls, tokenizer_pool_config: Optional[TokenizerPoolConfig], diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 7335c637f0f79..4ef372fd8464b 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -51,7 +51,7 @@ def __init__( model_config=vllm_config.model_config, scheduler_config=vllm_config.scheduler_config, parallel_config=vllm_config.parallel_config, - enable_lora=bool(vllm_config.lora_config)) + lora_config=vllm_config.lora_config) self.tokenizer.ping() # Request streams (map of request_id -> AsyncStream). 
diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index bd19d998a4adb..312c0242a45dd 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -46,7 +46,7 @@ def __init__( model_config=vllm_config.model_config, scheduler_config=vllm_config.scheduler_config, parallel_config=vllm_config.parallel_config, - enable_lora=bool(vllm_config.lora_config)) + lora_config=vllm_config.lora_config) self.tokenizer.ping() # Processor (convert Inputs --> EngineCoreRequests) From 10398b4706ee71d0bddc32c1d33b11e73df12a27 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Thu, 5 Dec 2024 02:11:08 +0800 Subject: [PATCH 234/397] [Model] Consolidate ViTs attention implementation without mask (#10893) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/attention/layer.py | 63 +++++++++++++++++++ vllm/model_executor/models/blip.py | 45 ++----------- vllm/model_executor/models/clip.py | 46 ++------------ .../models/glm4_vision_encoder.py | 22 ++----- .../models/idefics2_vision_model.py | 25 ++------ vllm/model_executor/models/intern_vit.py | 28 ++------- vllm/model_executor/models/internvl.py | 23 ++++--- vllm/model_executor/models/molmo.py | 38 +++-------- vllm/model_executor/models/siglip.py | 45 ++----------- 9 files changed, 109 insertions(+), 226 deletions(-) diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index e024eef286f05..05d997279893b 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -3,6 +3,7 @@ import torch import torch.nn as nn +import torch.nn.functional as F from vllm.attention import AttentionMetadata, AttentionType from vllm.attention.selector import backend_name_to_enum, get_attn_backend @@ -168,6 +169,68 @@ def extra_repr(self) -> str: return s +class MultiHeadAttention(nn.Module): + """Multi-headed attention without any cache, used for ViT.""" + + def __init__( + self, + num_heads: int, + head_size: int, + scale: float, + num_kv_heads: Optional[int] = None, + ): + super().__init__() + self.num_heads = num_heads + self.head_size = head_size + self.scale = scale + self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads + + dtype = torch.get_default_dtype() + attn_backend = get_attn_backend(head_size, + dtype, + kv_cache_dtype=None, + block_size=16, + is_attention_free=False) + if attn_backend in {_Backend.FLASH_ATTN, _Backend.FLASH_ATTN_VLLM_V1}: + attn_backend = _Backend.XFORMERS + + self.attn_backend = attn_backend if attn_backend in { + _Backend.TORCH_SDPA, _Backend.XFORMERS + } else _Backend.TORCH_SDPA + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + ) -> torch.Tensor: + """Input shape: batch_size x seq_len x hidden_size""" + # TODO(Isotr0py): Use existing backend implementations and support FA2 + bsz, q_len, _ = query.size() + kv_len = key.size(1) + + query = query.view(bsz, q_len, self.num_heads, self.head_size) + key = key.view(bsz, kv_len, self.num_kv_heads, self.head_size) + value = value.view(bsz, kv_len, self.num_kv_heads, self.head_size) + + if self.attn_backend == _Backend.XFORMERS: + from xformers import ops as xops + + out = xops.memory_efficient_attention_forward(query, + key, + value, + scale=self.scale) + elif self.attn_backend == _Backend.TORCH_SDPA: + query, key, value = (x.transpose(1, 2) + for x in (query, key, value)) + out = F.scaled_dot_product_attention(query, + key, + value, + scale=self.scale) + out = out.transpose(1, 2) + return out.view(bsz, q_len, -1) + + def unified_attention( query: torch.Tensor, key: torch.Tensor, diff --git 
a/vllm/model_executor/models/blip.py b/vllm/model_executor/models/blip.py index 6af59697160a0..42a239cadac46 100644 --- a/vllm/model_executor/models/blip.py +++ b/vllm/model_executor/models/blip.py @@ -4,11 +4,10 @@ import torch import torch.nn as nn -import torch.nn.functional as F from PIL import Image from transformers import Blip2VisionConfig, BlipVisionConfig -from vllm.attention.selector import _Backend +from vllm.attention.layer import MultiHeadAttention from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -22,8 +21,6 @@ repeat_and_pad_placeholder_tokens) from vllm.sequence import SequenceData -from .utils import get_vit_attn_backend - def get_blip_patch_grid_length(*, image_size: int, patch_size: int) -> int: assert image_size % patch_size == 0 @@ -205,11 +202,8 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) - # Detect attention implementation. - self.attn_backend = get_vit_attn_backend(support_fa=False) - if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: - raise RuntimeError( - f"BLIP does not support {self.attn_backend} backend now.") + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, @@ -220,41 +214,10 @@ def forward( hidden_states: torch.Tensor, ): """Input shape: Batch x Time x Channel""" - bsz, tgt_len, _ = hidden_states.size() qkv_states, _ = self.qkv(hidden_states) query_states, key_states, value_states = qkv_states.chunk(3, dim=-1) - query_states = query_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - key_states = key_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - value_states = value_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - - if self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) - elif self.attn_backend == _Backend.TORCH_SDPA: - query_states, key_states, value_states = (x.transpose(1, 2) - for x in (query_states, - key_states, - value_states)) - out = F.scaled_dot_product_attention(query_states, - key_states, - value_states, - dropout_p=self.dropout, - scale=self.scale) - out = out.transpose(1, 2) - - out = out.view(bsz, tgt_len, -1) + out = self.attn(query_states, key_states, value_states) attn_output, _ = self.projection(out) return attn_output, None diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index cd89519e95986..a5300dfd986f3 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -5,11 +5,10 @@ import numpy as np import torch import torch.nn as nn -import torch.nn.functional as F from PIL import Image from transformers import CLIPVisionConfig -from vllm.attention.selector import _Backend +from vllm.attention.layer import MultiHeadAttention from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -25,8 +24,6 @@ resolve_visual_encoder_outputs) from vllm.sequence import SequenceData -from .utils import get_vit_attn_backend - def get_clip_patch_grid_length(*, 
image_size: int, patch_size: int) -> int: assert image_size % patch_size == 0 @@ -235,11 +232,8 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) - # Detect attention implementation. - self.attn_backend = get_vit_attn_backend(support_fa=False) - if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: - raise RuntimeError( - f"CLIP does not support {self.attn_backend} backend now.") + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, @@ -250,42 +244,10 @@ def forward( hidden_states: torch.Tensor, ): """Input shape: Batch x Time x Channel""" - bsz, tgt_len, _ = hidden_states.size() qkv_states, _ = self.qkv_proj(hidden_states) query_states, key_states, value_states = qkv_states.chunk(3, dim=-1) - - query_states = query_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - key_states = key_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - value_states = value_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - - if self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) - elif self.attn_backend == _Backend.TORCH_SDPA: - query_states, key_states, value_states = (x.transpose(1, 2) - for x in (query_states, - key_states, - value_states)) - out = F.scaled_dot_product_attention(query_states, - key_states, - value_states, - dropout_p=self.dropout, - scale=self.scale) - out = out.transpose(1, 2) - - out = out.view(bsz, tgt_len, -1) + out = self.attn(query_states, key_states, value_states) attn_output, _ = self.out_proj(out) return attn_output, None diff --git a/vllm/model_executor/models/glm4_vision_encoder.py b/vllm/model_executor/models/glm4_vision_encoder.py index f37ab0f82d52a..39a5736eb199b 100644 --- a/vllm/model_executor/models/glm4_vision_encoder.py +++ b/vllm/model_executor/models/glm4_vision_encoder.py @@ -8,6 +8,7 @@ from torch import nn from torch.nn import LayerNorm +from vllm.attention.layer import MultiHeadAttention from vllm.distributed import get_tensor_model_parallel_world_size from vllm.model_executor.layers.activation import SiluAndMul, get_act_fn from vllm.model_executor.layers.linear import (ColumnParallelLinear, @@ -77,27 +78,16 @@ def __init__( quant_config=quant_config, ) + self.attn = MultiHeadAttention(self.num_heads_per_rank, self.head_dim, + self.scale) self.output_dropout = torch.nn.Dropout(config.dropout_prob) def forward(self, x: torch.Tensor) -> torch.Tensor: - B, L, _ = x.shape qkv, _ = self.query_key_value(x) # B, L, 3 * H * D q, k, v = qkv.chunk(3, dim=-1) - q = q.reshape(B, L, self.num_heads_per_rank, - self.head_dim).permute(0, 2, 1, 3) # B, H, L, D - k = k.reshape(B, L, self.num_heads_per_rank, - self.head_dim).permute(0, 2, 1, 3) # B, H, L, D - v = v.reshape(B, L, self.num_heads_per_rank, - self.head_dim).permute(0, 2, 1, 3) # B, H, L, D - - out = torch.nn.functional.scaled_dot_product_attention(q, - k, - v, - attn_mask=None, - dropout_p=0., - is_causal=False) - - output, _ = self.dense(out.transpose(1, 2).view(B, L, -1)) + + out = self.attn(q, k, v) + output, _ = self.dense(out) output = self.output_dropout(output) return output diff --git 
a/vllm/model_executor/models/idefics2_vision_model.py b/vllm/model_executor/models/idefics2_vision_model.py index 16192928beb1f..e430a158d869a 100644 --- a/vllm/model_executor/models/idefics2_vision_model.py +++ b/vllm/model_executor/models/idefics2_vision_model.py @@ -21,8 +21,8 @@ from torch import nn from transformers.models.idefics2.configuration_idefics2 import ( Idefics2Config, Idefics2VisionConfig) -from xformers import ops as xops +from vllm.attention.layer import MultiHeadAttention from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.model_executor.layers.activation import get_act_fn from vllm.model_executor.layers.linear import (ColumnParallelLinear, @@ -141,35 +141,18 @@ def __init__( ) self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) - self.is_causal = False + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def forward( self, hidden_states: torch.Tensor, ) -> torch.Tensor: - batch_size, q_len, _ = hidden_states.size() qkv, _ = self.qkv_proj( hidden_states ) # batch_size, q_len, 3 * num_heads_per_partition * head_dim query_states, key_states, value_states = qkv.chunk(3, dim=-1) - query_states = query_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - key_states = key_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - value_states = value_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - # see: https://facebookresearch.github.io/xformers/components/ops.html - out = xops.memory_efficient_attention_forward( - query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale, - ) - out = out.view(batch_size, q_len, -1) + out = self.attn(query_states, key_states, value_states) attn_output, _ = self.out_proj(out) return attn_output diff --git a/vllm/model_executor/models/intern_vit.py b/vllm/model_executor/models/intern_vit.py index c4346fcb3bd2a..7ff68bd60e8ad 100644 --- a/vllm/model_executor/models/intern_vit.py +++ b/vllm/model_executor/models/intern_vit.py @@ -12,7 +12,7 @@ import torch.nn.functional as F from transformers import PretrainedConfig -from vllm.attention.selector import _Backend +from vllm.attention.layer import MultiHeadAttention from vllm.distributed import (divide, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, split_tensor_along_last_dim, @@ -25,8 +25,6 @@ from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from .utils import get_vit_attn_backend - NORM2FN = { 'rms_norm': RMSNorm, 'layer_norm': nn.LayerNorm, @@ -183,10 +181,8 @@ def __init__( prefix=f"{prefix}.proj", ) - self.attn_backend = get_vit_attn_backend(support_fa=False) - if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: - raise RuntimeError( - f"InternViT does not support {self.attn_backend} backend now.") + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def _apply_qk_norm(self, q: torch.Tensor, k: torch.Tensor): if self.tp_size > 1: @@ -209,23 +205,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: if self.qk_normalization: q, k = self._apply_qk_norm(q, k) - q = q.view(B, N, self.num_heads_per_partition, self.head_dim) - k = k.view(B, N, self.num_heads_per_partition, self.head_dim) - v = v.view(B, N, self.num_heads_per_partition, self.head_dim) - - if 
self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - - out = xops.memory_efficient_attention_forward(q, - k, - v, - scale=self.scale) - elif self.attn_backend == _Backend.TORCH_SDPA: - q, k, v = (x.transpose(1, 2) for x in (q, k, v)) - out = F.scaled_dot_product_attention(q, k, v, scale=self.scale) - out = out.transpose(1, 2) - - out = out.view(B, N, -1) + out = self.attn(q, k, v) out, _ = self.proj(out) return out diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 86aab38032450..d5a7781fecfc3 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -482,6 +482,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.mlp1 = self._init_mlp1(config) self.img_context_token_id = None + self.visual_token_mask = None self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) @@ -635,13 +636,12 @@ def _process_image_input( return image_embeds - def _get_visual_token_mask(self, input_ids: torch.Tensor) -> torch.Tensor: + def _set_visual_token_mask(self, input_ids: torch.Tensor) -> torch.Tensor: if self.is_mono: - visual_token_mask = ( + self.visual_token_mask = ( input_ids == self.img_context_token_id).reshape(-1, 1) else: - visual_token_mask = None - return visual_token_mask + self.visual_token_mask = None def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: image_input = self._parse_and_validate_image_input(**kwargs) @@ -658,6 +658,7 @@ def get_input_embeddings( inputs_embeds = self.language_model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: assert self.img_context_token_id is not None + self._set_visual_token_mask(input_ids) inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.img_context_token_id) @@ -674,7 +675,6 @@ def forward( **kwargs: object, ) -> Union[SamplerOutput, IntermediateTensors]: - visual_token_mask = None if intermediate_tensors is not None: input_ids = None inputs_embeds = None @@ -695,16 +695,15 @@ def forward( "intermediate_tensors": intermediate_tensors, "inputs_embeds": inputs_embeds, } - if self.img_context_token_id is not None: - visual_token_mask = self._get_visual_token_mask(input_ids) - # We always overwrite it back to None after computing visual token - # mask so that this doesn't need to depend on encoder output + if self.visual_token_mask is not None: + # overwrite visual_token_mask and img_context_token_id back to None, + # so that this doesn't need to depend on encoder output + forward_kwargs.update( + {"visual_token_mask": self.visual_token_mask}) + self.visual_token_mask = None self.img_context_token_id = None - if self.is_mono: - forward_kwargs.update({"visual_token_mask": visual_token_mask}) - hidden_states = self.language_model.model(**forward_kwargs) return hidden_states diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index 98caa6857e211..d1fcbd167c199 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -13,6 +13,7 @@ from transformers import PretrainedConfig from vllm.attention import Attention, AttentionMetadata +from vllm.attention.layer import MultiHeadAttention from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank, @@ -38,14 +39,12 @@ from vllm.multimodal import 
MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.inputs import NestedTensors from vllm.multimodal.utils import cached_get_tokenizer -from vllm.platforms import _Backend from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, SequenceData) from vllm.transformers_utils.processor import get_processor from .interfaces import SupportsMultiModal, SupportsPP -from .utils import (AutoWeightsLoader, WeightsMapper, get_vit_attn_backend, - is_pp_missing_parameter, +from .utils import (AutoWeightsLoader, WeightsMapper, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -188,13 +187,11 @@ def __init__( quant_config=quant_config, ) - # Detect attention implementation. - self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True) - if self.attn_backend not in { - _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS - }: - raise RuntimeError( - f"Molmo does not support {self.attn_backend} backend now.") + self.scale = self.head_dim**-0.5 + self.attn = MultiHeadAttention(self.num_heads, + self.head_dim, + self.scale, + num_kv_heads=self.num_kv_heads) def forward(self, inputs_q: torch.Tensor, @@ -210,25 +207,8 @@ def forward(self, xq, _ = self.wq(inputs_q) xk, _ = self.wk(inputs_k) xv, _ = self.wv(inputs_v) - q_shape = xq.size()[:-1] + (self.num_heads, self.head_dim) - kv_shape = xk.size()[:-1] + (self.num_kv_heads, self.head_dim) - xq = xq.view(*q_shape) - xk = xk.view(*kv_shape) - xv = xv.view(*kv_shape) - - if self.attn_backend == _Backend.FLASH_ATTN: - from flash_attn import flash_attn_func - output = flash_attn_func(xq, xk, xv, dropout_p=0.0, causal=False) - elif self.attn_backend == _Backend.TORCH_SDPA: - xq, xk, xv = (rearrange(x, "b s h d -> b h s d") - for x in (xq, xk, xv)) - output = F.scaled_dot_product_attention(xq, xk, xv) - output = rearrange(output, "b h s d -> b s h d ") - elif self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - output = xops.memory_efficient_attention_forward(xq, xk, xv, p=0) - - output = rearrange(output, "b s h d -> b s (h d)").contiguous() + + output = self.attn(xq, xk, xv) output, _ = self.wo(output) return output diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py index deaed0ba7e4ce..6fb9e2cc4584f 100644 --- a/vllm/model_executor/models/siglip.py +++ b/vllm/model_executor/models/siglip.py @@ -6,12 +6,11 @@ import numpy as np import torch -import torch.nn.functional as F from PIL import Image from torch import nn from transformers import SiglipVisionConfig -from vllm.attention.selector import _Backend +from vllm.attention.layer import MultiHeadAttention from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -29,8 +28,6 @@ resolve_visual_encoder_outputs) from vllm.sequence import SequenceData -from .utils import get_vit_attn_backend - def get_siglip_patch_grid_length(*, image_size: int, patch_size: int) -> int: # Since interpolation is applied, the image size need not be divisible @@ -291,52 +288,18 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) - self.attn_backend = get_vit_attn_backend(support_fa=False) - if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: - raise RuntimeError( - f"SIGLIP does not support {self.attn_backend} backend now.") + self.attn = 
MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def forward( self, hidden_states: torch.Tensor, ) -> torch.Tensor: """Input shape: Batch x Time x Channel""" - batch_size, q_len, _ = hidden_states.size() - qkv_states, _ = self.qkv_proj(hidden_states) query_states, key_states, value_states = qkv_states.chunk(3, dim=-1) - query_states = query_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - key_states = key_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - value_states = value_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - - if self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) - elif self.attn_backend == _Backend.TORCH_SDPA: - query_states, key_states, value_states = (x.transpose(1, 2) - for x in (query_states, - key_states, - value_states)) - out = F.scaled_dot_product_attention(query_states, - key_states, - value_states, - dropout_p=self.dropout, - scale=self.scale) - out = out.transpose(1, 2) - - out = out.view(batch_size, q_len, -1) + out = self.attn(query_states, key_states, value_states) attn_output, _ = self.out_proj(out) return attn_output, None From 82eb5ea8f3bd3aabbe5c2fd43e37d263768603c5 Mon Sep 17 00:00:00 2001 From: "Chendi.Xue" Date: Wed, 4 Dec 2024 15:28:21 -0600 Subject: [PATCH 235/397] Benchmark serving structured output (#10880) Signed-off-by: Chendi Xue Co-authored-by: Michael Goin --- benchmarks/backend_request_func.py | 6 + benchmarks/benchmark_serving_guided.py | 881 +++++++++++++++++++++++++ 2 files changed, 887 insertions(+) create mode 100644 benchmarks/benchmark_serving_guided.py diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py index c3fed56e8a956..b67849038cf0d 100644 --- a/benchmarks/backend_request_func.py +++ b/benchmarks/backend_request_func.py @@ -24,6 +24,7 @@ class RequestFuncInput: model: str best_of: int = 1 logprobs: Optional[int] = None + extra_body: Optional[dict] = None multi_modal_content: Optional[dict] = None ignore_eos: bool = False @@ -36,6 +37,7 @@ class RequestFuncOutput: ttft: float = 0.0 # Time to first token itl: List[float] = field( default_factory=list) # List of inter-token latencies + tpot: float = 0.0 # avg next-token latencies prompt_len: int = 0 error: str = "" @@ -242,6 +244,8 @@ async def async_request_openai_completions( "stream": True, "ignore_eos": request_func_input.ignore_eos, } + if request_func_input.extra_body: + payload.update(request_func_input.extra_body) headers = { "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}" } @@ -336,6 +340,8 @@ async def async_request_openai_chat_completions( "stream": True, "ignore_eos": request_func_input.ignore_eos, } + if request_func_input.extra_body: + payload.update(request_func_input.extra_body) headers = { "Content-Type": "application/json", "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}", diff --git a/benchmarks/benchmark_serving_guided.py b/benchmarks/benchmark_serving_guided.py new file mode 100644 index 0000000000000..4435d87e18a8a --- /dev/null +++ b/benchmarks/benchmark_serving_guided.py @@ -0,0 +1,881 @@ +r"""Benchmark online serving throughput with guided decoding. 
+ +On the server side, run one of the following commands: + (vLLM OpenAI API server) + vllm serve --disable-log-requests + + (TGI backend) + ./launch_tgi_server.sh + +On the client side, run: + python benchmarks/benchmark_serving.py \ + --backend \ + --model \ + --dataset json \ + --guided-decoding-ratio 1.0 \ + --guided-decoding-backend xgrammar \ + --request-rate 10 \ + --num-prompts 1000 + + when using tgi backend, add + --endpoint /generate_stream + to the end of the command above. +""" +import argparse +import asyncio +import dataclasses +import json +import os +import random +import time +import warnings +from dataclasses import dataclass +from typing import AsyncGenerator, List, Optional, Tuple + +import datasets +import numpy as np +import pandas as pd +from backend_request_func import (ASYNC_REQUEST_FUNCS, RequestFuncInput, + RequestFuncOutput) +from tqdm.asyncio import tqdm +from transformers import PreTrainedTokenizerBase + +try: + from vllm.transformers_utils.tokenizer import get_tokenizer +except ImportError: + from backend_request_func import get_tokenizer + +try: + from vllm.utils import FlexibleArgumentParser +except ImportError: + from argparse import ArgumentParser as FlexibleArgumentParser + +MILLISECONDS_TO_SECONDS_CONVERSION = 1000 + + +@dataclass +class BenchmarkMetrics: + completed: int + total_input: int + total_output: int + request_throughput: float + request_goodput: float + output_throughput: float + total_token_throughput: float + mean_ttft_ms: float + median_ttft_ms: float + std_ttft_ms: float + percentiles_ttft_ms: List[Tuple[float, float]] + mean_tpot_ms: float + median_tpot_ms: float + std_tpot_ms: float + percentiles_tpot_ms: List[Tuple[float, float]] + mean_itl_ms: float + median_itl_ms: float + std_itl_ms: float + percentiles_itl_ms: List[Tuple[float, float]] + # E2EL stands for end-to-end latency per request. + # It is the time taken on the client side from sending + # a request to receiving a complete response. + mean_e2el_ms: float + median_e2el_ms: float + std_e2el_ms: float + percentiles_e2el_ms: List[Tuple[float, float]] + + +@dataclasses.dataclass +class SampleRequest: + """A class representing a single inference request for benchmarking. + + Attributes: + prompt: The input text prompt for the model. + multi_modal_data: Optional dictionary containing multi-modal data (e.g. + images). + prompt_len: The length of the prompt in tokens. + expected_output_len: The expected length of the output in tokens. 
+ """ + prompt: str + prompt_len: int + expected_output_len: int + schema: dict + structure_type: str + completion: str = None + + +def sample_requests(tokenizer: PreTrainedTokenizerBase, + args: argparse.Namespace) -> List[SampleRequest]: + if args.dataset == 'json': + if args.json_schema_path is None: + dir_path = os.path.dirname(os.path.realpath(__file__)) + args.json_schema_path = os.path.join(dir_path, + "structured_schemas", + "structured_schema_1.json") + with open(args.json_schema_path) as f: + schema = json.load(f) + prompt = f"Generate an example of a user profile given the following schema: {json.dumps(schema)}" # noqa: E501 + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "grammar": + schema = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ + """ + prompt = "Generate an SQL query to show the 'username' \ + and 'email' from the 'users' table." + + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "regex": + regex = r"\w+@\w+\.com\n" + args.regex = regex + prompt = "Generate an email address for Alan Turing, \ + who works in Enigma. End in .com and new line. \ + Example result: alan.turing@enigma.com\n" + + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=regex, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "choice": + choice = ["Positive", "Negative"] + args.choice = choice + prompt = "Classify this sentiment: vLLM is wonderful!" 
+ input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=choice, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "xgrammar_bench": + requests: List[SampleRequest] = [] + dataset = datasets.load_dataset("NousResearch/json-mode-eval", + split="train") + print(f"dataset has {len(dataset)} entries") + len_dataset = len(dataset) + for data_point_idx in range(args.num_prompts): + idx = data_point_idx + while idx >= len_dataset: + idx -= len_dataset + schema = dataset["schema"][idx] + prompt = tokenizer.apply_chat_template(dataset["prompt"][idx], + tokenize=False) + input_len = len(tokenizer(prompt).input_ids) + completion = dataset["completion"][idx] + + requests.append( + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + structure_type=args.structure_type, + completion=completion)) + + return requests + + +async def get_request( + input_requests: List[SampleRequest], + request_rate: float, + burstiness: float = 1.0, +) -> AsyncGenerator[Tuple[int, SampleRequest], None]: + """ + Asynchronously generates requests at a specified rate + with OPTIONAL burstiness. + + Args: + input_requests: + A list of input requests, each represented as a tuple. + request_rate: + The rate at which requests are generated (requests/s). + burstiness (optional): + The burstiness factor of the request generation. + Only takes effect when request_rate is not inf. + Default value is 1, which follows a Poisson process. + Otherwise, the request intervals follow a gamma distribution. + A lower burstiness value (0 < burstiness < 1) results + in more bursty requests, while a higher burstiness value + (burstiness > 1) results in a more uniform arrival of requests. + """ + input_requests = iter(input_requests) + + # Calculate scale parameter theta to maintain the desired request_rate. + assert burstiness > 0, ( + f"A positive burstiness factor is expected, but given {burstiness}.") + theta = 1.0 / (request_rate * burstiness) + + for i, request in enumerate(input_requests): + yield i, request + + if request_rate == float("inf"): + # If the request rate is infinity, then we don't need to wait. + continue + + # Sample the request interval from the gamma distribution. + # If burstiness is 1, it follows exponential distribution. + interval = np.random.gamma(shape=burstiness, scale=theta) + # The next request will be sent after the interval. 
+ await asyncio.sleep(interval) + + +def calculate_metrics( + input_requests: List[Tuple[str, int, int]], + outputs: List[RequestFuncOutput], + dur_s: float, + tokenizer: PreTrainedTokenizerBase, + selected_percentile_metrics: List[str], + selected_percentiles: List[float], +) -> Tuple[BenchmarkMetrics, List[int]]: + actual_output_lens: List[int] = [] + total_input = 0 + completed = 0 + good_completed = 0 + itls: List[float] = [] + tpots: List[float] = [] + all_tpots: List[float] = [] + ttfts: List[float] = [] + e2els: List[float] = [] + for i in range(len(outputs)): + if outputs[i].success: + # We use the tokenizer to count the number of output tokens for all + # serving backends instead of looking at len(outputs[i].itl) since + # multiple output tokens may be bundled together + # Note : this may inflate the output token count slightly + output_len = len( + tokenizer(outputs[i].generated_text, + add_special_tokens=False).input_ids) + actual_output_lens.append(output_len) + total_input += input_requests[i].prompt_len + tpot = 0 + if output_len > 1: + tpot = (outputs[i].latency - outputs[i].ttft) / (output_len - + 1) + tpots.append(tpot) + outputs[i].tpot = sum(tpots) / len(tpots) if len(tpots) else 0 + # Note: if output_len <= 1, we regard tpot as 0 for goodput + all_tpots.append(tpot) + itls += outputs[i].itl + ttfts.append(outputs[i].ttft) + e2els.append(outputs[i].latency) + completed += 1 + else: + actual_output_lens.append(0) + + if completed == 0: + warnings.warn( + "All requests failed. This is likely due to a misconfiguration " + "on the benchmark arguments.", + stacklevel=2) + metrics = BenchmarkMetrics( + completed=completed, + total_input=total_input, + total_output=sum(actual_output_lens), + request_throughput=completed / dur_s, + request_goodput=good_completed / dur_s, + output_throughput=sum(actual_output_lens) / dur_s, + total_token_throughput=(total_input + sum(actual_output_lens)) / dur_s, + mean_ttft_ms=np.mean(ttfts or 0) * + 1000, # ttfts is empty if streaming is not supported by backend + std_ttft_ms=np.std(ttfts or 0) * 1000, + median_ttft_ms=np.median(ttfts or 0) * 1000, + percentiles_ttft_ms=[(p, np.percentile(ttfts or 0, p) * 1000) + for p in selected_percentiles], + mean_tpot_ms=np.mean(tpots or 0) * 1000, + std_tpot_ms=np.std(tpots or 0) * 1000, + median_tpot_ms=np.median(tpots or 0) * 1000, + percentiles_tpot_ms=[(p, np.percentile(tpots or 0, p) * 1000) + for p in selected_percentiles], + mean_itl_ms=np.mean(itls or 0) * 1000, + std_itl_ms=np.std(itls or 0) * 1000, + median_itl_ms=np.median(itls or 0) * 1000, + percentiles_itl_ms=[(p, np.percentile(itls or 0, p) * 1000) + for p in selected_percentiles], + mean_e2el_ms=np.mean(e2els or 0) * 1000, + std_e2el_ms=np.std(e2els or 0) * 1000, + median_e2el_ms=np.median(e2els or 0) * 1000, + percentiles_e2el_ms=[(p, np.percentile(e2els or 0, p) * 1000) + for p in selected_percentiles], + ) + + return metrics, actual_output_lens + + +async def benchmark( + backend: str, + api_url: str, + base_url: str, + model_id: str, + tokenizer: PreTrainedTokenizerBase, + input_requests: List[SampleRequest], + request_rate: float, + burstiness: float, + disable_tqdm: bool, + profile: bool, + selected_percentile_metrics: List[str], + selected_percentiles: List[str], + ignore_eos: bool, + max_concurrency: Optional[int], + guided_decoding_ratio: float, + guided_decoding_backend: str, +): + if backend in ASYNC_REQUEST_FUNCS: + request_func = ASYNC_REQUEST_FUNCS[backend] + else: + raise ValueError(f"Unknown backend: {backend}") + + def 
prepare_extra_body(request) -> dict: + extra_body = {} + # Add the schema to the extra_body + extra_body[request.structure_type] = request.schema + # Add the specific guided_decoding_backend + extra_body["guided_decoding_backend"] = guided_decoding_backend + return extra_body + + print("Starting initial single prompt test run...") + guided_decoding_req_idx = random.sample( + range(len(input_requests)), + int(len(input_requests) * guided_decoding_ratio)) + + test_request = input_requests[0] + test_input = RequestFuncInput( + model=model_id, + prompt=test_request.prompt, + api_url=api_url, + prompt_len=test_request.prompt_len, + output_len=test_request.expected_output_len, + ignore_eos=ignore_eos, + extra_body=prepare_extra_body(test_request), + ) + test_output = await request_func(request_func_input=test_input) + if not test_output.success: + raise ValueError( + "Initial test run failed - Please make sure benchmark arguments " + f"are correctly specified. Error: {test_output.error}") + else: + print("Initial test run completed. Starting main benchmark run...") + + if profile: + print("Starting profiler...") + profile_input = RequestFuncInput( + model=model_id, + prompt=test_request.prompt, + api_url=base_url + "/start_profile", + prompt_len=test_request.prompt_len, + output_len=test_request.expected_output_len, + ignore_eos=ignore_eos, + extra_body=prepare_extra_body(test_request), + ) + profile_output = await request_func(request_func_input=profile_input) + if profile_output.success: + print("Profiler started") + + if burstiness == 1.0: + distribution = "Poisson process" + else: + distribution = "Gamma distribution" + + print(f"Traffic request rate: {request_rate}") + print(f"Burstiness factor: {burstiness} ({distribution})") + print(f"Maximum request concurrency: {max_concurrency}") + + pbar = None if disable_tqdm else tqdm(total=len(input_requests)) + + # This can be used once the minimum Python version is 3.10 or higher, + # and it will simplify the code in limited_request_func. 
+ # semaphore = (asyncio.Semaphore(max_concurrency) + # if max_concurrency else contextlib.nullcontext()) + semaphore = (asyncio.Semaphore(max_concurrency) + if max_concurrency else None) + + async def limited_request_func(request_func_input, pbar): + if semaphore is None: + return await request_func(request_func_input=request_func_input, + pbar=pbar) + async with semaphore: + return await request_func(request_func_input=request_func_input, + pbar=pbar) + + benchmark_start_time = time.perf_counter() + tasks: List[asyncio.Task] = [] + expected: List[str] = [] + async for i, request in get_request(input_requests, request_rate, + burstiness): + extra_body = prepare_extra_body( + request) if i in guided_decoding_req_idx else None + request_func_input = RequestFuncInput( + model=model_id, + prompt=request.prompt, + api_url=api_url, + prompt_len=request.prompt_len, + output_len=request.expected_output_len, + ignore_eos=ignore_eos, + extra_body=extra_body, + ) + expected.append(request.completion) + tasks.append( + asyncio.create_task( + limited_request_func(request_func_input=request_func_input, + pbar=pbar))) + outputs: List[RequestFuncOutput] = await asyncio.gather(*tasks) + + if profile: + print("Stopping profiler...") + profile_input = RequestFuncInput( + model=model_id, + prompt=test_request.prompt, + api_url=base_url + "/stop_profile", + prompt_len=test_request.prompt_len, + output_len=test_request.expected_output_len, + extra_body={test_request.structure_type: test_request.schema}, + ) + profile_output = await request_func(request_func_input=profile_input) + if profile_output.success: + print("Profiler stopped") + + if pbar is not None: + pbar.close() + + benchmark_duration = time.perf_counter() - benchmark_start_time + + metrics, actual_output_lens = calculate_metrics( + input_requests=input_requests, + outputs=outputs, + dur_s=benchmark_duration, + tokenizer=tokenizer, + selected_percentile_metrics=selected_percentile_metrics, + selected_percentiles=selected_percentiles, + ) + + print("{s:{c}^{n}}".format(s=' Serving Benchmark Result ', n=50, c='=')) + print("{:<40} {:<10}".format("Successful requests:", metrics.completed)) + print("{:<40} {:<10.2f}".format("Benchmark duration (s):", + benchmark_duration)) + print("{:<40} {:<10}".format("Total input tokens:", metrics.total_input)) + print("{:<40} {:<10}".format("Total generated tokens:", + metrics.total_output)) + print("{:<40} {:<10.2f}".format("Request throughput (req/s):", + metrics.request_throughput)) + print("{:<40} {:<10.2f}".format("Output token throughput (tok/s):", + metrics.output_throughput)) + print("{:<40} {:<10.2f}".format("Total Token throughput (tok/s):", + metrics.total_token_throughput)) + + result = { + "duration": + benchmark_duration, + "completed": + metrics.completed, + "total_input_tokens": + metrics.total_input, + "total_output_tokens": + metrics.total_output, + "request_throughput": + metrics.request_throughput, + "output_throughput": + metrics.output_throughput, + "total_token_throughput": + metrics.total_token_throughput, + "ttft_description": + pd.Series([output.ttft for output in outputs]).describe().to_dict(), + "tpot_description": + pd.Series([output.tpot for output in outputs]).describe().to_dict(), + "input_lens": [output.prompt_len for output in outputs], + "output_lens": + actual_output_lens, + "ttfts": [output.ttft for output in outputs], + "itls": [output.itl for output in outputs], + "errors": [output.error for output in outputs], + } + + ret = [{ + 'generated': output.generated_text, + 
'expected': gt + } for output, gt in zip(outputs, expected)] + + def process_one_metric( + # E.g., "ttft" + metric_attribute_name: str, + # E.g., "TTFT" + metric_name: str, + # E.g., "Time to First Token" + metric_header: str, + ): + # This function prints and adds statistics of the specified + # metric. + if metric_attribute_name not in selected_percentile_metrics: + return + print("{s:{c}^{n}}".format(s=metric_header, n=50, c='-')) + print("{:<40} {:<10.2f}".format( + f"Mean {metric_name} (ms):", + getattr(metrics, f"mean_{metric_attribute_name}_ms"))) + print("{:<40} {:<10.2f}".format( + f"Median {metric_name} (ms):", + getattr(metrics, f"median_{metric_attribute_name}_ms"))) + result[f"mean_{metric_attribute_name}_ms"] = getattr( + metrics, f"mean_{metric_attribute_name}_ms") + result[f"median_{metric_attribute_name}_ms"] = getattr( + metrics, f"median_{metric_attribute_name}_ms") + result[f"std_{metric_attribute_name}_ms"] = getattr( + metrics, f"std_{metric_attribute_name}_ms") + for p, value in getattr(metrics, + f"percentiles_{metric_attribute_name}_ms"): + p_word = str(int(p)) if int(p) == p else str(p) + print("{:<40} {:<10.2f}".format(f"P{p_word} {metric_name} (ms):", + value)) + result[f"p{p_word}_{metric_attribute_name}_ms"] = value + + process_one_metric("ttft", "TTFT", "Time to First Token") + process_one_metric("tpot", "TPOT", + "Time per Output Token (excl. 1st token)") + process_one_metric("itl", "ITL", "Inter-token Latency") + process_one_metric("e2el", "E2EL", "End-to-end Latency") + + print("=" * 50) + + return result, ret + + +def evaluate(ret, args): + + def _eval_correctness_json(expected, actual): + # extract json string from string using regex + import re + actual = actual.replace('\n', '').replace(' ', '').strip() + try: + actual = re.search(r'\{.*\}', actual).group() + actual = json.loads(actual) + except Exception: + return False + + return True + + def _eval_correctness_choice(expected, actual): + return actual in args.choice + + def _eval_correctness_regex(expected, actual): + import re + return re.match(args.regex, actual) is not None + + def _eval_correctness(expected, actual): + if args.structure_type == 'guided_json': + return _eval_correctness_json(expected, actual) + elif args.structure_type == 'guided_regex': + return _eval_correctness_regex(expected, actual) + elif args.structure_type == 'guided_choice': + return _eval_correctness_choice(expected, actual) + else: + return None + + scores = [] + for res in ret: + score = _eval_correctness(res['expected'], res['generated']) + res['correctness'] = score + scores.append(score) + + not_none_scores = [score for score in scores if score is not None] + + return (sum(not_none_scores) / len(not_none_scores) * + 100) if len(not_none_scores) > 0 else None + + +def main(args: argparse.Namespace): + print(args) + random.seed(args.seed) + np.random.seed(args.seed) + + backend = args.backend + model_id = args.model + tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model + + if args.base_url is not None: + api_url = f"{args.base_url}{args.endpoint}" + base_url = f"{args.base_url}" + else: + api_url = f"http://{args.host}:{args.port}{args.endpoint}" + base_url = f"http://{args.host}:{args.port}" + + tokenizer = get_tokenizer(tokenizer_id, + trust_remote_code=args.trust_remote_code) + + if args.dataset == 'grammar': + args.structure_type = 'guided_grammar' + elif args.dataset == 'regex': + args.structure_type = 'guided_regex' + elif args.dataset == 'choice': + args.structure_type = 'guided_choice' 
+ else: + args.structure_type = 'guided_json' + + if args.no_guided_decoding: + args.guided_decoding_ratio = 0 + if args.save_results: + result_file_name = f'{args.guided_decoding_ratio}guided' + result_file_name += f"_{backend}" + result_file_name += f"_{args.request_rate}qps" + result_file_name += f"_{args.model.split('/')[-1]}" + result_file_name += f"_{args.dataset}" + result_file_name += f"_{args.num_prompts}" + result_file_name += f"_out{args.output_len}" + result_file_name += ".txt" + else: + result_file_name = None + + input_requests = sample_requests(tokenizer, args) + + benchmark_result, ret = asyncio.run( + benchmark( + backend=backend, + api_url=api_url, + base_url=base_url, + model_id=model_id, + tokenizer=tokenizer, + input_requests=input_requests, + request_rate=args.request_rate, + burstiness=args.burstiness, + disable_tqdm=args.disable_tqdm, + profile=args.profile, + selected_percentile_metrics=args.percentile_metrics.split(","), + selected_percentiles=[ + float(p) for p in args.metric_percentiles.split(",") + ], + ignore_eos=args.ignore_eos, + max_concurrency=args.max_concurrency, + guided_decoding_ratio=args.guided_decoding_ratio, + guided_decoding_backend=args.guided_decoding_backend, + )) + + # Save config and results to json + score = evaluate(ret, args) + print("correct_rate(%)", score, '\n') + if args.save_results: + results = { + "backend": + backend, + "model_id": + model_id, + "tokenizer_id": + tokenizer_id, + "num_prompts": + args.num_prompts, + "request_rate": + args.request_rate if args.request_rate < float("inf") else "inf", + "burstiness": + args.burstiness, + "max_concurrency": + args.max_concurrency, + "correct_rate(%)": + score + } + results = {"outputs": ret, **results, **benchmark_result} + + # Save to file + if args.result_filename: + result_file_name = args.result_filename + if args.result_dir: + result_file_name = os.path.join(args.result_dir, result_file_name) + with open(result_file_name, "w", encoding='utf-8') as outfile: + json.dump(results, outfile, indent=4) + + +if __name__ == "__main__": + parser = FlexibleArgumentParser( + description="Benchmark the online serving throughput.") + parser.add_argument( + "--backend", + type=str, + default="vllm", + choices=list(ASYNC_REQUEST_FUNCS.keys()), + ) + parser.add_argument( + "--base-url", + type=str, + default=None, + help="Server or API base url if not using http host and port.", + ) + parser.add_argument("--host", type=str, default="localhost") + parser.add_argument("--port", type=int, default=8000) + parser.add_argument( + "--endpoint", + type=str, + default="/v1/completions", + help="API endpoint.", + ) + parser.add_argument( + "--dataset", + default='json', + choices=['json', 'grammar', 'regex', 'choice', 'xgrammar_bench']) + parser.add_argument("--json_schema_path", + type=str, + default=None, + help="Path to json schema.") + parser.add_argument( + "--max-concurrency", + type=int, + default=None, + help="Maximum number of concurrent requests. This can be used " + "to help simulate an environment where a higher level component " + "is enforcing a maximum number of concurrent requests. While the " + "--request-rate argument controls the rate at which requests are " + "initiated, this argument will control how many are actually allowed " + "to execute at a time. 
This means that when used in combination, the " + "actual request rate may be lower than specified with --request-rate, " + "if the server is not processing requests fast enough to keep up.") + parser.add_argument( + "--model", + type=str, + required=True, + help="Name of the model.", + ) + parser.add_argument( + "--tokenizer", + type=str, + help= + "Name or path of the tokenizer, if not using the default tokenizer.", # noqa: E501 + ) + parser.add_argument( + "--num-prompts", + type=int, + default=1000, + help="Number of prompts to process.", + ) + parser.add_argument( + "--output-len", + type=int, + default=128, + help="Number of output tokens.", + ) + parser.add_argument( + "--request-rate", + type=float, + default=float("inf"), + help="Number of requests per second. If this is inf, " + "then all the requests are sent at time 0. " + "Otherwise, we use Poisson process or gamma distribution " + "to synthesize the request arrival times.", + ) + parser.add_argument( + "--burstiness", + type=float, + default=1.0, + help="Burstiness factor of the request generation. " + "Only take effect when request_rate is not inf. " + "Default value is 1, which follows Poisson process. " + "Otherwise, the request intervals follow a gamma distribution. " + "A lower burstiness value (0 < burstiness < 1) results in more " + "bursty requests. A higher burstiness value (burstiness > 1) " + "results in a more uniform arrival of requests.", + ) + parser.add_argument("--seed", type=int, default=0) + parser.add_argument( + "--trust-remote-code", + action="store_true", + help="Trust remote code from huggingface", + ) + parser.add_argument( + "--disable-tqdm", + action="store_true", + help="Specify to disable tqdm progress bar.", + ) + parser.add_argument( + "--save-results", + action="store_true", + help="Specify to save benchmark results to a json file", + ) + parser.add_argument( + "--profile", + action="store_true", + help="Use Torch Profiler. The endpoint must be launched with " + "VLLM_TORCH_PROFILER_DIR to enable profiler.", + ) + parser.add_argument( + "--result-dir", + type=str, + default=None, + help="Specify directory to save benchmark json results." + "If not specified, results are saved in the current directory.", + ) + parser.add_argument( + "--result-filename", + type=str, + default=None, + help="Specify the filename to save benchmark json results." + "If not specified, results will be saved in " + "{backend}-{args.request_rate}qps-{base_model_id}-{current_dt}.json" + " format.", + ) + parser.add_argument( + "--ignore-eos", + action="store_true", + help="Set ignore_eos flag when sending the benchmark request." + "Warning: ignore_eos is not supported in deepspeed_mii and tgi.") + parser.add_argument( + "--percentile-metrics", + type=str, + default="ttft,tpot,itl", + help="Comma-seperated list of selected metrics to report percentils. " + "This argument specifies the metrics to report percentiles. " + "Allowed metric names are \"ttft\", \"tpot\", \"itl\", \"e2el\". " + "Default value is \"ttft,tpot,itl\".") + parser.add_argument( + "--metric-percentiles", + type=str, + default="99", + help="Comma-seperated list of percentiles for selected metrics. " + "To report 25-th, 50-th, and 75-th percentiles, use \"25,50,75\". " + "Default value is \"99\". 
" + "Use \"--percentile-metrics\" to select metrics.", + ) + parser.add_argument("--no-guided-decoding", + action='store_true', + default=False, + help="Whether to disable JSON decoding or not.") + parser.add_argument("--guided-decoding-ratio", + type=float, + default=1.0, + help="Ratio of Guided Decoding requests") + parser.add_argument("--guided-decoding-backend", + type=str, + choices=["outlines", "lm-format-enforcer", "xgrammar"], + default="xgrammar", + help="Backend to use for guided decoding") + + args = parser.parse_args() + main(args) From e4c34c23de2a90ab837772ac182638ac3bc1636d Mon Sep 17 00:00:00 2001 From: Daniele <36171005+dtrifiro@users.noreply.github.com> Date: Wed, 4 Dec 2024 22:48:13 +0100 Subject: [PATCH 236/397] [CI/Build] improve python-only dev setup (#9621) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Daniele Trifirò Signed-off-by: youkaichao Co-authored-by: youkaichao --- docs/source/getting_started/installation.rst | 41 +++------ python_only_dev.py | 96 ++------------------ setup.py | 83 ++++++++++++++++- vllm/envs.py | 3 +- 4 files changed, 102 insertions(+), 121 deletions(-) diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst index 52412fa8437b9..9b6cb0e80d60e 100644 --- a/docs/source/getting_started/installation.rst +++ b/docs/source/getting_started/installation.rst @@ -21,7 +21,7 @@ You can install vLLM using pip: .. code-block:: console $ # (Recommended) Create a new conda environment. - $ conda create -n myenv python=3.10 -y + $ conda create -n myenv python=3.12 -y $ conda activate myenv $ # Install vLLM with CUDA 12.1. @@ -89,45 +89,24 @@ Build from source Python-only build (without compilation) --------------------------------------- -If you only need to change Python code, you can simply build vLLM without compilation. - -The first step is to install the latest vLLM wheel: - -.. code-block:: console - - pip install https://vllm-wheels.s3.us-west-2.amazonaws.com/nightly/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl - -You can find more information about vLLM's wheels `above <#install-the-latest-code>`_. - -After verifying that the installation is successful, you can use `the following script `_: +If you only need to change Python code, you can build and install vLLM without compilation. Using `pip's ``--editable`` flag `_, changes you make to the code will be reflected when you run vLLM: .. code-block:: console $ git clone https://github.com/vllm-project/vllm.git $ cd vllm - $ python python_only_dev.py + $ VLLM_USE_PRECOMPILED=1 pip install --editable . -The script will: +This will download the latest nightly wheel and use the compiled libraries from there in the install. -* Find the installed vLLM package in the current environment. -* Copy built files to the current directory. -* Rename the installed vLLM package. -* Symbolically link the current directory to the installed vLLM package. - -Now, you can edit the Python code in the current directory, and the changes will be reflected when you run vLLM. - -Once you have finished editing or want to install another vLLM wheel, you should exit the development environment using `the same script `_ with the ``--quit-dev`` (or ``-q`` for short) flag: +The ``VLLM_PRECOMPILED_WHEEL_LOCATION`` environment variable can be used instead of ``VLLM_USE_PRECOMPILED`` to specify a custom path or URL to the wheel file. For example, to use the `0.6.1.post1 PyPi wheel `_: .. 
code-block:: console - $ python python_only_dev.py --quit-dev - -The ``--quit-dev`` flag will: - -* Remove the symbolic link from the current directory to the vLLM package. -* Restore the original vLLM package from the backup. + $ export VLLM_PRECOMPILED_WHEEL_LOCATION=https://files.pythonhosted.org/packages/4a/4c/ee65ba33467a4c0de350ce29fbae39b9d0e7fcd887cc756fa993654d1228/vllm-0.6.3.post1-cp38-abi3-manylinux1_x86_64.whl + $ pip install --editable . -If you update the vLLM wheel and rebuild from the source to make further edits, you will need to repeat the `Python-only build <#python-only-build>`_ steps again. +You can find more information about vLLM's wheels `above <#install-the-latest-code>`_. .. note:: @@ -148,9 +127,13 @@ If you want to modify C++ or CUDA code, you'll need to build vLLM from source. T .. tip:: Building from source requires a lot of compilation. If you are building from source repeatedly, it's more efficient to cache the compilation results. + For example, you can install `ccache `_ using ``conda install ccache`` or ``apt install ccache`` . As long as ``which ccache`` command can find the ``ccache`` binary, it will be used automatically by the build system. After the first build, subsequent builds will be much faster. + `sccache `_ works similarly to ``ccache``, but has the capability to utilize caching in remote storage environments. + The following environment variables can be set to configure the vLLM ``sccache`` remote: ``SCCACHE_BUCKET=vllm-build-sccache SCCACHE_REGION=us-west-2 SCCACHE_S3_NO_CREDENTIALS=1``. We also recommend setting ``SCCACHE_IDLE_TIMEOUT=0``. + Use an existing PyTorch installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python_only_dev.py b/python_only_dev.py index 1ca0f5c30b741..f70b4984025b3 100644 --- a/python_only_dev.py +++ b/python_only_dev.py @@ -1,92 +1,14 @@ -# enable python only development -# copy compiled files to the current directory directly +msg = """Old style python only build (without compilation) is deprecated, please check https://docs.vllm.ai/en/latest/getting_started/installation.html#python-only-build-without-compilation for the new way to do python only build (without compilation). -import argparse -import os -import shutil -import subprocess -import sys -import warnings +TL;DR: -parser = argparse.ArgumentParser( - description="Development mode for python-only code") -parser.add_argument('-q', - '--quit-dev', - action='store_true', - help='Set the flag to quit development mode') -args = parser.parse_args() +VLLM_USE_PRECOMPILED=1 pip install -e . -# cannot directly `import vllm` , because it will try to -# import from the current directory -output = subprocess.run([sys.executable, "-m", "pip", "show", "vllm"], - capture_output=True) +or -assert output.returncode == 0, "vllm is not installed" +export VLLM_COMMIT=33f460b17a54acb3b6cc0b03f4a17876cff5eafd # use full commit hash from the main branch +export VLLM_PRECOMPILED_WHEEL_LOCATION=https://vllm-wheels.s3.us-west-2.amazonaws.com/${VLLM_COMMIT}/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl +pip install -e . 
+""" # noqa -text = output.stdout.decode("utf-8") - -package_path = None -for line in text.split("\n"): - if line.startswith("Location: "): - package_path = line.split(": ")[1] - break - -assert package_path is not None, "could not find package path" - -cwd = os.getcwd() - -assert cwd != package_path, "should not import from the current directory" - -files_to_copy = [ - "vllm/_C.abi3.so", - "vllm/_moe_C.abi3.so", - "vllm/vllm_flash_attn/vllm_flash_attn_c.abi3.so", - "vllm/vllm_flash_attn/flash_attn_interface.py", - "vllm/vllm_flash_attn/__init__.py", - # "vllm/_version.py", # not available in nightly wheels yet -] - -# Try to create _version.py to avoid version related warning -# Refer to https://github.com/vllm-project/vllm/pull/8771 -try: - from setuptools_scm import get_version - get_version(write_to="vllm/_version.py") -except ImportError: - warnings.warn( - "To avoid warnings related to vllm._version, " - "you should install setuptools-scm by `pip install setuptools-scm`", - stacklevel=2) - -if not args.quit_dev: - for file in files_to_copy: - src = os.path.join(package_path, file) - dst = file - print(f"Copying {src} to {dst}") - shutil.copyfile(src, dst) - - pre_built_vllm_path = os.path.join(package_path, "vllm") - tmp_path = os.path.join(package_path, "vllm_pre_built") - current_vllm_path = os.path.join(cwd, "vllm") - - print(f"Renaming {pre_built_vllm_path} to {tmp_path} for backup") - shutil.copytree(pre_built_vllm_path, tmp_path) - shutil.rmtree(pre_built_vllm_path) - - print(f"Linking {current_vllm_path} to {pre_built_vllm_path}") - os.symlink(current_vllm_path, pre_built_vllm_path) -else: - vllm_symlink_path = os.path.join(package_path, "vllm") - vllm_backup_path = os.path.join(package_path, "vllm_pre_built") - current_vllm_path = os.path.join(cwd, "vllm") - - print(f"Unlinking {current_vllm_path} to {vllm_symlink_path}") - assert os.path.islink( - vllm_symlink_path - ), f"not in dev mode: {vllm_symlink_path} is not a symbolic link" - assert current_vllm_path == os.readlink( - vllm_symlink_path - ), "current directory is not the source code of package" - os.unlink(vllm_symlink_path) - - print(f"Recovering backup from {vllm_backup_path} to {vllm_symlink_path}") - os.rename(vllm_backup_path, vllm_symlink_path) +print(msg) diff --git a/setup.py b/setup.py index b936589869e76..182dabe449674 100644 --- a/setup.py +++ b/setup.py @@ -249,6 +249,74 @@ def run(self): self.copy_file(file, dst_file) +class repackage_wheel(build_ext): + """Extracts libraries and other files from an existing wheel.""" + default_wheel = "https://vllm-wheels.s3.us-west-2.amazonaws.com/nightly/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl" + + def run(self) -> None: + wheel_location = os.getenv("VLLM_PRECOMPILED_WHEEL_LOCATION", + self.default_wheel) + + assert _is_cuda( + ), "VLLM_USE_PRECOMPILED is only supported for CUDA builds" + + import zipfile + + if os.path.isfile(wheel_location): + wheel_path = wheel_location + print(f"Using existing wheel={wheel_path}") + else: + # Download the wheel from a given URL, assume + # the filename is the last part of the URL + wheel_filename = wheel_location.split("/")[-1] + + import tempfile + + # create a temporary directory to store the wheel + temp_dir = tempfile.mkdtemp(prefix="vllm-wheels") + wheel_path = os.path.join(temp_dir, wheel_filename) + + print(f"Downloading wheel from {wheel_location} to {wheel_path}") + + from urllib.request import urlretrieve + + try: + urlretrieve(wheel_location, filename=wheel_path) + except Exception as e: + from setuptools.errors 
import SetupError + + raise SetupError( + f"Failed to get vLLM wheel from {wheel_location}") from e + + with zipfile.ZipFile(wheel_path) as wheel: + files_to_copy = [ + "vllm/_C.abi3.so", + "vllm/_moe_C.abi3.so", + "vllm/vllm_flash_attn/vllm_flash_attn_c.abi3.so", + "vllm/vllm_flash_attn/flash_attn_interface.py", + "vllm/vllm_flash_attn/__init__.py", + # "vllm/_version.py", # not available in nightly wheels yet + ] + file_members = filter(lambda x: x.filename in files_to_copy, + wheel.filelist) + + for file in file_members: + print(f"Extracting and including {file.filename} " + "from existing wheel") + package_name = os.path.dirname(file.filename).replace("/", ".") + file_name = os.path.basename(file.filename) + + if package_name not in package_data: + package_data[package_name] = [] + + wheel.extract(file) + if file_name.endswith(".py"): + # python files shouldn't be added to package_data + continue + + package_data[package_name].append(file_name) + + def _is_hpu() -> bool: is_hpu_available = True try: @@ -403,6 +471,8 @@ def get_vllm_version() -> str: # skip this for source tarball, required for pypi if "sdist" not in sys.argv: version += f"{sep}cu{cuda_version_str}" + if envs.VLLM_USE_PRECOMPILED: + version += ".precompiled" elif _is_hip(): # Get the HIP version hipcc_version = get_hipcc_rocm_version() @@ -514,13 +584,18 @@ def _read_requirements(filename: str) -> List[str]: package_data = { "vllm": ["py.typed", "model_executor/layers/fused_moe/configs/*.json"] } -if envs.VLLM_USE_PRECOMPILED: - ext_modules = [] - package_data["vllm"].append("*.so") if _no_device(): ext_modules = [] +if not ext_modules: + cmdclass = {} +else: + cmdclass = { + "build_ext": + repackage_wheel if envs.VLLM_USE_PRECOMPILED else cmake_build_ext + } + setup( name="vllm", version=get_vllm_version(), @@ -557,7 +632,7 @@ def _read_requirements(filename: str) -> List[str]: "audio": ["librosa", "soundfile"], # Required for audio processing "video": ["decord"] # Required for video processing }, - cmdclass={"build_ext": cmake_build_ext} if len(ext_modules) > 0 else {}, + cmdclass=cmdclass, package_data=package_data, entry_points={ "console_scripts": [ diff --git a/vllm/envs.py b/vllm/envs.py index c896770e5f6bc..28797ac1e4af2 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -113,7 +113,8 @@ def get_default_config_root(): # If set, vllm will use precompiled binaries (*.so) "VLLM_USE_PRECOMPILED": - lambda: bool(os.environ.get("VLLM_USE_PRECOMPILED")), + lambda: bool(os.environ.get("VLLM_USE_PRECOMPILED")) or bool( + os.environ.get("VLLM_PRECOMPILED_WHEEL_LOCATION")), # CMake build type # If not set, defaults to "Debug" or "RelWithDebInfo" From 2a56e1264f3f0f32e25de42c32eac67cbc86a098 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Wed, 4 Dec 2024 16:54:05 -0800 Subject: [PATCH 237/397] [V1] Fix when max_model_len is not divisible by block_size (#10903) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu_model_runner.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 4692762493f00..e8d964a722f60 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -260,7 +260,8 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2] # where M is the max_model_len. 
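# --- Illustrative aside (not part of the original diff) ----------------------
# Toy numbers showing the indexing bug this hunk fixes: with max_model_len = 10
# and block_size = 4, the flattened block table has ceil(10 / 4) = 3 slots per
# request, so dividing the old max_model_len-based token index by block_size
# can land in the wrong request's row.
max_model_len, block_size = 10, 4
max_num_blocks_per_req = -(-max_model_len // block_size)  # ceil division -> 3
req_index, position = 1, 0               # first token of the second request
old_flat_block = (position + req_index * max_model_len) // block_size
new_flat_block = req_index * max_num_blocks_per_req + position // block_size
assert old_flat_block == 2  # still points into request 0's row of the table
assert new_flat_block == 3  # first block of request 1, as intended
# ------------------------------------------------------------------------------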
- token_indices = positions_np + req_indices * self.max_model_len + token_indices = (positions_np + + req_indices * self.input_batch.token_ids_cpu.shape[1]) token_indices = torch.from_numpy(token_indices) input_ids = torch.empty((total_num_scheduled_tokens, ), dtype=torch.int32, @@ -273,9 +274,15 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): out=input_ids) # Calculate the slot mapping. + # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] + # -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1] + # where K is the max_num_blocks_per_req and the block size is 2. + # NOTE(woosuk): We can't simply use `token_indices // block_size` here + # because M (max_model_len) is not necessarily divisible by block_size. block_numbers = self.input_batch.block_table_cpu_tensor.flatten()[ - token_indices // self.block_size] - block_offsets = token_indices % self.block_size + req_indices * self.max_num_blocks_per_req + + positions_np // self.block_size] + block_offsets = torch.from_numpy(positions_np % self.block_size) slot_mapping = torch.empty((total_num_scheduled_tokens, ), dtype=torch.int32, device="cpu", From 7883c2bbe7d0ab47160d205822f7b188a5a2771b Mon Sep 17 00:00:00 2001 From: "Kevin H. Luu" Date: Wed, 4 Dec 2024 17:02:17 -0800 Subject: [PATCH 238/397] [benchmark] Make H100 benchmark optional (#10908) --- .buildkite/nightly-benchmarks/benchmark-pipeline.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml index dd2ce454ecb2d..64ba1b32fb074 100644 --- a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml +++ b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml @@ -65,10 +65,15 @@ steps: - VLLM_USAGE_SOURCE - HF_TOKEN + - block: "Run H100 Benchmark" + key: block-h100 + depends_on: ~ + - label: "H100" # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing" agents: queue: H100 + depends_on: block-h100 plugins: - docker#v5.12.0: image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT From 8d370e91cb0049dc150c85710a08e85952504bfc Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Wed, 4 Dec 2024 22:14:06 -0500 Subject: [PATCH 239/397] [Bugfix] Fallback to outlines for complex json schemas (#10899) Signed-off-by: mgoin --- tests/entrypoints/conftest.py | 31 +++++++++++++ tests/entrypoints/llm/test_guided_generate.py | 28 ++++++++++++ .../guided_decoding/__init__.py | 43 +++++++++++++++++++ 3 files changed, 102 insertions(+) diff --git a/tests/entrypoints/conftest.py b/tests/entrypoints/conftest.py index e7ef5637c8ccb..0f7d15e1d85aa 100644 --- a/tests/entrypoints/conftest.py +++ b/tests/entrypoints/conftest.py @@ -69,6 +69,37 @@ def sample_json_schema(): } +@pytest.fixture +def sample_complex_json_schema(): + return { + "type": "object", + "properties": { + "score": { + "type": "integer", + "minimum": 0, + "maximum": 100 # Numeric range + }, + "grade": { + "type": "string", + "pattern": "^[A-D]$" # Regex pattern + }, + "email": { + "type": "string", + "pattern": "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$" + }, + "tags": { + "type": "array", + "items": { + "type": "string", + "pattern": + "^[a-z]{1,10}$" # Combining length and pattern restrictions + } + } + }, + "required": ["score", "grade", "email", "tags"] + } + + @pytest.fixture def sample_guided_choice(): return [ diff --git a/tests/entrypoints/llm/test_guided_generate.py b/tests/entrypoints/llm/test_guided_generate.py index c3706f696b264..de6257cfc551c 100644 --- 
a/tests/entrypoints/llm/test_guided_generate.py +++ b/tests/entrypoints/llm/test_guided_generate.py @@ -76,6 +76,34 @@ def test_guided_json_completion(sample_json_schema, llm): jsonschema.validate(instance=output_json, schema=sample_json_schema) +@pytest.mark.skip_global_cleanup +def test_guided_complex_json_completion(sample_complex_json_schema, llm): + sampling_params = SamplingParams( + temperature=1.0, + max_tokens=1000, + guided_decoding=GuidedDecodingParams(json=sample_complex_json_schema)) + outputs = llm.generate(prompts=[ + f"Give an example JSON for an assignment grade " + f"that fits this schema: {sample_complex_json_schema}" + ] * 2, + sampling_params=sampling_params, + use_tqdm=True) + + assert outputs is not None + + for output in outputs: + assert output is not None + assert isinstance(output, RequestOutput) + prompt = output.prompt + + generated_text = output.outputs[0].text + assert generated_text is not None + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + output_json = json.loads(generated_text) + jsonschema.validate(instance=output_json, + schema=sample_complex_json_schema) + + @pytest.mark.skip_global_cleanup def test_guided_choice_completion(sample_guided_choice, llm): sampling_params = SamplingParams( diff --git a/vllm/model_executor/guided_decoding/__init__.py b/vllm/model_executor/guided_decoding/__init__.py index 3340bad38ab73..a81377341e095 100644 --- a/vllm/model_executor/guided_decoding/__init__.py +++ b/vllm/model_executor/guided_decoding/__init__.py @@ -15,6 +15,40 @@ logger = init_logger(__name__) +def has_xgrammar_unsupported_json_features(schema: dict) -> bool: + """Check if JSON schema contains features unsupported by xgrammar.""" + + def check_object(obj: dict) -> bool: + if not isinstance(obj, dict): + return False + + # Check for pattern restrictions + if "pattern" in obj: + return True + + # Check for numeric ranges + if obj.get("type") in ("integer", "number") and any( + key in obj for key in [ + "minimum", "maximum", "exclusiveMinimum", + "exclusiveMaximum", "multipleOf" + ]): + return True + + # Recursively check all nested objects and arrays + for value in obj.values(): + if isinstance(value, dict): + if check_object(value): + return True + elif isinstance(value, list): + for item in value: + if isinstance(item, dict) and check_object(item): + return True + + return False + + return check_object(schema) + + def maybe_backend_fallback( guided_params: GuidedDecodingParams) -> GuidedDecodingParams: # lm-format-enforce doesn't support grammar, fallback to xgrammar @@ -47,6 +81,15 @@ def maybe_backend_fallback( "Falling back to use outlines instead.") guided_params.backend = "outlines" + # xgrammar doesn't support some JSON schema features + elif (guided_params.json is not None + and has_xgrammar_unsupported_json_features(guided_params.json)): + logger.warning( + "xgrammar does not support advanced JSON schema features like " + "patterns or numeric ranges. 
" + "Falling back to use outlines instead.") + guided_params.backend = "outlines" + return guided_params From aa39a8e17537f9127b3da65dba6b33067bfd2f78 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 5 Dec 2024 11:19:35 +0800 Subject: [PATCH 240/397] [Doc] Create a new "Usage" section (#10827) Signed-off-by: DarkLight1337 --- .../design/multimodal/multimodal_index.rst | 5 +- docs/source/index.rst | 25 +- .../models/enabling_multimodal_inputs.rst | 2 +- docs/source/models/supported_models.rst | 19 +- .../serving/openai_compatible_server.md | 4 +- .../compatibility_matrix.rst | 0 docs/source/{models => usage}/engine_args.rst | 0 docs/source/{serving => usage}/env_vars.rst | 0 docs/source/{serving => usage}/faq.rst | 2 + docs/source/{models => usage}/lora.rst | 4 +- .../vlm.rst => usage/multimodal_inputs.rst} | 248 ++++++++++++------ docs/source/{models => usage}/performance.rst | 0 docs/source/{models => usage}/spec_decode.rst | 8 +- .../{models => usage}/structured_outputs.rst | 0 docs/source/{serving => usage}/usage_stats.md | 0 vllm/attention/backends/rocm_flash_attn.py | 2 +- vllm/config.py | 8 +- vllm/engine/arg_utils.py | 2 +- vllm/engine/output_processor/multi_step.py | 2 +- vllm/executor/cpu_executor.py | 2 +- vllm/platforms/cpu.py | 2 +- vllm/spec_decode/spec_decode_worker.py | 2 +- vllm/utils.py | 2 +- vllm/worker/multi_step_model_runner.py | 2 +- vllm/worker/utils.py | 2 +- 25 files changed, 218 insertions(+), 125 deletions(-) rename docs/source/{serving => usage}/compatibility_matrix.rst (100%) rename docs/source/{models => usage}/engine_args.rst (100%) rename docs/source/{serving => usage}/env_vars.rst (100%) rename docs/source/{serving => usage}/faq.rst (99%) rename docs/source/{models => usage}/lora.rst (99%) rename docs/source/{models/vlm.rst => usage/multimodal_inputs.rst} (62%) rename docs/source/{models => usage}/performance.rst (100%) rename docs/source/{models => usage}/spec_decode.rst (98%) rename docs/source/{models => usage}/structured_outputs.rst (100%) rename docs/source/{serving => usage}/usage_stats.md (100%) diff --git a/docs/source/design/multimodal/multimodal_index.rst b/docs/source/design/multimodal/multimodal_index.rst index 30f543abc20c7..c6d47f90b62d5 100644 --- a/docs/source/design/multimodal/multimodal_index.rst +++ b/docs/source/design/multimodal/multimodal_index.rst @@ -7,7 +7,7 @@ Multi-Modality vLLM provides experimental support for multi-modal models through the :mod:`vllm.multimodal` package. -Multi-modal inputs can be passed alongside text and token prompts to :ref:`supported models ` +Multi-modal inputs can be passed alongside text and token prompts to :ref:`supported models ` via the ``multi_modal_data`` field in :class:`vllm.inputs.PromptType`. Currently, vLLM only has built-in support for image data. You can extend vLLM to process additional modalities @@ -15,9 +15,6 @@ by following :ref:`this guide `. Looking to add your own multi-modal model? Please follow the instructions listed :ref:`here `. -.. - TODO: Add usage of --limit-mm-per-prompt when multi-image input is officially supported - Guides ++++++ diff --git a/docs/source/index.rst b/docs/source/index.rst index 0692e949f1c77..86b1eed2d26ba 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -85,12 +85,8 @@ Documentation serving/deploying_with_nginx serving/distributed_serving serving/metrics - serving/env_vars - serving/usage_stats serving/integrations serving/tensorizer - serving/compatibility_matrix - serving/faq .. 
toctree:: :maxdepth: 1 @@ -99,12 +95,21 @@ Documentation models/supported_models models/adding_model models/enabling_multimodal_inputs - models/engine_args - models/lora - models/vlm - models/structured_outputs - models/spec_decode - models/performance + +.. toctree:: + :maxdepth: 1 + :caption: Usage + + usage/lora + usage/multimodal_inputs + usage/structured_outputs + usage/spec_decode + usage/compatibility_matrix + usage/performance + usage/faq + usage/engine_args + usage/env_vars + usage/usage_stats .. toctree:: :maxdepth: 1 diff --git a/docs/source/models/enabling_multimodal_inputs.rst b/docs/source/models/enabling_multimodal_inputs.rst index 49b5285c45590..5c1236e1a8972 100644 --- a/docs/source/models/enabling_multimodal_inputs.rst +++ b/docs/source/models/enabling_multimodal_inputs.rst @@ -3,7 +3,7 @@ Enabling Multimodal Inputs ========================== -This document walks you through the steps to extend a vLLM model so that it accepts :ref:`multi-modal ` inputs. +This document walks you through the steps to extend a vLLM model so that it accepts :ref:`multi-modal inputs `. .. seealso:: :ref:`adding_a_new_model` diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 9f3b6f59068e2..5b416e04da745 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -471,6 +471,8 @@ Sentence Pair Scoring .. note:: These models are supported in both offline and online inference via Score API. +.. _supported_mm_models: + Multimodal Language Models ^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -489,8 +491,6 @@ On the other hand, modalities separated by :code:`/` are mutually exclusive. - e.g.: :code:`T / I` means that the model supports text-only and image-only inputs, but not text-with-image inputs. -.. _supported_vlms: - Text Generation --------------- @@ -646,6 +646,21 @@ Text Generation | :sup:`E` Pre-computed embeddings can be inputted for this modality. | :sup:`+` Multiple items can be inputted per text prompt for this modality. +.. important:: + To enable multiple multi-modal items per text prompt, you have to set :code:`limit_mm_per_prompt` (offline inference) + or :code:`--limit-mm-per-prompt` (online inference). For example, to enable passing up to 4 images per text prompt: + + .. code-block:: python + + llm = LLM( + model="Qwen/Qwen2-VL-7B-Instruct", + limit_mm_per_prompt={"image": 4}, + ) + + .. code-block:: bash + + vllm serve Qwen/Qwen2-VL-7B-Instruct --limit-mm-per-prompt image=4 + .. note:: vLLM currently only supports adding LoRA to the language backbone of multimodal models. diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md index c39cef85897ed..d75e90807ca1d 100644 --- a/docs/source/serving/openai_compatible_server.md +++ b/docs/source/serving/openai_compatible_server.md @@ -32,7 +32,7 @@ We currently support the following OpenAI APIs: - [Completions API](https://platform.openai.com/docs/api-reference/completions) - *Note: `suffix` parameter is not supported.* - [Chat Completions API](https://platform.openai.com/docs/api-reference/chat) - - [Vision](https://platform.openai.com/docs/guides/vision)-related parameters are supported; see [Using VLMs](../models/vlm.rst). + - [Vision](https://platform.openai.com/docs/guides/vision)-related parameters are supported; see [Multimodal Inputs](../usage/multimodal_inputs.rst). - *Note: `image_url.detail` parameter is not supported.* - We also support `audio_url` content type for audio files. 
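The hunk above notes that, besides `image_url`, the Chat Completions endpoint also accepts an `audio_url` content type. As a rough illustration (not part of this patch; the model name, port, and audio URL below are placeholders), a client request could look like:

```python
from openai import OpenAI

# Assumes an audio-capable chat model is already being served locally, e.g.
# `vllm serve fixie-ai/ultravox-v0_3` (the model name is only an example).
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

chat_completion = client.chat.completions.create(
    model="fixie-ai/ultravox-v0_3",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is being said in this audio clip?"},
            # `audio_url` is the content type mentioned in the hunk above.
            {"type": "audio_url",
             "audio_url": {"url": "https://example.com/clip.wav"}},
        ],
    }],
)
print(chat_completion.choices[0].message.content)
```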
- Refer to [vllm.entrypoints.chat_utils](https://github.com/vllm-project/vllm/tree/main/vllm/entrypoints/chat_utils.py) for the exact schema. @@ -41,7 +41,7 @@ We currently support the following OpenAI APIs: - [Embeddings API](https://platform.openai.com/docs/api-reference/embeddings) - Instead of `inputs`, you can pass in a list of `messages` (same schema as Chat Completions API), which will be treated as a single prompt to the model according to its chat template. - - This enables multi-modal inputs to be passed to embedding models, see [Using VLMs](../models/vlm.rst). + - This enables multi-modal inputs to be passed to embedding models, see [this page](../usage/multimodal_inputs.rst) for details. - *Note: You should run `vllm serve` with `--task embedding` to ensure that the model is being run in embedding mode.* ## Score API for Cross Encoder Models diff --git a/docs/source/serving/compatibility_matrix.rst b/docs/source/usage/compatibility_matrix.rst similarity index 100% rename from docs/source/serving/compatibility_matrix.rst rename to docs/source/usage/compatibility_matrix.rst diff --git a/docs/source/models/engine_args.rst b/docs/source/usage/engine_args.rst similarity index 100% rename from docs/source/models/engine_args.rst rename to docs/source/usage/engine_args.rst diff --git a/docs/source/serving/env_vars.rst b/docs/source/usage/env_vars.rst similarity index 100% rename from docs/source/serving/env_vars.rst rename to docs/source/usage/env_vars.rst diff --git a/docs/source/serving/faq.rst b/docs/source/usage/faq.rst similarity index 99% rename from docs/source/serving/faq.rst rename to docs/source/usage/faq.rst index 9e858e612c8bf..ce327abd5fa20 100644 --- a/docs/source/serving/faq.rst +++ b/docs/source/usage/faq.rst @@ -1,3 +1,5 @@ +.. _faq: + Frequently Asked Questions =========================== diff --git a/docs/source/models/lora.rst b/docs/source/usage/lora.rst similarity index 99% rename from docs/source/models/lora.rst rename to docs/source/usage/lora.rst index ef0177eaf2162..c2c6fa2aebfaf 100644 --- a/docs/source/models/lora.rst +++ b/docs/source/usage/lora.rst @@ -1,7 +1,7 @@ .. _lora: -Using LoRA adapters -=================== +LoRA Adapters +============= This document shows you how to use `LoRA adapters `_ with vLLM on top of a base model. diff --git a/docs/source/models/vlm.rst b/docs/source/usage/multimodal_inputs.rst similarity index 62% rename from docs/source/models/vlm.rst rename to docs/source/usage/multimodal_inputs.rst index bcbe50a25fa09..c93f65327e31b 100644 --- a/docs/source/models/vlm.rst +++ b/docs/source/usage/multimodal_inputs.rst @@ -1,34 +1,31 @@ -.. _vlm: +.. _multimodal_inputs: -Using VLMs -========== +Multimodal Inputs +================= -vLLM provides experimental support for Vision Language Models (VLMs). See the :ref:`list of supported VLMs here `. -This document shows you how to run and serve these models using vLLM. +This page teaches you how to pass multi-modal inputs to :ref:`multi-modal models ` in vLLM. .. note:: - We are actively iterating on VLM support. See `this RFC `_ for upcoming changes, + We are actively iterating on multi-modal support. See `this RFC `_ for upcoming changes, and `open an issue on GitHub `_ if you have any feedback or feature requests. Offline Inference ----------------- -Single-image input -^^^^^^^^^^^^^^^^^^ - -The :class:`~vllm.LLM` class can be instantiated in much the same way as language-only models. - -.. 
code-block:: python - - llm = LLM(model="llava-hf/llava-1.5-7b-hf") - -To pass an image to the model, note the following in :class:`vllm.inputs.PromptType`: +To input multi-modal data, follow this schema in :class:`vllm.inputs.PromptType`: * ``prompt``: The prompt should follow the format that is documented on HuggingFace. * ``multi_modal_data``: This is a dictionary that follows the schema defined in :class:`vllm.multimodal.MultiModalDataDict`. +Image +^^^^^ + +You can pass a single image to the :code:`'image'` field of the multi-modal dictionary, as shown in the following examples: + .. code-block:: python + llm = LLM(model="llava-hf/llava-1.5-7b-hf") + # Refer to the HuggingFace repo for the correct format to use prompt = "USER: \nWhat is the content of this image?\nASSISTANT:" @@ -41,41 +38,6 @@ To pass an image to the model, note the following in :class:`vllm.inputs.PromptT "multi_modal_data": {"image": image}, }) - for o in outputs: - generated_text = o.outputs[0].text - print(generated_text) - - # Inference with image embeddings as input - image_embeds = torch.load(...) # torch.Tensor of shape (1, image_feature_size, hidden_size of LM) - outputs = llm.generate({ - "prompt": prompt, - "multi_modal_data": {"image": image_embeds}, - }) - - for o in outputs: - generated_text = o.outputs[0].text - print(generated_text) - - # Inference with image embeddings as input with additional parameters - # Specifically, we are conducting a trial run of Qwen2VL and MiniCPM-V with the new input format, which utilizes additional parameters. - mm_data = {} - - image_embeds = torch.load(...) # torch.Tensor of shape (num_images, image_feature_size, hidden_size of LM) - # For Qwen2VL, image_grid_thw is needed to calculate positional encoding. - mm_data['image'] = { - "image_embeds": image_embeds, - "image_grid_thw": torch.load(...) # torch.Tensor of shape (1, 3), - } - # For MiniCPM-V, image_size_list is needed to calculate details of the sliced image. - mm_data['image'] = { - "image_embeds": image_embeds, - "image_size_list": [image.size] # list of image sizes - } - outputs = llm.generate({ - "prompt": prompt, - "multi_modal_data": mm_data, - }) - for o in outputs: generated_text = o.outputs[0].text print(generated_text) @@ -102,12 +64,7 @@ To pass an image to the model, note the following in :class:`vllm.inputs.PromptT A code example can be found in `examples/offline_inference_vision_language.py `_. -Multi-image input -^^^^^^^^^^^^^^^^^ - -Multi-image input is only supported for a subset of VLMs, as shown :ref:`here `. - -To enable multiple multi-modal items per text prompt, you have to set ``limit_mm_per_prompt`` for the :class:`~vllm.LLM` class. +To substitute multiple images inside the same text prompt, you can pass in a list of images instead: .. code-block:: python @@ -118,10 +75,6 @@ To enable multiple multi-modal items per text prompt, you have to set ``limit_mm limit_mm_per_prompt={"image": 2}, # The maximum number to accept ) -Instead of passing in a single image, you can pass in a list of images. - -.. code-block:: python - # Refer to the HuggingFace repo for the correct format to use prompt = "<|user|>\n<|image_1|>\n<|image_2|>\nWhat is the content of each image?<|end|>\n<|assistant|>\n" @@ -169,30 +122,114 @@ Multi-image input can be extended to perform video captioning. 
We show this with generated_text = o.outputs[0].text print(generated_text) +Video +^^^^^ + +You can pass a list of NumPy arrays directly to the :code:`'video'` field of the multi-modal dictionary +instead of using multi-image input. + +Please refer to `examples/offline_inference_vision_language.py `_ for more details. + +Audio +^^^^^ + +You can pass a tuple :code:`(array, sampling_rate)` to the :code:`'audio'` field of the multi-modal dictionary. + +Please refer to `examples/offline_inference_audio_language.py `_ for more details. + +Embedding +^^^^^^^^^ + +To input pre-computed embeddings belonging to a data type (i.e. image, video, or audio) directly to the language model, +pass a tensor of shape :code:`(num_items, feature_size, hidden_size of LM)` to the corresponding field of the multi-modal dictionary. + +.. code-block:: python + + # Inference with image embeddings as input + llm = LLM(model="llava-hf/llava-1.5-7b-hf") + + # Refer to the HuggingFace repo for the correct format to use + prompt = "USER: \nWhat is the content of this image?\nASSISTANT:" + + # Embeddings for single image + # torch.Tensor of shape (1, image_feature_size, hidden_size of LM) + image_embeds = torch.load(...) + + outputs = llm.generate({ + "prompt": prompt, + "multi_modal_data": {"image": image_embeds}, + }) + + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + +For Qwen2-VL and MiniCPM-V, we accept additional parameters alongside the embeddings: + +.. code-block:: python + + # Construct the prompt based on your model + prompt = ... + + # Embeddings for multiple images + # torch.Tensor of shape (num_images, image_feature_size, hidden_size of LM) + image_embeds = torch.load(...) + + # Qwen2-VL + llm = LLM("Qwen/Qwen2-VL-2B-Instruct", limit_mm_per_prompt={"image": 4}) + mm_data = { + "image": { + "image_embeds": image_embeds, + # image_grid_thw is needed to calculate positional encoding. + "image_grid_thw": torch.load(...), # torch.Tensor of shape (1, 3), + } + } + + # MiniCPM-V + llm = LLM("openbmb/MiniCPM-V-2_6", trust_remote_code=True, limit_mm_per_prompt={"image": 4}) + mm_data = { + "image": { + "image_embeds": image_embeds, + # image_size_list is needed to calculate details of the sliced image. + "image_size_list": [image.size for image in images], # list of image sizes + } + } + + outputs = llm.generate({ + "prompt": prompt, + "multi_modal_data": mm_data, + }) + + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + Online Inference ---------------- -OpenAI Vision API -^^^^^^^^^^^^^^^^^ +Our OpenAI-compatible server accepts multi-modal data via the `Chat Completions API `_. + +.. important:: + A chat template is **required** to use Chat Completions API. + + Although most models come with a chat template, for others you have to define one yourself. + The chat template can be inferred based on the documentation on the model's HuggingFace repo. + For example, LLaVA-1.5 (``llava-hf/llava-1.5-7b-hf``) requires a chat template that can be found `here `__. + +Image +^^^^^ -You can serve vision language models with vLLM's HTTP server that is compatible with `OpenAI Vision API `_. +Image input is supported according to `OpenAI Vision API `_. +Here is a simple example using Phi-3.5-Vision. -Below is an example on how to launch the same ``microsoft/Phi-3.5-vision-instruct`` with vLLM's OpenAI-compatible API server. +First, launch the OpenAI-compatible server: .. 
code-block:: bash vllm serve microsoft/Phi-3.5-vision-instruct --task generate \ --trust-remote-code --max-model-len 4096 --limit-mm-per-prompt image=2 -.. important:: - Since OpenAI Vision API is based on `Chat Completions API `_, - a chat template is **required** to launch the API server. - - Although Phi-3.5-Vision comes with a chat template, for other models you may have to provide one if the model's tokenizer does not come with it. - The chat template can be inferred based on the documentation on the model's HuggingFace repo. - For example, LLaVA-1.5 (``llava-hf/llava-1.5-7b-hf``) requires a chat template that can be found `here `_. - -To consume the server, you can use the OpenAI client like in the example below: +Then, you can use the OpenAI client as follows: .. code-block:: python @@ -252,22 +289,59 @@ A full code example can be found in `examples/openai_chat_completion_client_for_ .. note:: - By default, the timeout for fetching images through http url is ``5`` seconds. You can override this by setting the environment variable: + By default, the timeout for fetching images through HTTP URL is ``5`` seconds. + You can override this by setting the environment variable: .. code-block:: console $ export VLLM_IMAGE_FETCH_TIMEOUT= -Chat Embeddings API -^^^^^^^^^^^^^^^^^^^ +Video +^^^^^ + +Instead of :code:`image_url`, you can pass a video file via :code:`video_url`. + +You can use `these tests `_ as reference. + +.. note:: + + By default, the timeout for fetching videos through HTTP URL url is ``30`` seconds. + You can override this by setting the environment variable: + + .. code-block:: console + + $ export VLLM_VIDEO_FETCH_TIMEOUT= -vLLM's Chat Embeddings API is a superset of OpenAI's `Embeddings API `_, -where a list of ``messages`` can be passed instead of batched ``inputs``. This enables multi-modal inputs to be passed to embedding models. +Audio +^^^^^ + +Instead of :code:`image_url`, you can pass an audio file via :code:`audio_url`. + +A full code example can be found in `examples/openai_chat_completion_client_for_multimodal.py `_. + +.. note:: + + By default, the timeout for fetching audios through HTTP URL is ``10`` seconds. + You can override this by setting the environment variable: + + .. code-block:: console + + $ export VLLM_AUDIO_FETCH_TIMEOUT= + +Embedding +^^^^^^^^^ + +vLLM's Embeddings API is a superset of OpenAI's `Embeddings API `_, +where a list of chat ``messages`` can be passed instead of batched ``inputs``. This enables multi-modal inputs to be passed to embedding models. .. tip:: The schema of ``messages`` is exactly the same as in Chat Completions API. + You can refer to the above tutorials for more details on how to pass each type of multi-modal data. -In this example, we will serve the ``TIGER-Lab/VLM2Vec-Full`` model. +Usually, embedding models do not expect chat-based input, so we need to use a custom chat template to format the text and images. +Refer to the examples below for illustration. + +Here is an end-to-end example using VLM2Vec. To serve the model: .. code-block:: bash @@ -279,10 +353,8 @@ In this example, we will serve the ``TIGER-Lab/VLM2Vec-Full`` model. Since VLM2Vec has the same model architecture as Phi-3.5-Vision, we have to explicitly pass ``--task embedding`` to run this model in embedding mode instead of text generation mode. -.. important:: - - VLM2Vec does not expect chat-based input. We use a `custom chat template `_ - to combine the text and images together. 
+ The custom chat template is completely different from the original one for this model, + and can be found `here `__. Since the request schema is not defined by OpenAI client, we post a request to the server using the lower-level ``requests`` library: @@ -310,7 +382,7 @@ Since the request schema is not defined by OpenAI client, we post a request to t response_json = response.json() print("Embedding output:", response_json["data"][0]["embedding"]) -Here is an example for serving the ``MrLight/dse-qwen2-2b-mrl-v1`` model. +Below is another example, this time using the ``MrLight/dse-qwen2-2b-mrl-v1`` model. .. code-block:: bash @@ -319,8 +391,10 @@ Here is an example for serving the ``MrLight/dse-qwen2-2b-mrl-v1`` model. .. important:: - Like with VLM2Vec, we have to explicitly pass ``--task embedding``. Additionally, ``MrLight/dse-qwen2-2b-mrl-v1`` requires an EOS token for embeddings, - which is handled by the jinja template. + Like with VLM2Vec, we have to explicitly pass ``--task embedding``. + + Additionally, ``MrLight/dse-qwen2-2b-mrl-v1`` requires an EOS token for embeddings, which is handled + by `this custom chat template `__. .. important:: diff --git a/docs/source/models/performance.rst b/docs/source/usage/performance.rst similarity index 100% rename from docs/source/models/performance.rst rename to docs/source/usage/performance.rst diff --git a/docs/source/models/spec_decode.rst b/docs/source/usage/spec_decode.rst similarity index 98% rename from docs/source/models/spec_decode.rst rename to docs/source/usage/spec_decode.rst index d57ffec53215d..67e8ede7654b7 100644 --- a/docs/source/models/spec_decode.rst +++ b/docs/source/usage/spec_decode.rst @@ -1,7 +1,7 @@ .. _spec_decode: -Speculative decoding in vLLM -============================ +Speculative decoding +==================== .. warning:: Please note that speculative decoding in vLLM is not yet optimized and does @@ -182,7 +182,7 @@ speculative decoding, breaking down the guarantees into three key areas: 3. **vLLM Logprob Stability** - vLLM does not currently guarantee stable token log probabilities (logprobs). This can result in different outputs for the same request across runs. For more details, see the FAQ section - titled *Can the output of a prompt vary across runs in vLLM?* in the `FAQs <../serving/faq>`_. + titled *Can the output of a prompt vary across runs in vLLM?* in the :ref:`FAQs `. **Conclusion** @@ -197,7 +197,7 @@ can occur due to following factors: **Mitigation Strategies** -For mitigation strategies, please refer to the FAQ entry *Can the output of a prompt vary across runs in vLLM?* in the `FAQs <../serving/faq>`_. +For mitigation strategies, please refer to the FAQ entry *Can the output of a prompt vary across runs in vLLM?* in the :ref:`FAQs `. 
Resources for vLLM contributors ------------------------------- diff --git a/docs/source/models/structured_outputs.rst b/docs/source/usage/structured_outputs.rst similarity index 100% rename from docs/source/models/structured_outputs.rst rename to docs/source/usage/structured_outputs.rst diff --git a/docs/source/serving/usage_stats.md b/docs/source/usage/usage_stats.md similarity index 100% rename from docs/source/serving/usage_stats.md rename to docs/source/usage/usage_stats.md diff --git a/vllm/attention/backends/rocm_flash_attn.py b/vllm/attention/backends/rocm_flash_attn.py index 9139c3c1314d8..19daeb729ee61 100644 --- a/vllm/attention/backends/rocm_flash_attn.py +++ b/vllm/attention/backends/rocm_flash_attn.py @@ -430,7 +430,7 @@ def forward( Returns: shape = [num_tokens, num_heads * head_size] """ - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if attn_type != AttentionType.DECODER: raise NotImplementedError("Encoder self-attention and " diff --git a/vllm/config.py b/vllm/config.py index 1cbab8ea30249..5c904914a71cf 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -509,7 +509,7 @@ def verify_async_output_proc(self, parallel_config, speculative_config, self.use_async_output_proc = False return - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if device_config.device_type not in ("cuda", "tpu", "xpu", "hpu"): logger.warning( @@ -525,7 +525,7 @@ def verify_async_output_proc(self, parallel_config, speculative_config, self.use_async_output_proc = False return - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if device_config.device_type == "cuda" and self.enforce_eager: logger.warning( @@ -540,7 +540,7 @@ def verify_async_output_proc(self, parallel_config, speculative_config, if self.task == "embedding": self.use_async_output_proc = False - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if speculative_config: logger.warning("Async output processing is not supported with" @@ -1704,7 +1704,7 @@ def verify_with_model_config(self, model_config: ModelConfig): model_config.quantization) def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig): - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if scheduler_config.chunked_prefill_enabled: raise ValueError("LoRA is not supported with chunked prefill yet.") diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 3b776c1d9d39f..0b304658f012c 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1111,7 +1111,7 @@ def create_engine_config(self, disable_logprobs=self.disable_logprobs_during_spec_decoding, ) - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if self.num_scheduler_steps > 1: if speculative_config is not None: diff --git a/vllm/engine/output_processor/multi_step.py b/vllm/engine/output_processor/multi_step.py index 
7a6ebb430541f..a9b638ed02a1e 100644 --- a/vllm/engine/output_processor/multi_step.py +++ b/vllm/engine/output_processor/multi_step.py @@ -65,7 +65,7 @@ def process_prompt_logprob(self, seq_group: SequenceGroup, @staticmethod @functools.lru_cache def _log_prompt_logprob_unsupported_warning_once(): - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid logger.warning( "Prompt logprob is not supported by multi step workers. " diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 336f9bc8efb20..6b4cb5a9a1d61 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -23,7 +23,7 @@ class CPUExecutor(ExecutorBase): def _init_executor(self) -> None: assert self.device_config.device_type == "cpu" - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid assert self.lora_config is None, "cpu backend doesn't support LoRA" diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index b5333fbd6f502..680ee74129739 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -46,7 +46,7 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: import vllm.envs as envs from vllm.utils import GiB_bytes model_config = vllm_config.model_config - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if not model_config.enforce_eager: logger.warning( diff --git a/vllm/spec_decode/spec_decode_worker.py b/vllm/spec_decode/spec_decode_worker.py index 53634f7b0b366..ced7f53827665 100644 --- a/vllm/spec_decode/spec_decode_worker.py +++ b/vllm/spec_decode/spec_decode_worker.py @@ -104,7 +104,7 @@ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker": return spec_decode_worker -# Reminder: Please update docs/source/serving/compatibility_matrix.rst +# Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid class SpecDecodeWorker(LoraNotSupportedWorkerBase): """Worker which implements speculative decoding. 
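
The hunks in this patch only retarget the reminder comment to the new ``docs/source/usage/compatibility_matrix.rst`` path; the guarded behavior itself is unchanged. As a point of reference, the sketch below is plain illustrative Python, not vLLM's actual code (the flag names are invented), showing the two guard styles those reminder comments sit next to: a hard rejection of an unsupported feature combination, and a soft downgrade with a warning.

.. code-block:: python

    import logging

    logger = logging.getLogger(__name__)


    def verify_feature_combo(enable_lora: bool, chunked_prefill: bool,
                             use_async_output_proc: bool,
                             enforce_eager: bool) -> bool:
        """Return the possibly-downgraded async-output setting."""
        # Hard guard: reject a combination documented as unsupported.
        if enable_lora and chunked_prefill:
            raise ValueError("LoRA is not supported with chunked prefill yet.")
        # Soft guard: fall back and warn instead of failing outright.
        if use_async_output_proc and enforce_eager:
            logger.warning("Async output processing cannot be used with "
                           "enforce_eager; falling back to the synchronous "
                           "path.")
            return False
        return use_async_output_proc

Whichever style is used, the accompanying reminder keeps the compatibility matrix documentation in sync if the combination later becomes valid.
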
diff --git a/vllm/utils.py b/vllm/utils.py index 07bf82e24cbe6..6cee4847e57b4 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -47,7 +47,7 @@ # Exception strings for non-implemented encoder/decoder scenarios -# Reminder: Please update docs/source/serving/compatibility_matrix.rst +# Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid STR_NOT_IMPL_ENC_DEC_SWA = \ diff --git a/vllm/worker/multi_step_model_runner.py b/vllm/worker/multi_step_model_runner.py index 3ee0fb4dc943e..3ca0d88a42183 100644 --- a/vllm/worker/multi_step_model_runner.py +++ b/vllm/worker/multi_step_model_runner.py @@ -817,7 +817,7 @@ def _pythonize_sampler_output( for sgdx, (seq_group, sample_result) in enumerate(zip(seq_groups, samples_list)): - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid # (Check for Guided Decoding) if seq_group.sampling_params.logits_processors: diff --git a/vllm/worker/utils.py b/vllm/worker/utils.py index f43635464ef00..5f71ec0c14df8 100644 --- a/vllm/worker/utils.py +++ b/vllm/worker/utils.py @@ -13,7 +13,7 @@ def assert_enc_dec_mr_supported_scenario( a supported scenario. ''' - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if enc_dec_mr.cache_config.enable_prefix_caching: From 1f958a7d52b24314e41c4bb56c51b1dce5405e05 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Thu, 5 Dec 2024 13:20:26 +0800 Subject: [PATCH 241/397] [Bugfix] Fix BNB loader target_modules (#10720) Signed-off-by: Jee Jee Li --- vllm/model_executor/model_loader/loader.py | 64 ++-------------------- 1 file changed, 6 insertions(+), 58 deletions(-) diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index b4921cc80797f..a0ea0e5fad3c2 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -6,7 +6,6 @@ import glob import inspect import itertools -import json import math import os import warnings @@ -18,7 +17,7 @@ import huggingface_hub import numpy as np import torch -from huggingface_hub import HfApi, hf_hub_download +from huggingface_hub import HfApi from torch import nn from transformers import AutoModelForCausalLM from transformers.utils import SAFE_WEIGHTS_INDEX_NAME @@ -704,51 +703,9 @@ def __init__(self, load_config: LoadConfig): self.unsharded_weights_modules: List[str] = [] # Save the module names that are sharded by column. self.column_sharded_weights_modules: List[str] = [] - # we don't need to quantize the whole model, only the target modules - # that are specified in the adapter config file. If the adapter config - # file is not provided, we will quantize the default modules. - if (not load_config.model_loader_extra_config - or "qlora_adapter_name_or_path" - not in load_config.model_loader_extra_config): - self.target_modules = [] - return - - qlora_adapter = load_config.model_loader_extra_config[ - "qlora_adapter_name_or_path"] - - config_file_path = self._get_config_file(qlora_adapter) - - with open(config_file_path) as f: - config = json.load(f) - self.target_modules = config["target_modules"] - # TODO: target_modules could be either a list or a regex string. - # We need to handle both cases. 
- assert isinstance(self.target_modules, - list), "Unsupported target_modules: " - f"{self.target_modules}" - - def _get_config_file(self, qlora_adapter: str) -> str: - is_local = os.path.isdir(qlora_adapter) - config_file_path = None - if is_local: - for file in self.possible_config_file_names: - config_file_path = os.path.join(qlora_adapter, file) - if os.path.exists(config_file_path): - break - else: - hf_api = HfApi() - repo_files = hf_api.list_repo_files(repo_id=qlora_adapter) - for file in self.possible_config_file_names: - if file in repo_files: - config_file_path = hf_hub_download(repo_id=qlora_adapter, - filename=file) - break - - if not config_file_path: - raise ValueError( - f"Cannot find adapter config file in {qlora_adapter}") - - return config_file_path + # Store all module names (from transformers) that support + # BNB quantization. + self.target_modules: List[str] = [] def _get_weight_files( self, @@ -1030,25 +987,16 @@ def _get_bnb_target_modules(self, model: nn.Module) -> None: inverse_stacked_mapping[packed] = [] inverse_stacked_mapping[packed].insert(idx, orig) - linear_module_lst = [] for name, module in model.named_modules(): if isinstance(module, (LinearBase, )): last_name = name.split(".")[-1] if sub_modules := inverse_stacked_mapping.get(last_name, []): # Map vllm's names to transformers' names. for sub_name in sub_modules: - linear_module_lst.append( + self.target_modules.append( name.replace(last_name, sub_name)) else: - linear_module_lst.append(name) - if self.target_modules: - # Update self.target_modules - self.target_modules = [ - qual_name for qual_name in linear_module_lst - if any(t in qual_name for t in self.target_modules) - ] - else: - self.target_modules = linear_module_lst + self.target_modules.append(name) assert (self.target_modules ), "vllm currently does not support BNB quantization for" f" {type(model).__name__}" From 39c89e71a84779c0758ec603efcded7a48bb5fc0 Mon Sep 17 00:00:00 2001 From: Travis Johnson Date: Wed, 4 Dec 2024 22:54:06 -0700 Subject: [PATCH 242/397] [Misc] Update llama 3.2 template to support system prompt with images (#10901) Signed-off-by: Travis Johnson --- examples/tool_chat_template_llama3.2_json.jinja | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/examples/tool_chat_template_llama3.2_json.jinja b/examples/tool_chat_template_llama3.2_json.jinja index 39f902c1c3c40..2b290c0eede03 100644 --- a/examples/tool_chat_template_llama3.2_json.jinja +++ b/examples/tool_chat_template_llama3.2_json.jinja @@ -26,13 +26,11 @@ {%- endfor %} {%- endfor %} - {#- This block extracts the system message, so we can slot it into the right place. #} {%- if messages[0]['role'] == 'system' %} {%- if messages[0]['content'] is string %} {%- set system_message = messages[0]['content']|trim %} {%- else %} - {#- Support vLLM's transforming of a content string to JSON. 
#} {%- set system_message = messages[0]['content'][0]['text']|trim %} {%- endif %} {%- set messages = messages[1:] %} @@ -44,14 +42,8 @@ {%- endif %} {%- endif %} -{#- Including an image is not compatible with a system message #} -{%- if image_ns.has_images and not system_message == "" %} - {{- raise_exception("Prompting with images is incompatible with system messages and tool use.") }} -{%- endif %} - - -{#- System message, if there are no images #} -{%- if not image_ns.has_images %} +{#- System message if there are no images, if the user supplied one, or if tools are used (default tool system message) #} +{%- if system_message or not image_ns.has_images %} {{- "<|start_header_id|>system<|end_header_id|>\n\n" }} {%- if tools is not none %} {{- "Environment: ipython\n" }} From 571da8fc431ec36427ee1034a7779b23229b015e Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Thu, 5 Dec 2024 21:22:28 +0800 Subject: [PATCH 243/397] [Misc][LoRA] Clean up the function interface of Punica (#10917) Signed-off-by: Jee Jee Li --- tests/lora/test_layers.py | 42 ++- vllm/lora/fully_sharded_layers.py | 175 +++++----- vllm/lora/layers.py | 538 +++++++++++------------------- vllm/lora/models.py | 8 +- vllm/lora/punica.py | 365 ++++++++++---------- 5 files changed, 497 insertions(+), 631 deletions(-) diff --git a/tests/lora/test_layers.py b/tests/lora/test_layers.py index 15e576cb065c7..a113e3f7abc1e 100644 --- a/tests/lora/test_layers.py +++ b/tests/lora/test_layers.py @@ -565,7 +565,9 @@ def _pretest(): @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) @pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("stage", STAGES) -def test_linear_replicated(dist_init, num_loras, device, stage) -> None: +@pytest.mark.parametrize("bias_enabled", [True, False]) +def test_linear_replicated(dist_init, num_loras, device, stage, + bias_enabled) -> None: torch.cuda.set_device(device) torch.set_default_device(device) @@ -573,7 +575,8 @@ def test_linear_replicated(dist_init, num_loras, device, stage) -> None: max_loras = 8 lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, - lora_dtype=torch.float16) + lora_dtype=torch.float16, + bias_enabled=bias_enabled) def create_random_linear_replicated_layer(): @@ -585,7 +588,12 @@ def create_random_linear_replicated_layer(): lora_linear = ReplicatedLinearWithLoRA(linear) lora_linear.create_lora_weights(max_loras, lora_config) - + assert (lora_linear.n_slices == len(lora_linear.lora_a_stacked) == len( + lora_linear.lora_b_stacked) == 1) + if bias_enabled: + assert len(lora_linear.lora_bias_stacked) == lora_linear.n_slices + else: + assert lora_linear.lora_bias_stacked is None return linear, lora_linear for i in range(10): @@ -669,8 +677,9 @@ def create_random_linear_replicated_layer(): @pytest.mark.parametrize("fully_shard", [True, False]) @pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("stage", STAGES) +@pytest.mark.parametrize("bias_enabled", [True, False]) def test_linear_parallel(dist_init, num_loras, orientation, fully_shard, - device, stage) -> None: + device, stage, bias_enabled) -> None: torch.cuda.set_device(device) torch.set_default_device(device) @@ -679,7 +688,8 @@ def test_linear_parallel(dist_init, num_loras, orientation, fully_shard, lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, fully_sharded_loras=fully_shard, - lora_dtype=torch.float16) + lora_dtype=torch.float16, + bias_enabled=bias_enabled) def create_random_linear_parallel_layer(): if orientation == "row": @@ -700,7 +710,12 @@ def 
create_random_linear_parallel_layer(): if not fully_shard else ColumnParallelLinearWithShardedLoRA(linear)) lora_linear.create_lora_weights(max_loras, lora_config) - + assert (lora_linear.n_slices == len(lora_linear.lora_a_stacked) == len( + lora_linear.lora_b_stacked) == 1) + if bias_enabled: + assert len(lora_linear.lora_bias_stacked) == lora_linear.n_slices + else: + assert lora_linear.lora_bias_stacked is None return linear, lora_linear for i in range(10): @@ -784,8 +799,9 @@ def create_random_linear_parallel_layer(): @pytest.mark.parametrize("fully_shard", [True, False]) @pytest.mark.parametrize("device", CUDA_DEVICES) @pytest.mark.parametrize("stage", STAGES) +@pytest.mark.parametrize("bias_enabled", [True, False]) def test_column_parallel_packed(dist_init, num_loras, repeats, fully_shard, - device, stage) -> None: + device, stage, bias_enabled) -> None: torch.cuda.set_device(device) torch.set_default_device(device) @@ -794,7 +810,8 @@ def test_column_parallel_packed(dist_init, num_loras, repeats, fully_shard, lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, fully_sharded_loras=fully_shard, - lora_dtype=torch.float16) + lora_dtype=torch.float16, + bias_enabled=bias_enabled) def create_column_parallel_packed_layer(): if repeats == 2: @@ -832,10 +849,16 @@ class FakeConfig: num_key_value_heads = 32 num_attention_heads = 32 + n_slices = repeats lora_linear.create_lora_weights(max_loras, lora_config, model_config=FakeConfig()) - + assert (lora_linear.n_slices == len(lora_linear.lora_a_stacked) == len( + lora_linear.lora_b_stacked) == n_slices) + if bias_enabled: + assert len(lora_linear.lora_bias_stacked) == lora_linear.n_slices + else: + assert lora_linear.lora_bias_stacked is None return linear, lora_linear for i in range(10): @@ -911,7 +934,6 @@ class FakeConfig: 512, lora_config.lora_extra_vocab_size, ) - # lora_linear.set_mapping(*mapping_info) lora_result = lora_linear(torch.cat(inputs))[0] expected_result = linear(torch.cat(inputs))[0] diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index e25e453201f01..545ec21ca74c1 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -1,5 +1,5 @@ # pylint: disable=unused-argument -from typing import TYPE_CHECKING, List, Optional, Union +from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast import torch import torch.nn as nn @@ -32,6 +32,44 @@ def dec(*args, **kwargs): return dec +def _mcp_apply(x, bias, layer: ColumnParallelLinearWithLoRA): + """ + For `ColumnParallelLinearWithLoRA` or classes that inherit from + `ColumnParallelLinearWithLoRA`, they share the same `apply` logic. + """ + assert (layer.n_slices == len(layer.lora_a_stacked) == len( + layer.lora_b_stacked) == len(layer.output_slices)) + if layer.lora_bias_stacked is not None: + assert layer.n_slices == len(layer.lora_bias_stacked) + + output = layer.base_layer.quant_method.apply(layer.base_layer, x, bias) + + x = x.view(-1, x.shape[-1]) + output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape + + # Since communication is needed, the buffer is directly initialized as a + # tensor rather than a tuple of tensor. 
+ buffers = torch.zeros( + (layer.n_slices, x.shape[0], layer.lora_a_stacked[0].shape[2]), + dtype=torch.float32, + device=x.device, + ) + + layer.punica_wrapper.add_shrink(buffers, x, layer.lora_a_stacked, 1.0) + buffers = tensor_model_parallel_all_gather(buffers) + layer.punica_wrapper.add_expand(output, + buffers, + layer.lora_b_stacked, + layer.lora_bias_stacked, + layer.output_slices, + offset_start=0, + add_input=True) + + output = output.view(*out_orig_shape) + # now have column partitioned and packed output + return output + + # these layers are based on the tensor parallelism strategy given in # Y. Sheng et al., S-LoRA: Serving Thousands of Concurrent LoRA Adapters. 2023, # https://arxiv.org/abs/2311.03285. @@ -51,34 +89,15 @@ class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA): # gather operation. def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: tp_rank = get_tensor_model_parallel_rank() - shard_size = self.lora_a_stacked.shape[2] + shard_size = self.lora_a_stacked[0].shape[2] start_idx = tp_rank * shard_size lora_a = lora_a[:, start_idx:start_idx + shard_size] return lora_a - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - - x = x.view(-1, x.shape[-1]) - output, out_orig_shape = output.view(-1, - output.shape[-1]), output.shape - buffer = torch.zeros( - (x.shape[0], self.lora_a_stacked.shape[2]), - dtype=torch.float32, - device=x.device, - ) - self.punica_wrapper.add_shrink(buffer, x, self.lora_a_stacked, 1.0) - buffer = tensor_model_parallel_all_gather(buffer) - self.punica_wrapper.add_expand(output, - buffer, - self.lora_b_stacked, - self.bias_stacked, - add_input=True) - # now have column partitioned output - - output = output.view(*out_orig_shape) - return output + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + return _mcp_apply(x, bias, self) @classmethod @_fully_sharded_can_replace @@ -99,46 +118,6 @@ def can_replace_layer( ) -def _mcp_apply(x, bias, layer: QKVParallelLinearWithLora): - """ - MergedColumnParallelLinearWithShardedLoRA and - MergedQKVParallelLinearWithShardedLora share the same - LoRa weight application method. - - The main difference is the step by shard_size for lora_b which can - vary for MergedQKVParallelLinearWithShardedLora but is constant for - MergedColumnParallelLinearWithShardedLoRA. 
- """ - # expecting 2 for column parallel and 3 for qkv - n = len(layer.lora_a_stacked) - output = layer.base_layer.quant_method.apply(layer.base_layer, x, bias) - - x = x.view(-1, x.shape[-1]) - output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape - buffers = torch.zeros( - (n, x.shape[0], layer.lora_a_stacked[0].shape[2]), - dtype=torch.float32, - device=x.device, - ) - for idx in range(n): - layer.punica_wrapper.add_shrink(buffers[idx], x, - layer.lora_a_stacked[idx], 1.0) - - buffers = tensor_model_parallel_all_gather(buffers) - layer.punica_wrapper.add_expand_packed_nslice( - output, - buffers, - layer.lora_b_stacked, - layer.bias_stacked, - 1.0, - layer.output_slices, - ) - - output = output.view(*out_orig_shape) - # now have column partitioned and packed output - return output - - class MergedColumnParallelLinearWithShardedLoRA( MergedColumnParallelLinearWithLoRA): """ @@ -162,8 +141,9 @@ def slice_lora_a( ] return lora_a - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: return _mcp_apply(x, bias, self) @classmethod @@ -195,31 +175,15 @@ class QKVParallelLinearWithShardedLora(QKVParallelLinearWithLora): def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: tp_rank = get_tensor_model_parallel_rank() - shard_size = self.lora_a_stacked.shape[2] + shard_size = self.lora_a_stacked[0].shape[2] start_idx = tp_rank * shard_size lora_a = lora_a[:, start_idx:start_idx + shard_size] return lora_a - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - - x = x.view(-1, x.shape[-1]) - output, out_orig_shape = output.view(-1, - output.shape[-1]), output.shape - buffer = torch.zeros((x.shape[0], self.lora_a_stacked.shape[2]), - dtype=torch.float32, - device=x.device) - self.punica_wrapper.add_shrink(buffer, x, self.lora_a_stacked, 1.0) - buffer = tensor_model_parallel_all_gather(buffer) - self.punica_wrapper.add_expand(output, - buffer, - self.lora_b_stacked, - self.bias_stacked, - add_input=True) - # now have column partitioned output - output = output.view(*out_orig_shape) - return output + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + return _mcp_apply(x, bias, self) @classmethod @_fully_sharded_can_replace @@ -260,8 +224,9 @@ def slice_lora_a( ] return lora_a - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: return _mcp_apply(x, bias, self) @classmethod @@ -294,7 +259,7 @@ class RowParallelLinearWithShardedLoRA(RowParallelLinearWithLoRA): """ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: - shard_size = self.lora_b_stacked.shape[2] + shard_size = self.lora_b_stacked[0].shape[2] start_idx = self.tp_rank * shard_size end_idx = (self.tp_rank + 1) * shard_size lora_b = lora_b[:, start_idx:end_idx] @@ -303,20 +268,24 @@ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: if bias is None: return bias - shard_size = self.bias_stacked.shape[2] + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + shard_size = self.lora_bias_stacked[0].shape[2] start_idx = self.tp_rank * shard_size end_idx = (self.tp_rank + 1) * shard_size bias = bias[start_idx:end_idx] return bias - 
def apply(self, x: torch.Tensor) -> torch.Tensor: + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x) x = x.view(-1, x.shape[-1]) output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape buffer = torch.zeros( - (x.shape[0], self.lora_a_stacked.shape[2]), + (self.n_slices, x.shape[0], self.lora_a_stacked[0].shape[2]), dtype=torch.float32, device=x.device, ) @@ -330,12 +299,18 @@ def apply(self, x: torch.Tensor) -> torch.Tensor: # remains is a standard all_reduce. User should be aware though that # the output is not the same as a normal row_parallel, it should be # reduced before being used - shard_size = self.lora_b_stacked.shape[2] - start_idx = self.tp_rank * shard_size - self.punica_wrapper.add_expand_slice(output, buffer, - self.lora_b_stacked, - self.bias_stacked, start_idx, - shard_size) + # NOTE offset are based on the rank. + shard_size = self.lora_b_stacked[0].shape[2] + offset_start = self.tp_rank * shard_size + self.punica_wrapper.add_expand( + output, + buffer, + self.lora_b_stacked, + self.lora_bias_stacked, + self.output_slices, + offset_start=offset_start, + add_input=True, + ) output = output.view(*out_orig_shape) return output diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 73748b5ce511e..473e4bedf3d60 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -1,7 +1,7 @@ # pylint: disable=unused-argument import math from dataclasses import dataclass -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast import torch import torch.nn as nn @@ -18,11 +18,14 @@ tensor_model_parallel_gather) from vllm.distributed.utils import divide from vllm.lora.punica import PunicaWrapper +# yapf: disable from vllm.model_executor.layers.linear import (ColumnParallelLinear, + LinearBase, MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, RowParallelLinear) +# yapf: enable from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.rotary_embedding import ( LinearScalingRotaryEmbedding, RotaryEmbedding) @@ -249,13 +252,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: full_lora_a_embeddings.shape[1], -1, ) - - # Embedding layer only need expand op - self.punica_wrapper.add_expand(full_output, - full_lora_a_embeddings, - self.lora_b_stacked, - bias_all=None, - add_input=True) + self.punica_wrapper.add_lora_embedding(full_output, + full_lora_a_embeddings, + self.lora_b_stacked, + add_input=True) return full_output.view_as(full_output_org) @classmethod @@ -269,14 +269,19 @@ def can_replace_layer( return type(source_layer) is VocabParallelEmbedding -class ReplicatedLinearWithLoRA(BaseLayerWithLoRA): +class BaseLinearLayerWithLoRA(BaseLayerWithLoRA): - def __init__(self, base_layer: ReplicatedLinear) -> None: + def __init__(self, base_layer: LinearBase): super().__init__() self.base_layer = base_layer self.input_size = self.base_layer.input_size - self.output_size = self.base_layer.output_size self.device = _get_lora_device(self.base_layer) + self.lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]] = None + + self.output_slices: Tuple[int, ...] 
+ self.tp_size: int + self.output_size: int + self.n_slices: int def create_lora_weights( self, @@ -285,39 +290,64 @@ def create_lora_weights( model_config: Optional[PretrainedConfig] = None, ) -> None: self.lora_config = lora_config - lora_a_output_size = lora_config.max_lora_rank - self.lora_a_stacked = torch.zeros( - max_loras, - 1, - lora_a_output_size, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ) - self.lora_b_stacked = torch.zeros( - max_loras, - 1, - self.output_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ) - if lora_config.bias_enabled: - self.bias_stacked = torch.zeros( + # + if isinstance(self.base_layer, ReplicatedLinear): + lora_a_out_size = lora_config.max_lora_rank + lora_b_out_size = self.output_size + + elif isinstance(self.base_layer, ColumnParallelLinear): + lora_a_out_size = (lora_config.max_lora_rank if + not lora_config.fully_sharded_loras else divide( + lora_config.max_lora_rank, self.tp_size)) + lora_b_out_size = self.output_size + + elif isinstance(self.base_layer, RowParallelLinear): + lora_a_out_size = lora_config.max_lora_rank + lora_b_out_size = (self.output_size if + not lora_config.fully_sharded_loras else divide( + self.output_size, self.tp_size)) + else: + raise NotImplementedError + + self.lora_a_stacked = tuple( + torch.zeros( max_loras, 1, - self.output_size, + lora_a_out_size, + self.input_size, dtype=lora_config.lora_dtype, device=self.device, - ) - else: - self.bias_stacked = None + ) for _ in range(self.n_slices)) + self.lora_b_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_b_out_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(self.n_slices)) + if lora_config.bias_enabled: + lora_bias_out_size = lora_b_out_size + self.lora_bias_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_bias_out_size, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(self.n_slices)) + self.output_slices = (self.lora_b_stacked[0].shape[2], ) def reset_lora(self, index: int): - self.lora_a_stacked[index] = 0 - self.lora_b_stacked[index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[index] = 0 + for s_index in range(self.n_slices): + self.lora_a_stacked[s_index][index] = 0 + self.lora_b_stacked[s_index][index] = 0 + if self.lora_config.bias_enabled: + # Make mypy happy + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + self.lora_bias_stacked[s_index][index] = 0 def set_lora( self, @@ -325,29 +355,56 @@ def set_lora( lora_a: torch.Tensor, lora_b: torch.Tensor, embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, + lora_bias: Optional[torch.Tensor] = None, ): - self.reset_lora(index) + # Except for QKVParallelLinearWithLora and + # MergedColumnParallelLinearWithLoRA, all other linear LoRA layers + # store weights in a tuple of size 1. These two layers will + # override this function. 
+ assert (len(self.lora_a_stacked) == len(self.lora_b_stacked) == + self.n_slices == 1) - self.lora_a_stacked[index, - 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( - lora_a.T, non_blocking=True) - self.lora_b_stacked[index, - 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( - lora_b.T, non_blocking=True) - if bias is not None: - self.bias_stacked[index, - 0, :bias.shape[0]].copy_(bias.T, - non_blocking=True) - - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: + self.reset_lora(index) + if self.tp_size > 1: + lora_a = self.slice_lora_a(lora_a) + lora_b = self.slice_lora_b(lora_b) + if lora_bias is not None: + lora_bias = self.slice_bias(lora_bias) + + self.lora_a_stacked[0][index, + 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( + lora_a.T, non_blocking=True) + self.lora_b_stacked[0][index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + if lora_bias is not None: + + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + assert len(self.lora_bias_stacked) + self.lora_bias_stacked[0][index, 0, :lora_bias.shape[0]].copy_( + lora_bias.T, non_blocking=True) + + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, self.bias_stacked, - 1.0) + self.punica_wrapper.add_lora_linear(output, x, self.lora_a_stacked, + self.lora_b_stacked, + self.lora_bias_stacked, 1.0, + self.output_slices) return output + +class ReplicatedLinearWithLoRA(BaseLinearLayerWithLoRA): + + def __init__(self, base_layer: ReplicatedLinear) -> None: + super().__init__(base_layer, ) + # To ensure interface compatibility, set to 1 always. + self.tp_size = 1 + self.output_size = self.base_layer.output_size + self.n_slices = 1 + def forward(self, input_): """Forward of ReplicatedLinearWithLoRA @@ -380,73 +437,26 @@ def can_replace_layer( return type(source_layer) is ReplicatedLinear -class ColumnParallelLinearWithLoRA(BaseLayerWithLoRA): +class ColumnParallelLinearWithLoRA(BaseLinearLayerWithLoRA): """ LoRA on top of ColumnParallelLinear layer. - LoRA B is sliced for tensor parallelism. + There are two types for the `base_layer`: + 1. ColumnParallelLinear, e.g.`dense_h_to_4h` in `FalconForCausalLM`. + 2. MergedColumnParallelLinear, e.g.`gate_up_proj` in `Phi3ForCausalLM`. """ def __init__(self, base_layer: ColumnParallelLinear) -> None: - super().__init__() + super().__init__(base_layer) # The base_layer type is ColumnParallelLinear or # MergedColumnParallelLinear, their weight sharding logic is # inconsistent when TP is greater than 1. 
self.is_merged_col_linear = type( base_layer) is MergedColumnParallelLinear - - self.base_layer = base_layer self.tp_size = get_tensor_model_parallel_world_size() - self.input_size = self.base_layer.input_size self.output_size = self.base_layer.output_size_per_partition - self.device = _get_lora_device(self.base_layer) - - def create_lora_weights( - self, - max_loras: int, - lora_config: LoRAConfig, - model_config: Optional[PretrainedConfig] = None, - ) -> None: - self.lora_config = lora_config - self.tp_size = get_tensor_model_parallel_world_size() - lora_a_output_size_per_partition = ( - lora_config.max_lora_rank if not lora_config.fully_sharded_loras - else divide(lora_config.max_lora_rank, self.tp_size)) - self.lora_a_stacked = torch.zeros( - max_loras, - 1, - lora_a_output_size_per_partition, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ) - self.lora_b_stacked = torch.zeros( - max_loras, - 1, - self.output_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ) - - if lora_config.bias_enabled: - self.bias_stacked = torch.zeros( - max_loras, - 1, - self.output_size, - dtype=lora_config.lora_dtype, - device=self.device, - ) - else: - self.bias_stacked = None - - self.output_dim = self.lora_b_stacked.shape[2] - - def reset_lora(self, index: int): - self.lora_a_stacked[index] = 0 - self.lora_b_stacked[index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[index] = 0 + # There is only one LoRA layer + self.n_slices = 1 def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: return lora_a @@ -485,40 +495,6 @@ def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: bias = bias[start_idx:end_idx] return bias - def set_lora( - self, - index: int, - lora_a: torch.Tensor, - lora_b: torch.Tensor, - embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, - ): - self.reset_lora(index) - - if self.tp_size > 1: - lora_a = self.slice_lora_a(lora_a) - lora_b = self.slice_lora_b(lora_b) - bias = self.slice_bias(bias) - - self.lora_a_stacked[index, - 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( - lora_a.T, non_blocking=True) - self.lora_b_stacked[index, - 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( - lora_b.T, non_blocking=True) - if bias is not None: - self.bias_stacked[index, - 0, :bias.shape[0]].copy_(bias.T, - non_blocking=True) - - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, self.bias_stacked, - 1.0) - return output - def forward(self, input_): """Forward of ColumnParallelLinear @@ -568,6 +544,8 @@ class MergedColumnParallelLinearWithLoRA(ColumnParallelLinearWithLoRA): def __init__(self, base_layer: MergedColumnParallelLinear) -> None: super().__init__(base_layer) + # There are two LoRA layers + self.n_slices = len(self.base_layer.output_sizes) def create_lora_weights( self, @@ -575,9 +553,13 @@ def create_lora_weights( lora_config: LoRAConfig, model_config: Optional[PretrainedConfig] = None, ) -> None: + """ + The main reason for overriding this function is to enhance code + maintainability. 
+ """ self.lora_config = lora_config - n_slices = 2 - if not (len(self.base_layer.output_sizes) == n_slices + + if not (len(self.base_layer.output_sizes) == self.n_slices == 2 and self.base_layer.output_sizes[0] == self.base_layer.output_sizes[1]): raise ValueError( @@ -598,7 +580,7 @@ def create_lora_weights( self.input_size, dtype=lora_config.lora_dtype, device=self.device, - ) for _ in range(n_slices)) + ) for _ in range(self.n_slices)) self.lora_b_stacked = tuple( torch.zeros( max_loras, @@ -607,30 +589,19 @@ def create_lora_weights( lora_config.max_lora_rank, dtype=lora_config.lora_dtype, device=self.device, - ) for _ in range(n_slices)) + ) for _ in range(self.n_slices)) if lora_config.bias_enabled: - self.bias_stacked = tuple( + self.lora_bias_stacked = tuple( torch.zeros( max_loras, 1, self.output_size // 2, dtype=lora_config.lora_dtype, device=self.device, - ) for _ in range(n_slices)) - else: - self.bias_stacked = None + ) for _ in range(self.n_slices)) self.output_dim = self.lora_b_stacked[0].shape[2] self.output_slices = (self.output_dim, self.output_dim) - def reset_lora(self, index: int): - self.lora_a_stacked[0][index] = 0 - self.lora_a_stacked[1][index] = 0 - self.lora_b_stacked[0][index] = 0 - self.lora_b_stacked[1][index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[0][index] = 0 - self.bias_stacked[1][index] = 0 - def slice_lora_a( self, lora_a: List[Union[torch.Tensor, None]] ) -> List[Union[torch.Tensor, None]]: @@ -668,15 +639,15 @@ def set_lora( lora_a: torch.Tensor, lora_b: torch.Tensor, embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, + lora_bias: Optional[torch.Tensor] = None, ): self.reset_lora(index) if self.tp_size > 1: lora_a = self.slice_lora_a(lora_a) lora_b = self.slice_lora_b(lora_b) - if bias is not None: - bias = self.slice_bias(bias) + if lora_bias is not None: + lora_bias = self.slice_bias(lora_bias) if lora_a[0] is not None: self.lora_a_stacked[0][ @@ -685,10 +656,11 @@ def set_lora( self.lora_b_stacked[0][ index, 0, :lora_b[0].shape[1], :lora_b[0].shape[0]].copy_( lora_b[0].T, non_blocking=True) - if bias is not None and bias[0] is not None: - self.bias_stacked[0][index, - 0, :bias[0].shape[0]].copy_(bias[0].T, - non_blocking=True) + if lora_bias is not None and lora_bias[0] is not None: + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + self.lora_bias_stacked[0][index, 0, :lora_bias[0].shape[0]].copy_( + lora_bias[0].T, non_blocking=True) if lora_a[1] is not None: self.lora_a_stacked[1][ index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( @@ -696,18 +668,11 @@ def set_lora( self.lora_b_stacked[1][ index, 0, :lora_b[1].shape[1], :lora_b[1].shape[0]].copy_( lora_b[1].T, non_blocking=True) - if bias is not None and bias[1] is not None: - self.bias_stacked[1][index, - 0, :bias[1].shape[0]].copy_(bias[1].T, - non_blocking=True) - - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - self.punica_wrapper.add_lora_packed_nslice( - output, x, self.lora_a_stacked, self.lora_b_stacked, - self.bias_stacked, 1.0, (self.output_dim, self.output_dim)) - return output + if lora_bias is not None and lora_bias[1] is not None: + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + self.lora_bias_stacked[1][index, 0, :lora_bias[1].shape[0]].copy_( + lora_bias[1].T, non_blocking=True) @classmethod @_not_fully_sharded_can_replace @@ -737,7 +702,6 @@ 
class QKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): def __init__(self, base_layer: QKVParallelLinear) -> None: super().__init__(base_layer) - self.tp_size = get_tensor_model_parallel_world_size() self.q_proj_total_size = (self.base_layer.total_num_heads * self.base_layer.head_size) self.q_proj_shard_size = (self.base_layer.num_heads * @@ -746,6 +710,8 @@ def __init__(self, base_layer: QKVParallelLinear) -> None: self.base_layer.head_size) self.kv_proj_total_size = (self.base_layer.total_num_kv_heads * self.base_layer.head_size) + # There is only one LoRA layer + self.n_slices = 1 def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: tp_rank = get_tensor_model_parallel_rank() @@ -780,32 +746,6 @@ def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: bias = torch.cat([bias_q, bias_k, bias_v], dim=1) return bias - def set_lora( - self, - index: int, - lora_a: torch.Tensor, - lora_b: torch.Tensor, - embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, - ): - self.reset_lora(index) - if self.tp_size > 1: - lora_a = self.slice_lora_a(lora_a) - lora_b = self.slice_lora_b(lora_b) - if bias is not None: - bias = self.slice_bias(bias) - - self.lora_a_stacked[index, - 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( - lora_a.T, non_blocking=True) - self.lora_b_stacked[index, - 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( - lora_b.T, non_blocking=True) - if bias is not None: - self.bias_stacked[index, - 0, :bias.shape[0]].copy_(bias.T, - non_blocking=True) - @classmethod @_not_fully_sharded_can_replace def can_replace_layer(cls, source_layer: nn.Module, @@ -828,6 +768,10 @@ class MergedQKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): def __init__(self, base_layer: QKVParallelLinear) -> None: super().__init__(base_layer) + # There are three LoRA layer. + self.n_slices = len(self.base_layer.output_sizes) + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() def create_lora_weights( self, @@ -835,9 +779,16 @@ def create_lora_weights( lora_config: LoRAConfig, model_config: Optional[PretrainedConfig] = None, ) -> None: + """ + The main reason for overloading this function is to handle inconsistent + weight dimensions in qkv lora. 
+ """ self.lora_config = lora_config - self.tp_size = get_tensor_model_parallel_world_size() - self.tp_rank = get_tensor_model_parallel_rank() + + if not (len(self.base_layer.output_sizes) == self.n_slices == 3): + raise ValueError( + "LoRAColumnParallelLinear3Slice requires 3 slices.") + self.q_proj_shard_size = (self.base_layer.num_heads * self.base_layer.head_size) self.kv_proj_shard_size = (self.base_layer.num_kv_heads * @@ -902,7 +853,7 @@ def create_lora_weights( ), ) if lora_config.bias_enabled: - self.bias_stacked = ( + self.lora_bias_stacked = ( torch.zeros( max_loras, 1, @@ -925,9 +876,6 @@ def create_lora_weights( device=self.device, ), ) - else: - self.bias_stacked = None - self.output_slices = ( self.q_proj_shard_size, self.kv_proj_shard_size, @@ -939,18 +887,6 @@ def create_lora_weights( self.indices: torch.Tensor self.indices_len: List[int] - def reset_lora(self, index: int): - self.lora_a_stacked[0][index] = 0 - self.lora_b_stacked[0][index] = 0 - self.lora_a_stacked[1][index] = 0 - self.lora_b_stacked[1][index] = 0 - self.lora_a_stacked[2][index] = 0 - self.lora_b_stacked[2][index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[0][index] = 0 - self.bias_stacked[1][index] = 0 - self.bias_stacked[2][index] = 0 - def slice_lora_a( self, lora_a: List[Union[torch.Tensor, None]] ) -> List[Union[torch.Tensor, None]]: @@ -1000,15 +936,15 @@ def set_lora( lora_a: torch.Tensor, lora_b: torch.Tensor, embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, + lora_bias: Optional[torch.Tensor] = None, ): self.reset_lora(index) if self.tp_size > 1: lora_a = self.slice_lora_a(lora_a) lora_b = self.slice_lora_b(lora_b) - if bias is not None: - bias = self.slice_bias(bias) + if lora_bias is not None: + lora_bias = self.slice_bias(lora_bias) if lora_b[0] is not None: lora_b_q = lora_b[0] @@ -1039,26 +975,24 @@ def set_lora( index, 0, :lora_a[2].shape[1], :lora_a[2].shape[0]].copy_( lora_a[2].T, non_blocking=True) - if bias is not None: - if bias[0] is not None: - self.bias_stacked[0][index, 0, :bias[0].shape[0]].copy_( - bias[0].T, non_blocking=True) - if bias[1] is not None: - self.bias_stacked[1][index, 0, :bias[1].shape[0]].copy_( - bias[1].T, non_blocking=True) - if bias[2] is not None: - self.bias_stacked[2][index, 0, :bias[2].shape[0]].copy_( - bias[2].T, non_blocking=True) - - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - self.punica_wrapper.add_lora_packed_nslice(output, x, - self.lora_a_stacked, - self.lora_b_stacked, - self.bias_stacked, 1.0, - self.output_slices) - return output + if lora_bias is not None: + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + if lora_bias[0] is not None: + self.lora_bias_stacked[0][index, + 0, :lora_bias[0].shape[0]].copy_( + lora_bias[0].T, + non_blocking=True) + if lora_bias[1] is not None: + self.lora_bias_stacked[1][index, + 0, :lora_bias[1].shape[0]].copy_( + lora_bias[1].T, + non_blocking=True) + if lora_bias[2] is not None: + self.lora_bias_stacked[2][index, + 0, :lora_bias[2].shape[0]].copy_( + lora_bias[2].T, + non_blocking=True) @classmethod @_not_fully_sharded_can_replace @@ -1073,76 +1007,25 @@ def can_replace_layer( and len(packed_modules_list) == 3) -class RowParallelLinearWithLoRA(BaseLayerWithLoRA): +class RowParallelLinearWithLoRA(BaseLinearLayerWithLoRA): def __init__(self, base_layer: RowParallelLinear) -> None: - super().__init__() - self.base_layer 
= base_layer + super().__init__(base_layer) + + self.tp_size = get_tensor_model_parallel_world_size() + # reset input_size self.input_size = self.base_layer.input_size_per_partition self.output_size = self.base_layer.output_size - self.device = _get_lora_device(self.base_layer) - def create_lora_weights( - self, - max_loras: int, - lora_config: LoRAConfig, - model_config: Optional[PretrainedConfig] = None, - ) -> None: - self.lora_config = lora_config self.tp_rank = get_tensor_model_parallel_rank() - self.lora_a_stacked = torch.zeros( - ( - max_loras, - 1, - lora_config.max_lora_rank, - self.input_size, - ), - dtype=lora_config.lora_dtype, - device=self.device, - ) - tp_size = get_tensor_model_parallel_world_size() - lora_b_output_size_per_partition = ( - self.output_size if not lora_config.fully_sharded_loras else - divide(self.output_size, tp_size)) - - self.lora_b_stacked = torch.zeros( - ( - max_loras, - 1, - lora_b_output_size_per_partition, - lora_config.max_lora_rank, - ), - dtype=lora_config.lora_dtype, - device=self.device, - ) - - if lora_config.bias_enabled: - self.bias_stacked = torch.zeros( - ( - max_loras, - 1, - self.output_size, - ), - dtype=lora_config.lora_dtype, - device=self.device, - ) - else: - self.bias_stacked = None - # Lazily initialized - self.indices: torch.Tensor - self.indices_len: List[int] - - def reset_lora(self, index: int): - self.lora_a_stacked[index] = 0 - self.lora_b_stacked[index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[index] = 0 + # There is only one LoRA layer. + self.n_slices = 1 def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: - tensor_model_parallel_rank = get_tensor_model_parallel_rank() + shard_size = self.input_size - start_idx = tensor_model_parallel_rank * shard_size - end_idx = (tensor_model_parallel_rank + 1) * shard_size + start_idx = self.tp_rank * shard_size + end_idx = (self.tp_rank + 1) * shard_size lora_a = lora_a[start_idx:end_idx, :] return lora_a @@ -1152,40 +1035,6 @@ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: return bias - def set_lora( - self, - index: int, - lora_a: torch.Tensor, - lora_b: torch.Tensor, - embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, - ): - self.reset_lora(index) - - if self.base_layer.tp_size > 1: - lora_a = self.slice_lora_a(lora_a) - lora_b = self.slice_lora_b(lora_b) - if bias is not None: - bias = self.slice_bias(bias) - - self.lora_a_stacked[index, - 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( - lora_a.T, non_blocking=True) - self.lora_b_stacked[index, - 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( - lora_b.T, non_blocking=True) - if bias is not None: - self.bias_stacked[index, - 0, :bias.shape[0]].copy_(bias.T, - non_blocking=True) - - def apply(self, x: torch.Tensor) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x) - self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, self.bias_stacked, - 1.0) - return output - def forward(self, input_): """Forward of RowParallelLinear @@ -1203,10 +1052,9 @@ def forward(self, input_): input_parallel = input_ else: # TODO: simplify code below - tp_rank = get_tensor_model_parallel_rank() splitted_input = split_tensor_along_last_dim( input_, num_partitions=self.base_layer.tp_size) - input_parallel = splitted_input[tp_rank].contiguous() + input_parallel = splitted_input[self.tp_rank].contiguous() # Matrix multiply. 
output_parallel = self.apply(input_parallel) diff --git a/vllm/lora/models.py b/vllm/lora/models.py index 2ffefe61427e3..9855b57d0c9c9 100644 --- a/vllm/lora/models.py +++ b/vllm/lora/models.py @@ -555,17 +555,17 @@ def create_dummy_lora( input_dim, output_dim, rank, - module.lora_a_stacked.dtype, + module.lora_a_stacked[0].dtype, "cpu", embeddings_tensor_dim=embeddings_tensor_dim, bias_enabled=bias_enabled) else: lora = LoRALayerWeights.create_dummy_lora_weights( module_name, - module.lora_a_stacked.shape[-1], - module.lora_b_stacked.shape[-2], + module.lora_a_stacked[0].shape[-1], + module.lora_b_stacked[0].shape[-2], rank, - module.lora_a_stacked.dtype, + module.lora_a_stacked[0].dtype, "cpu", bias_enabled=bias_enabled, ) diff --git a/vllm/lora/punica.py b/vllm/lora/punica.py index 3f775b7ba363e..563d1181d6fcb 100644 --- a/vllm/lora/punica.py +++ b/vllm/lora/punica.py @@ -362,7 +362,7 @@ def long_lora_indices(self) -> torch.Tensor: long_lora_len = self.indices_len[4] return self._long_lora_indices[:long_lora_len] - def shrink_prefill( + def _shrink_prefill( self, y: torch.Tensor, x: torch.Tensor, @@ -380,7 +380,7 @@ def shrink_prefill( scale, ) - def shrink_decode( + def _shrink_decode( self, y: torch.Tensor, x: torch.Tensor, @@ -389,7 +389,7 @@ def shrink_decode( ): bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale) - def expand_prefill( + def _expand_prefill( self, y: torch.Tensor, x: torch.Tensor, @@ -407,7 +407,7 @@ def expand_prefill( add_input, ) - def expand_decode( + def _expand_decode( self, y: torch.Tensor, x: torch.Tensor, @@ -416,7 +416,7 @@ def expand_decode( ): bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_input) - def expand_slice_prefill( + def _expand_slice_prefill( self, y: torch.Tensor, x: torch.Tensor, @@ -438,7 +438,7 @@ def expand_slice_prefill( add_input, ) - def expand_slice_decode( + def _expand_slice_decode( self, y: torch.Tensor, x: torch.Tensor, @@ -450,41 +450,35 @@ def expand_slice_decode( bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset, y_slice_size, add_input) - def apply_bias( - self, - indices: torch.Tensor, - output: torch.Tensor, - bias_stacked: torch.Tensor, - ): - """Applies bias to output - - Input shapes: - bias_stacked: (num_loras, output_dim) - indices: (batch_size) - output: (batch_size, output_dim) + def _apply_expand(self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + y_offset: Optional[int], + y_slice_size: Optional[int], + add_input: bool = True): + """ + Perform the ` y[:,y_offset:y_offset+y_slice_size]+=x@w_t_all` + computation, which is suitable for the + GEMM of lora'b. 
""" - org_output = output - output = output.view(-1, output.shape[-1]) - indices = indices.view(-1) - - bias_stacked = bias_stacked.view(-1, bias_stacked.shape[-1]) - bias_stacked = bias_stacked[indices] - bias_stacked[indices == -1] = 0 - output += bias_stacked - return output.view_as(org_output) + expand_slice_fun: Callable = (self._expand_slice_prefill + if self.is_prefill else + self._expand_slice_decode) + expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_input) - def apply_bias_packed_nslice( + def _apply_bias( self, indices: torch.Tensor, output: torch.Tensor, output_slices: Tuple[int, ...], - bias_stacked: Tuple[Optional[torch.Tensor], ...], + lora_bias_stacked: Tuple[Optional[torch.Tensor], ...], ): """Applies bias to output Input shapes: - bias_stacked: 3 element tuple of (num_loras, output_dim) + lora_bias_stacked: 3 element tuple of (num_loras, output_dim) indices: (batch_size) output: (batch_size, q_slice_size + 2*kv_slice_size) output_slices: n-1 element tuple of (slice_size...), @@ -496,7 +490,7 @@ def apply_bias_packed_nslice( offset_left = 0 for slice_idx, slice in enumerate(output_slices): - bias = bias_stacked[slice_idx] + bias = lora_bias_stacked[slice_idx] if bias is not None: bias = bias.view(-1, bias.shape[-1]) bias = bias[indices] @@ -506,7 +500,7 @@ def apply_bias_packed_nslice( return output.view_as(org_output) - def add_shrink( + def _apply_shrink( self, y: torch.Tensor, x: torch.Tensor, @@ -517,188 +511,215 @@ def add_shrink( Perform the ` y+=x@w_t_all` computation, which is suitable for the GEMM of lora'a. When `is_prefill is` true, it indicates that it is currently the - prefill stage, and the `shrink_prefill` function should be called. - Otherwise, it is the decode stage, and the shrink_decode function + prefill stage, and the `_shrink_prefill` function should be called. + Otherwise, it is the decode stage, and the _shrink_decode function should be called. """ - shrink_fun: Callable = (self.shrink_prefill - if self.is_prefill else self.shrink_decode) + y_org = y + y = y.view(-1, y.shape[-1]) + shrink_fun: Callable = (self._shrink_prefill + if self.is_prefill else self._shrink_decode) shrink_fun(y, x, w_t_all, scale) + y = y.view_as(y_org) - def add_expand( + def add_shrink( self, - y: torch.Tensor, + y: Union[Tuple[torch.Tensor, ...], torch.Tensor], x: torch.Tensor, - w_t_all: torch.Tensor, - bias_all: Optional[torch.Tensor], - add_input: bool = True, + lora_a_stacked: Tuple[torch.Tensor, ...], + scale: float, ): """ - Perform the ` y+=x@w_t_all+bias` computation, which is suitable for the - GEMM of lora'b. - When `is_prefill` is true, it indicates that it is currently the - prefill stage, and the `expand_prefill` function should be called. - Otherwise, it is the decode stage, and the expand_decode function + Performs GEMM for multiple slices of lora_a. + When `is_prefill is` true, it indicates that it is currently the + prefill stage, and the `_shrink_prefill` function should be called. + Otherwise, it is the decode stage, and the _shrink_decode function should be called. 
- """ - if bias_all is not None: - y = self.apply_bias(self.token_lora_indices, y, bias_all) - - expand_fun: Callable = (self.expand_prefill - if self.is_prefill else self.expand_decode) - expand_fun(y, x, w_t_all, add_input) - - def add_expand_slice(self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - bias_all: Optional[torch.Tensor], - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool = True): - """ - Similar to `add_expand` - """ - if bias_all is not None: - y = self.apply_bias(self.token_lora_indices, y, bias_all) + + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += (x @ lora_a_stacked[i]) * scale + + Args: + y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights + scale (float): Scaling factor for the operation + """ - expand_slice_fun: Callable = (self.expand_slice_prefill - if self.is_prefill else - self.expand_slice_decode) - expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_input) + x = x.view(-1, x.shape[-1]) + # TODO fuse these kernels + for slice_idx in range(len(lora_a_stacked)): + self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx], + scale) - def add_expand_packed_nslice(self, y: torch.Tensor, x: torch.Tensor, - lora_b_stacked: Tuple[torch.Tensor, ...], - bias_stacked: Optional[Tuple[torch.Tensor, - ...]], - scale: float, - output_slices: Tuple[int, ...]) -> None: - """ - Similar to `add_expand` + def add_expand( + self, + y: torch.Tensor, + x: Union[Tuple[torch.Tensor, ...], torch.Tensor], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + output_slices: Tuple[int, ...], + offset_start: int = 0, + add_input=True, + ) -> None: """ + Performs GEMM and bias addition for multiple slices of lora_b. + + Semantics: + for i in range(len(lora_b_stacked)): + slice = output_slices[i] + y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + + lora_bias_stacked[i] + offset += slice + + Args: + y (torch.Tensor): Output tensor. + x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): + bias's weight + output_slices (Tuple[int, ...]): Every slice's size + add_input (bool): Defaults to True. 
+ """ y_org = y y = y.view(-1, y.shape[-1]) - offset_left = 0 - if bias_stacked is not None: - self.apply_bias_packed_nslice(self.token_lora_indices, y, - output_slices, bias_stacked) + offset_left = offset_start + if lora_bias_stacked is not None: + self._apply_bias(self.token_lora_indices, y, output_slices, + lora_bias_stacked) for slice_idx in range(len(lora_b_stacked)): - self.add_expand_slice(y, - x[slice_idx], - lora_b_stacked[slice_idx], - None, - offset_left, - output_slices[slice_idx], - add_input=True) + self._apply_expand( + y, + x[slice_idx], + lora_b_stacked[slice_idx], + offset_left, + output_slices[slice_idx], + add_input=add_input, + ) offset_left += output_slices[slice_idx] - y = y.view_as(y_org) - def add_lora(self, - y: torch.Tensor, - x: torch.Tensor, - wa_t_all: torch.Tensor, - wb_t_all: torch.Tensor, - bias_all: Optional[torch.Tensor], - scale: float, - y_offset: Optional[int] = None, - y_slice_size: Optional[int] = None, - *, - buffer: Optional[torch.Tensor] = None) -> None: + def add_lora_embedding( + self, + y: torch.Tensor, + x: torch.Tensor, + lora_b_stacked: torch.Tensor, + add_input: bool = True, + ): + """ + Applies lora specifically for VocabParallelEmbeddingWithLoRA. + + Semantics: + y += x @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_b_stacked (torch.Tensor): lora_b's weights. + add_input (bool): Default to True. + + """ + + # Embedding layer only need expand op + expand_fun: Callable = (self._expand_prefill + if self.is_prefill else self._expand_decode) + expand_fun(y, x, lora_b_stacked, add_input) + + def add_lora_linear( + self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + scale: float, + output_slices: Tuple[int, ...], + *, + buffer: Optional[Tuple[torch.Tensor, ...]] = None) -> None: """ + Applicable to linear-related lora. + Semantics: - y[i] += ( - x[i].unsqueeze(0) - @ wa_t_all[indices[i], layer_idx, :, :].transpose(-1, -2) - @ wb_t_all[indices[i], layer_idx, :, :].transpose(-1, -2) - * scale - ).squeeze(0)+bias[i] + for i in range(len(lora_a_stacked)): + y[i] += ( + x[i].unsqueeze(0) + @ lora_a_stacked[indices[i], layer_idx, :, :] + @ lora_b_stacked[indices[i], layer_idx, :, :] + * scale + ).squeeze(0)+lora_bias_stacked[i] + Args: - y (torch.Tensor): Output tensor. Will be changed in-place. + y (torch.Tensor): Output tensor. Will be changed in-place. x (torch.Tensor): Input tensor - wa_t_all (torch.Tensor): lora_a's weight - wb_t_all (torch.Tensor): lora_b's weight - bias_all: (torch.Tensor): lora's bias + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weight. + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight. + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): lora's bias. scale (float): Scaling factor. - y_offset (Optional[int], optional): Offset to apply to the starting - column of y. - y_slice_size (Optional[int], optional): Size of the y column slice. - buffer (Optional[torch.Tensor], optional): Defaults to None. + output_slices (Tuple[int, ...]): Every slice's size. + buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None. 
""" - y_org = y - y = y.view(-1, y.shape[-1]) - x = x.view(-1, x.shape[-1]) - r = wb_t_all.size(-1) + + assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices) + if lora_bias_stacked is not None: + assert len(lora_bias_stacked) == len(output_slices) + y = self._apply_bias(self.token_lora_indices, y, output_slices, + lora_bias_stacked) + if buffer is None: + r = lora_b_stacked[0].size(-1) # We set the buffer to be float32 by default ,refer to: # https://github.com/triton-lang/triton/issues/1387 - buffer = torch.zeros((x.size(0), r), - dtype=torch.float32, - device=x.device) - if bias_all is not None: - y = self.apply_bias(self.token_lora_indices, y, bias_all) - self.add_shrink(buffer, x, wa_t_all, scale) - if y_offset is None and y_slice_size is None: - self.add_expand(y, buffer, wb_t_all, bias_all=None, add_input=True) - else: - self.add_expand_slice(y, - buffer, - wb_t_all, - None, - y_offset, - y_slice_size, - add_input=True) - y = y.view_as(y_org) - - def add_lora_packed_nslice(self, y: torch.Tensor, x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - lora_b_stacked: Tuple[torch.Tensor, ...], - bias_all: Tuple[Optional[torch.Tensor], - ...], scale: float, - output_slices: Tuple[int, ...]) -> None: - """ - Applies lora to each input. Similar to add_lora, This method is - used for layers that are composed of multiple sublayers - (slices) packed together. - """ - y_org = y - x = x.view(-1, x.shape[-1]) - y = y.view(-1, y.shape[-1]) - offset_left = 0 - if bias_all is not None: - y = self.apply_bias_packed_nslice(self.token_lora_indices, y, - output_slices, bias_all) - # TODO fuse these kernels - for slice_idx in range(len(output_slices)): - self.add_lora(y, x, lora_a_stacked[slice_idx], - lora_b_stacked[slice_idx], None, scale, offset_left, - output_slices[slice_idx]) - offset_left += output_slices[slice_idx] - - y = y.view_as(y_org) + buffer = tuple( + torch.zeros( + (x.size(0), r), dtype=torch.float32, device=x.device) + for _ in range(len(output_slices))) + self.add_shrink(buffer, x, lora_a_stacked, scale) + self.add_expand(y, + buffer, + lora_b_stacked, + None, + output_slices, + add_input=True) def add_lora_logits(self, y: torch.Tensor, x: torch.Tensor, - wa_t_all: torch.Tensor, - wb_t_all: torch.Tensor, + lora_a_stacked: torch.Tensor, + lora_b_stacked: torch.Tensor, scale, *, buffer: Optional[torch.Tensor] = None) -> None: """ - LogitsProcessorWithLoRA always using bgmv - """ + Applies lora specifically for LogitsProcessorWithLoRA. + + Semantics: + buffer = (x @ lora_a_stacked) * scale + y += buffer @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_a_stacked (torch.Tensor): lora_a's weights. + lora_b_stacked (torch.Tensor):lora_b's weights. + scale (float): Scaling factor. + buffer (Optional[torch.Tensor]):Default to None. + """ y_org = y y = y.view(-1, y.shape[-1]) x = x.view(-1, x.shape[-1]) - r = wb_t_all.size(-1) + r = lora_b_stacked.size(-1) if buffer is None: # We set the buffer to be float32 by default ,refer to: # https://github.com/triton-lang/triton/issues/1387 buffer = torch.zeros((x.size(0), r), dtype=torch.float32, device=x.device) - - bgmv_shrink(x, wa_t_all, buffer, self.sampler_indices, scale) - bgmv_expand(buffer, wb_t_all, y, self.sampler_indices, add_inputs=True) + # LogitsProcessorWithLoRA always using bgmv. 
+ bgmv_shrink(x, lora_a_stacked, buffer, self.sampler_indices, scale) + bgmv_expand(buffer, + lora_b_stacked, + y, + self.sampler_indices, + add_inputs=True) y = y.view_as(y_org) From 998eeafe58c0263323b7fd8813c8b3d3f839bcbc Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Fri, 6 Dec 2024 00:05:52 +0800 Subject: [PATCH 244/397] [CI/Build] Bump test transformers version (#10106) Signed-off-by: Isotr0py <2037008807@qq.com> Signed-off-by: DarkLight1337 Co-authored-by: DarkLight1337 --- requirements-test.txt | 2 +- .../vision_language/test_models.py | 25 +------------------ .../vision_language/test_pixtral.py | 2 +- .../vision_language/test_llava_next.py | 4 --- tests/models/test_initialization.py | 5 ---- 5 files changed, 3 insertions(+), 35 deletions(-) diff --git a/requirements-test.txt b/requirements-test.txt index a59b85023948b..19369254dbe26 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -550,7 +550,7 @@ tqdm==4.66.6 # transformers tqdm-multiprocess==0.0.11 # via lm-eval -transformers==4.45.2 +transformers==4.46.3 # via # lm-eval # peft diff --git a/tests/models/decoder_only/vision_language/test_models.py b/tests/models/decoder_only/vision_language/test_models.py index dbb0b4d350d10..924f19c4448b8 100644 --- a/tests/models/decoder_only/vision_language/test_models.py +++ b/tests/models/decoder_only/vision_language/test_models.py @@ -6,7 +6,6 @@ from typing import Type import pytest -import transformers from transformers import AutoModelForVision2Seq from transformers.utils import is_flash_attn_2_available @@ -187,12 +186,6 @@ comparator=check_outputs_equal, max_tokens=8, dtype="bfloat16", - marks=[ - pytest.mark.skipif( - transformers.__version__ < "4.46.2", - reason="Model broken in HF, see huggingface/transformers#34379" - ), - ] ), "fuyu": VLMTestInfo( models=["adept/fuyu-8b"], @@ -243,13 +236,7 @@ max_model_len=8192, max_num_seqs=2, auto_cls=AutoModelForVision2Seq, - marks=[ - pytest.mark.skipif( - transformers.__version__ < "4.46.0", - reason="Model introduced in HF >= 4.46.0" - ), - large_gpu_mark(min_gb=48), - ], + marks=[large_gpu_mark(min_gb=48)], ), "intern_vl": VLMTestInfo( models=[ @@ -318,12 +305,6 @@ auto_cls=AutoModelForVision2Seq, vllm_output_post_proc=model_utils.llava_video_vllm_to_hf_output, image_sizes=[((1669, 2560), (2560, 1669), (183, 488), (488, 183))], - marks=[ - pytest.mark.skipif( - transformers.__version__ < "4.46.2", - reason="Model broken with changes in transformers 4.46" - ) - ], ), "minicpmv_25": VLMTestInfo( models=["openbmb/MiniCPM-Llama3-V-2_5"], @@ -404,10 +385,6 @@ cuda_device_count_stateless() < 2, reason="Need at least 2 GPUs to run the test.", ), - pytest.mark.skipif( - transformers.__version__ < "4.46.2", - reason="Model broken in HF, see huggingface/transformers#34379" - ) ], **COMMON_BROADCAST_SETTINGS # type: ignore ), diff --git a/tests/models/decoder_only/vision_language/test_pixtral.py b/tests/models/decoder_only/vision_language/test_pixtral.py index 6233860747b9c..90c0fab99054c 100644 --- a/tests/models/decoder_only/vision_language/test_pixtral.py +++ b/tests/models/decoder_only/vision_language/test_pixtral.py @@ -228,7 +228,7 @@ def test_model_engine(vllm_runner, model: str, dtype: str) -> None: name_1="output") -@large_gpu_test(min_gb=24) +@large_gpu_test(min_gb=48) @pytest.mark.parametrize( "prompt,expected_ranges", [(_create_engine_inputs_hf(IMG_URLS[:1]), [{ diff --git a/tests/models/embedding/vision_language/test_llava_next.py b/tests/models/embedding/vision_language/test_llava_next.py index 
329c6ba279f89..bab8d3897579e 100644 --- a/tests/models/embedding/vision_language/test_llava_next.py +++ b/tests/models/embedding/vision_language/test_llava_next.py @@ -2,7 +2,6 @@ import pytest import torch.nn.functional as F -import transformers from transformers import AutoModelForVision2Seq from ....conftest import IMAGE_ASSETS, HfRunner, PromptImageInput, VllmRunner @@ -86,9 +85,6 @@ def _run_test( ) -@pytest.mark.skipif(transformers.__version__.startswith("4.46"), - reason="Model broken with changes in transformers 4.46") -@pytest.mark.core_model @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("dtype", ["half"]) def test_models_text( diff --git a/tests/models/test_initialization.py b/tests/models/test_initialization.py index 2a072737db043..3b728f2744fca 100644 --- a/tests/models/test_initialization.py +++ b/tests/models/test_initialization.py @@ -1,7 +1,6 @@ from unittest.mock import patch import pytest -import transformers from transformers import PretrainedConfig from vllm import LLM @@ -11,10 +10,6 @@ @pytest.mark.parametrize("model_arch", HF_EXAMPLE_MODELS.get_supported_archs()) def test_can_initialize(model_arch): - if (model_arch in {"Idefics3ForConditionalGeneration", "GlmForCausalLM"} - and transformers.__version__ < "4.46.0"): - pytest.skip(reason="Model introduced in HF >= 4.46.0") - model_info = HF_EXAMPLE_MODELS.get_hf_info(model_arch) if not model_info.is_available_online: pytest.skip("Model is not available online") From a43065272f73a7468b1a35dd44fb5b0ed80f88c7 Mon Sep 17 00:00:00 2001 From: Konrad Zawora Date: Thu, 5 Dec 2024 17:47:46 +0100 Subject: [PATCH 245/397] [Misc][Gaudi] Avoid torch.compile and enable lazy collectives (#10897) Signed-off-by: Konrad Zawora --- vllm/plugins/__init__.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 81ee9975cdc4a..ae6e5c0a3481f 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -29,6 +29,20 @@ def load_general_plugins(): if current_platform.is_xpu(): # see https://github.com/pytorch/pytorch/blob/8cada5cbe5450e17c26fb8b358116785324537b2/torch/_dynamo/config.py#L158 # noqa os.environ['TORCH_COMPILE_DISABLE'] = 'True' + if current_platform.is_hpu(): + # NOTE(kzawora): PT HPU lazy backend (PT_HPU_LAZY_MODE = 1) + # does not support torch.compile + # Eager backend (PT_HPU_LAZY_MODE = 0) must be selected for + # torch.compile support + is_lazy = os.environ.get('PT_HPU_LAZY_MODE', '1') == '1' + if is_lazy: + # see https://github.com/pytorch/pytorch/blob/43c5f59/torch/_dynamo/config.py#L158 + torch._dynamo.config.disable = True + # NOTE(kzawora) multi-HPU inference with HPUGraphs (lazy-only) + # requires enabling lazy collectives + # see https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_HPU_Graphs.html # noqa: E501 + os.environ['PT_HPU_ENABLE_LAZY_COLLECTIVES'] = 'true' + global plugins_loaded if plugins_loaded: return From 9743d64e4e04a88174c76553fcbffa33a18c7db5 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 5 Dec 2024 08:54:47 -0800 Subject: [PATCH 246/397] [ci][build] add tests for python only compilation (#10915) Signed-off-by: youkaichao --- .buildkite/test-pipeline.yaml | 11 +++++-- setup.py | 13 ++++---- .../lazy_torch_compile.py} | 0 tests/standalone_tests/python_only_compile.sh | 30 +++++++++++++++++++ 4 files changed, 46 insertions(+), 8 deletions(-) rename tests/{test_lazy_torch_compile.py => standalone_tests/lazy_torch_compile.py} (100%) create mode 100644 
tests/standalone_tests/python_only_compile.sh diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 455f02a2062f1..bf0de3f69f14e 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -50,9 +50,9 @@ steps: - tests/multimodal - tests/test_utils - tests/worker - - tests/test_lazy_torch_compile.py + - tests/standalone_tests/lazy_torch_compile.py commands: - - python3 test_lazy_torch_compile.py + - python3 standalone_tests/lazy_torch_compile.py - pytest -v -s mq_llm_engine # MQLLMEngine - pytest -v -s async_engine # AsyncLLMEngine - NUM_SCHEDULER_STEPS=4 pytest -v -s async_engine/test_async_llm_engine.py @@ -61,6 +61,13 @@ steps: - pytest -v -s test_utils.py # Utils - pytest -v -s worker # Worker +- label: Python-only Installation Test + source_file_dependencies: + - tests/standalone_tests/python_only_compile.sh + - setup.py + commands: + - bash standalone_tests/python_only_compile.sh + - label: Basic Correctness Test # 30min #mirror_hardwares: [amd] fast_check: true diff --git a/setup.py b/setup.py index 182dabe449674..fcfaa207c176a 100644 --- a/setup.py +++ b/setup.py @@ -465,14 +465,15 @@ def get_vllm_version() -> str: if envs.VLLM_TARGET_DEVICE == "empty": version += f"{sep}empty" elif _is_cuda(): - cuda_version = str(get_nvcc_cuda_version()) - if cuda_version != MAIN_CUDA_VERSION: - cuda_version_str = cuda_version.replace(".", "")[:3] - # skip this for source tarball, required for pypi - if "sdist" not in sys.argv: - version += f"{sep}cu{cuda_version_str}" if envs.VLLM_USE_PRECOMPILED: version += ".precompiled" + else: + cuda_version = str(get_nvcc_cuda_version()) + if cuda_version != MAIN_CUDA_VERSION: + cuda_version_str = cuda_version.replace(".", "")[:3] + # skip this for source tarball, required for pypi + if "sdist" not in sys.argv: + version += f"{sep}cu{cuda_version_str}" elif _is_hip(): # Get the HIP version hipcc_version = get_hipcc_rocm_version() diff --git a/tests/test_lazy_torch_compile.py b/tests/standalone_tests/lazy_torch_compile.py similarity index 100% rename from tests/test_lazy_torch_compile.py rename to tests/standalone_tests/lazy_torch_compile.py diff --git a/tests/standalone_tests/python_only_compile.sh b/tests/standalone_tests/python_only_compile.sh new file mode 100644 index 0000000000000..f00895c0997f1 --- /dev/null +++ b/tests/standalone_tests/python_only_compile.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# This script tests if the python only compilation works correctly +# for users who do not have any compilers installed on their system + +set -e +set -x + +cd /vllm-workspace/ + +# uninstall vllm +pip3 uninstall -y vllm +# restore the original files +mv test_docs/vllm ./vllm + +# remove all compilers +apt remove --purge build-essential -y +apt autoremove -y + +echo 'import os; os.system("touch /tmp/changed.file")' >> vllm/__init__.py + +VLLM_USE_PRECOMPILED=1 pip3 install -vvv -e . + +# Run the script +python3 -c 'import vllm' + +# Check if the clangd log file was created +if [ ! 
-f /tmp/changed.file ]; then + echo "changed.file was not created, python only compilation failed" + exit 1 +fi From db87eb6c67271eb61ba9fd8559ce811a1a398a4d Mon Sep 17 00:00:00 2001 From: youkaichao Date: Thu, 5 Dec 2024 20:30:41 -0800 Subject: [PATCH 247/397] [torch.compile] use size tuning for specific sizes (#10933) Signed-off-by: youkaichao --- vllm/compilation/backends.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index d49a83fe3981f..9773ba8cec779 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -43,6 +43,12 @@ def wrap_inductor(graph, if additional_inductor_config is not None: current_config.update(additional_inductor_config) + if isinstance(runtime_shape, int): + # for a specific batchsize, tuning triton kernel parameters + # can be beneficial + current_config["max_autotune"] = True + current_config["coordinate_descent_tuning"] = True + # inductor can inplace modify the graph, so we need to copy it # see https://github.com/pytorch/pytorch/issues/138980 graph = copy.deepcopy(graph) From b031a455a9fa9d57952281dac2a1146d6440790f Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 6 Dec 2024 02:07:15 -0800 Subject: [PATCH 248/397] [torch.compile] add logging for compilation time (#10941) Signed-off-by: youkaichao Co-authored-by: Woosuk Kwon --- vllm/compilation/backends.py | 56 ++++++++++++++++++++++++++++------ vllm/compilation/decorators.py | 5 +++ vllm/compilation/monitor.py | 14 +++++++++ vllm/config.py | 2 ++ vllm/engine/llm_engine.py | 4 +++ vllm/v1/engine/core.py | 4 +++ 6 files changed, 75 insertions(+), 10 deletions(-) create mode 100644 vllm/compilation/monitor.py diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 9773ba8cec779..84dde558626af 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -1,5 +1,6 @@ import copy import dataclasses +import time from contextlib import ExitStack from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple from unittest.mock import patch @@ -14,6 +15,7 @@ from .counter import compilation_counter from .inductor_pass import InductorPass +from .monitor import end_monitoring_torch_compile from .pass_manager import PostGradPassManager logger = init_logger(__name__) @@ -22,20 +24,21 @@ def wrap_inductor(graph, example_inputs, additional_inductor_config, - do_logging=False, + compilation_config: CompilationConfig, + graph_index: int = 0, + num_graphs: int = 1, runtime_shape: Optional[int] = None, use_inductor: bool = True): + if graph_index == 0: + # before compiling the first graph, record the start time + global compilation_start_time + compilation_start_time = time.time() + if not use_inductor: return graph compilation_counter.num_inductor_compilations += 1 - if do_logging: - if runtime_shape is None: - logger.info("Compiling a graph for general shape") - else: - logger.info("Compiling a graph for shape %s", runtime_shape) - from torch._inductor import config current_config = config.shallow_copy_dict() from torch._inductor.compile_fx import compile_fx @@ -52,7 +55,23 @@ def wrap_inductor(graph, # inductor can inplace modify the graph, so we need to copy it # see https://github.com/pytorch/pytorch/issues/138980 graph = copy.deepcopy(graph) - return compile_fx(graph, example_inputs, config_patches=current_config) + compiled_graph = compile_fx(graph, + example_inputs, + config_patches=current_config) + + # after compiling the last graph, record the end time + if 
graph_index == num_graphs - 1: + now = time.time() + elapsed = now - compilation_start_time + compilation_config.compilation_time += elapsed + if runtime_shape is None: + logger.info("Compiling a graph for general shape takes %.2f s", + elapsed) + else: + logger.info("Compiling a graph for shape %s takes %.2f s", + runtime_shape, elapsed) + + return compiled_graph @dataclasses.dataclass @@ -114,6 +133,8 @@ def split_graph(graph: fx.GraphModule, # we share the global graph pool among all the backends global_graph_pool = None +compilation_start_time = 0.0 + class PiecewiseCompileInterpreter(torch.fx.Interpreter): """Code adapted from `torch.fx.passes.shape_prop.ShapeProp`. @@ -157,12 +178,15 @@ def call_module(self, target: torch.fx.node.Target, sym_shape_indices = [ i for i, x in enumerate(args) if isinstance(x, torch.SymInt) ] + global compilation_start_time compiled_graph_for_general_shape = wrap_inductor( submod, args, self.compilation_configs.inductor_compile_config, + self.compilation_configs, + graph_index=index, + num_graphs=len(self.compile_submod_names), runtime_shape=None, - do_logging=index == 0, use_inductor=self.compilation_configs.use_inductor) self.module.__dict__[target] = PiecewiseBackend( @@ -379,6 +403,8 @@ def __init__(self, graph: fx.GraphModule, # the entries for different shapes that we need to either # compile or capture cudagraph self.concrete_size_entries: Dict[int, ConcreteSizeEntry] = {} + self.to_be_compiled_sizes: Set[int] = self.compile_sizes.union( + self.capture_sizes) for shape in self.compile_sizes.union(self.capture_sizes): self.concrete_size_entries[shape] = ConcreteSizeEntry( runtime_shape=shape, @@ -389,6 +415,9 @@ def __init__(self, graph: fx.GraphModule, def __call__(self, *args) -> Any: if not self.first_run_finished: self.first_run_finished = True + # no specific sizes to compile + if self.is_last_graph and not self.to_be_compiled_sizes: + end_monitoring_torch_compile(self.compilation_configs) return self.compiled_graph_for_general_shape(*args) runtime_shape = args[self.sym_shape_indices[0]] @@ -403,15 +432,22 @@ def __call__(self, *args) -> Any: if entry.need_to_compile and not entry.compiled: entry.compiled = True + self.to_be_compiled_sizes.remove(runtime_shape) # args are real arguments entry.runnable = wrap_inductor( self.graph, args, self.compilation_configs.inductor_compile_config, + self.compilation_configs, + graph_index=self.piecewise_compile_index, + num_graphs=self.total_piecewise_compiles, runtime_shape=runtime_shape, - do_logging=self.is_first_graph, use_inductor=self.compilation_configs.use_inductor) + # finished compilations for all required shapes + if self.is_last_graph and not self.to_be_compiled_sizes: + end_monitoring_torch_compile(self.compilation_configs) + if not entry.use_cudagraph: return entry.runnable(*args) diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index 8700243c9d904..a32dced57e5b3 100644 --- a/vllm/compilation/decorators.py +++ b/vllm/compilation/decorators.py @@ -11,6 +11,8 @@ from vllm.sequence import IntermediateTensors from vllm.utils import supports_dynamo +from .monitor import start_monitoring_torch_compile + logger = init_logger(__name__) _T = TypeVar("_T", bound=type[nn.Module]) @@ -155,6 +157,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): TorchCompileWrapperWithCustomDispatcher.__init__( self, compilation_level=vllm_config.compilation_config.level) + if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE: + 
start_monitoring_torch_compile(vllm_config.compilation_config) + cls.__init__ = __init__ def __call__(self, *args, **kwargs): diff --git a/vllm/compilation/monitor.py b/vllm/compilation/monitor.py new file mode 100644 index 0000000000000..f718e46423212 --- /dev/null +++ b/vllm/compilation/monitor.py @@ -0,0 +1,14 @@ +from vllm.config import CompilationConfig, CompilationLevel +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +def start_monitoring_torch_compile(compilation_config: CompilationConfig): + pass + + +def end_monitoring_torch_compile(compilation_config: CompilationConfig): + if compilation_config.level == CompilationLevel.PIECEWISE: + logger.info("graph compilation takes %.2f s in total", + compilation_config.compilation_time) diff --git a/vllm/config.py b/vllm/config.py index 5c904914a71cf..a5e2702035a5c 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2281,6 +2281,7 @@ def model_post_init(self, __context: Any) -> None: # keep track of enabled and disabled custom ops enabled_custom_ops: Counter[str] = PrivateAttr disabled_custom_ops: Counter[str] = PrivateAttr + compilation_time: float = PrivateAttr # Per-model forward context # Mainly used to store attention cls @@ -2319,6 +2320,7 @@ def model_post_init(self, __context: Any) -> None: self.enabled_custom_ops = Counter() self.disabled_custom_ops = Counter() self.static_forward_context = {} + self.compilation_time = 0.0 def init_backend(self) -> Union[str, Callable]: if self.level == CompilationLevel.NO_COMPILATION: diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 1f3c6197ba1a8..26a8c94099a11 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -473,6 +473,7 @@ def _initialize_kv_caches(self) -> None: The workers will determine the number of blocks in both the GPU cache and the swap CPU cache. 
""" + start = time.time() num_gpu_blocks, num_cpu_blocks = ( self.model_executor.determine_num_available_blocks()) @@ -488,6 +489,9 @@ def _initialize_kv_caches(self) -> None: self.cache_config.num_cpu_blocks = num_cpu_blocks self.model_executor.initialize_cache(num_gpu_blocks, num_cpu_blocks) + elapsed = time.time() - start + logger.info(("init engine (profile, create kv cache, " + "warmup model) took %.2f seconds"), elapsed) @classmethod def _get_executor_cls(cls, diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 397a33eed3896..751eb3b40a68d 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -67,6 +67,7 @@ def __init__( def _initialize_kv_caches(self, cache_config: CacheConfig) -> Tuple[int, int]: + start = time.time() num_gpu_blocks, _ = self.model_executor.determine_num_available_blocks( ) @@ -80,6 +81,9 @@ def _initialize_kv_caches(self, num_cpu_blocks = 0 self.model_executor.initialize_cache(num_gpu_blocks) + elapsed = time.time() - start + logger.info(("init engine (profile, create kv cache, " + "warmup model) took %.2f seconds"), elapsed) return num_gpu_blocks, num_cpu_blocks def add_request(self, request: EngineCoreRequest): From 222f5b082a62d0b2675cb461e223ae43368eea92 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 6 Dec 2024 18:41:23 +0800 Subject: [PATCH 249/397] [CI/Build] Fix broken multimodal test (#10950) --- tests/models/embedding/vision_language/test_llava_next.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/models/embedding/vision_language/test_llava_next.py b/tests/models/embedding/vision_language/test_llava_next.py index bab8d3897579e..329c6ba279f89 100644 --- a/tests/models/embedding/vision_language/test_llava_next.py +++ b/tests/models/embedding/vision_language/test_llava_next.py @@ -2,6 +2,7 @@ import pytest import torch.nn.functional as F +import transformers from transformers import AutoModelForVision2Seq from ....conftest import IMAGE_ASSETS, HfRunner, PromptImageInput, VllmRunner @@ -85,6 +86,9 @@ def _run_test( ) +@pytest.mark.skipif(transformers.__version__.startswith("4.46"), + reason="Model broken with changes in transformers 4.46") +@pytest.mark.core_model @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("dtype", ["half"]) def test_models_text( From a1887f2c96480e597db8c35cb8389c4025fb4db9 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 6 Dec 2024 03:01:23 -0800 Subject: [PATCH 250/397] [torch.compile] fix deprecated code (#10948) Signed-off-by: youkaichao --- vllm/compilation/backends.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 84dde558626af..1206424ae1e3f 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -40,7 +40,7 @@ def wrap_inductor(graph, compilation_counter.num_inductor_compilations += 1 from torch._inductor import config - current_config = config.shallow_copy_dict() + current_config = config.get_config_copy() from torch._inductor.compile_fx import compile_fx if additional_inductor_config is not None: From 8b5963185512eb7799f12240570e0ac7e7462a88 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Fri, 6 Dec 2024 10:34:29 -0500 Subject: [PATCH 251/397] [Core] Support Lark grammars for XGrammar (#10870) Signed-off-by: mgoin --- .../guided_decoding/__init__.py | 8 - .../guided_decoding/xgrammar_decoding.py | 17 +- .../guided_decoding/xgrammar_utils.py | 162 ++++++++++++++++++ 3 files changed, 178 insertions(+), 9 deletions(-) create mode 100644 
vllm/model_executor/guided_decoding/xgrammar_utils.py diff --git a/vllm/model_executor/guided_decoding/__init__.py b/vllm/model_executor/guided_decoding/__init__.py index a81377341e095..e631aec928ec5 100644 --- a/vllm/model_executor/guided_decoding/__init__.py +++ b/vllm/model_executor/guided_decoding/__init__.py @@ -73,14 +73,6 @@ def maybe_backend_fallback( "Falling back to use outlines instead.") guided_params.backend = "outlines" - # xgrammar only supports EBNF grammars and uses the GBNF format - # https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md - elif (guided_params.grammar is not None - and "::=" not in guided_params.grammar): - logger.warning("xgrammar only supports EBNF grammars. " - "Falling back to use outlines instead.") - guided_params.backend = "outlines" - # xgrammar doesn't support some JSON schema features elif (guided_params.json is not None and has_xgrammar_unsupported_json_features(guided_params.json)): diff --git a/vllm/model_executor/guided_decoding/xgrammar_decoding.py b/vllm/model_executor/guided_decoding/xgrammar_decoding.py index 8287cd6cf3aa0..b59a2269d2cd5 100644 --- a/vllm/model_executor/guided_decoding/xgrammar_decoding.py +++ b/vllm/model_executor/guided_decoding/xgrammar_decoding.py @@ -14,6 +14,9 @@ except ImportError: pass +from vllm.model_executor.guided_decoding.xgrammar_utils import ( + convert_lark_to_gbnf, grammar_is_likely_lark) + if TYPE_CHECKING: from transformers import PreTrainedTokenizer @@ -152,7 +155,19 @@ def from_guided_params(cls, tokenizer_hash=tokenizer_hash, max_threads=max_threads) elif guided_params.grammar: - return cls(grammar_str=guided_params.grammar, + # XGrammar only supports GBNF grammars, so we must convert Lark + if grammar_is_likely_lark(guided_params.grammar): + try: + grammar_str = convert_lark_to_gbnf(guided_params.grammar) + except ValueError as e: + raise ValueError( + "Failed to convert the grammar from Lark to GBNF. " + "Please either use GBNF grammar directly or specify" + " --guided-decoding-backend=outlines.\n" + f"Conversion error: {str(e)}") from e + else: + grammar_str = guided_params.grammar + return cls(grammar_str=grammar_str, vocab_size=model_config.hf_config.vocab_size, encoded_vocab=encoded_vocab, stop_token_ids=stop_token_ids, diff --git a/vllm/model_executor/guided_decoding/xgrammar_utils.py b/vllm/model_executor/guided_decoding/xgrammar_utils.py new file mode 100644 index 0000000000000..12b42245f4e3d --- /dev/null +++ b/vllm/model_executor/guided_decoding/xgrammar_utils.py @@ -0,0 +1,162 @@ +import re + + +def grammar_is_likely_lark(grammar_str: str) -> bool: + """ + Check if grammar appears to use Lark syntax. + + Args: + grammar_str: Input grammar string + + Returns: + bool: True if grammar appears to be in Lark format, False otherwise + + Examples: + >>> grammar_is_likely_lark("rule: 'abc'") + True + >>> grammar_is_likely_lark("rule ::= 'abc'") + False + """ + if not grammar_str or not isinstance(grammar_str, str): + return False + + for line in grammar_str.split('\n'): + # Remove both comment styles + line = re.sub(r'(#|//).*$', '', line).strip() + if not line: + continue + + # Look for Lark-style rule definitions + if ':' in line and '::=' not in line: + return True + + # Look for Lark-specific features + if any(pattern in line for pattern in ['?start:', '|', '~']): + return True + + return False + + +def convert_lark_to_gbnf(grammar_str: str) -> str: + """ + Convert a Lark grammar string to GBNF format. 
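A small end-to-end sketch of how these two helpers fit together, using an illustrative Lark grammar; the import path corresponds to the new xgrammar_utils module:

from vllm.model_executor.guided_decoding.xgrammar_utils import (
    convert_lark_to_gbnf, grammar_is_likely_lark)

lark_grammar = """
?start: greeting
greeting: "hello" | "hi"
"""

if grammar_is_likely_lark(lark_grammar):
    print(convert_lark_to_gbnf(lark_grammar))
    # root ::= start
    # start ::= greeting
    # greeting ::= "hello" | "hi"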
+ + GBNF reference: + https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md + Lark grammar reference: + https://lark-parser.readthedocs.io/en/latest/grammar.html + + Args: + grammar_str: Input grammar in Lark format + + Returns: + str: Converted grammar in GBNF format + + Examples: + >>> print(convert_lark_to_gbnf("rule: 'hello'")) + root ::= rule + rule ::= "hello" + """ + if not isinstance(grammar_str, str): + raise ValueError(f"Grammar must be a string, got {type(grammar_str)}") + if not grammar_str.strip(): + raise ValueError("Grammar string cannot be empty") + + defined_rules = set() + referenced_rules = set() + output_lines = [] + + def clean_line(line: str) -> str: + """Remove comments and whitespace from line.""" + return re.sub(r'(#|//).*$', '', line).strip() + + def check_quotes(text: str, rule_name: str, line_num: int) -> None: + """Validate quote matching in text.""" + if text.count("'") % 2 != 0 or text.count('"') % 2 != 0: + raise ValueError( + f"Mismatched quotes in {rule_name} on line {line_num}") + + def extract_references(text: str) -> set: + """Extract rule references from text.""" + # Remove quoted strings and special characters + text = re.sub(r'"[^"]*"', '', text) + text = re.sub(r'[+*?()|\[\]{}]', ' ', text) + return set(re.findall(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b', text)) + + # First pass: Find root rule and validate rule definitions + lines = [clean_line(line) for line in grammar_str.split('\n')] + first_rule = None + + for line_num, line in enumerate(lines, 1): + if not line or line.startswith('|'): + continue + + if ':' in line: + try: + name = line.split(':', 1)[0].strip().strip('?') + defined_rules.add(name) + if first_rule is None: + first_rule = name + if name == 'start': + first_rule = 'start' + except IndexError as e: + raise ValueError(f"Invalid rule format on line {line_num}. 
" + "Expected 'rule_name: definition'") from e + + if not defined_rules: + raise ValueError("No valid rules found in grammar") + + # Add root rule + output_lines.append(f"root ::= {first_rule}") + + # Second pass: Process rule definitions and alternatives + current_rule = None + current_definition = [] + + for line_num, line in enumerate(lines, 1): + if not line: + continue + + try: + if ':' in line and not line.startswith('|'): + # Save previous rule if exists + if current_rule: + output_lines.append( + f"{current_rule} ::= {' | '.join(current_definition)}") + + # Process new rule + name, definition = line.split(':', 1) + current_rule = name.strip().strip('?') + + check_quotes(definition, f"rule '{current_rule}'", line_num) + definition = re.sub(r"'([^']*)'", r'"\1"', definition) + referenced_rules.update(extract_references(definition)) + current_definition = [definition.strip()] + + elif line.startswith('|'): + if not current_rule: + raise ValueError(f"Alternative '|' on line {line_num} " + "without a preceding rule definition") + + alt_def = line[1:].strip() + check_quotes(alt_def, f"alternative for rule '{current_rule}'", + line_num) + alt_def = re.sub(r"'([^']*)'", r'"\1"', alt_def) + referenced_rules.update(extract_references(alt_def)) + current_definition.append(alt_def) + + except ValueError as e: + raise ValueError(f"Error on line {line_num}: {str(e)}") from e + + # Add final rule if exists + if current_rule: + output_lines.append( + f"{current_rule} ::= {' | '.join(current_definition)}") + + # Validate all rules are defined + undefined_rules = referenced_rules - defined_rules - {'root'} + if undefined_rules: + raise ValueError("Referenced rules are not defined: " + f"{', '.join(sorted(undefined_rules))}") + + return '\n'.join(output_lines) From 74062740416db8572627dda1f87925268ba2f1d3 Mon Sep 17 00:00:00 2001 From: Sam Stoelinga Date: Fri, 6 Dec 2024 09:03:56 -0800 Subject: [PATCH 252/397] [Doc] add KubeAI to serving integrations (#10837) Signed-off-by: Sam Stoelinga --- docs/source/serving/deploying_with_kubeai.rst | 17 +++++++++++++++++ docs/source/serving/integrations.rst | 1 + 2 files changed, 18 insertions(+) create mode 100644 docs/source/serving/deploying_with_kubeai.rst diff --git a/docs/source/serving/deploying_with_kubeai.rst b/docs/source/serving/deploying_with_kubeai.rst new file mode 100644 index 0000000000000..ec3c065320fd9 --- /dev/null +++ b/docs/source/serving/deploying_with_kubeai.rst @@ -0,0 +1,17 @@ +.. _deploying_with_kubeai: + +Deploying with KubeAI +===================== + +`KubeAI `_ is a Kubernetes operator that enables you to deploy and manage AI models on Kubernetes. It provides a simple and scalable way to deploy vLLM in production. Functionality such as scale-from-zero, load based autoscaling, model caching, and much more is provided out of the box with zero external dependencies. + + +Please see the Installation Guides for environment specific instructions: + +* `Any Kubernetes Cluster `_ +* `EKS `_ +* `GKE `_ + +Once you have KubeAI installed, you can +`configure text generation models `_ +using vLLM. 
\ No newline at end of file diff --git a/docs/source/serving/integrations.rst b/docs/source/serving/integrations.rst index f39997e0e44d9..0dd505a739863 100644 --- a/docs/source/serving/integrations.rst +++ b/docs/source/serving/integrations.rst @@ -6,6 +6,7 @@ Integrations run_on_sky deploying_with_kserve + deploying_with_kubeai deploying_with_triton deploying_with_bentoml deploying_with_cerebrium From c05cfb67da12f84bd142ba51cca98e59139bea42 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 6 Dec 2024 11:25:20 -0800 Subject: [PATCH 253/397] [misc] fix typo (#10960) Signed-off-by: youkaichao --- vllm/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/config.py b/vllm/config.py index a5e2702035a5c..fe4c85441fced 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2082,7 +2082,7 @@ class KVTransferConfig(BaseModel): @classmethod def from_cli(cls, cli_value: str) -> "KVTransferConfig": - """Parse the CLI value for the compilation config.""" + """Parse the CLI value for the kv cache transfer config.""" return KVTransferConfig.model_validate_json(cli_value) def model_post_init(self, __context: Any) -> None: From dcdc3fafe535178037ef0a58f53607b2fb3e4190 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Fri, 6 Dec 2024 11:25:47 -0800 Subject: [PATCH 254/397] [ci] fix broken tests (#10956) Signed-off-by: youkaichao --- vllm/worker/model_runner.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 4388b3c1ee164..1bc5f65c7127f 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1782,6 +1782,9 @@ def need_recv_kv(self, model_input, kv_caches) -> bool: kv_caches: vLLM's paged memory """ + if self.vllm_config.kv_transfer_config is None: + return False + prefill_meta = model_input.attn_metadata.prefill_metadata # check if the current run is profiling @@ -1789,9 +1792,6 @@ def need_recv_kv(self, model_input, kv_caches) -> bool: # check if the current run is prefill is_prefill_run = prefill_meta is not None - if self.vllm_config.kv_transfer_config is None: - return False - return self.vllm_config.kv_transfer_config.is_kv_consumer and ( not is_profile_run) and is_prefill_run @@ -1807,6 +1807,9 @@ def need_send_kv(self, model_input, kv_caches) -> bool: kv_caches: vLLM's paged memory """ + if self.vllm_config.kv_transfer_config is None: + return False + prefill_meta = model_input.attn_metadata.prefill_metadata # check if the current run is profiling @@ -1814,9 +1817,6 @@ def need_send_kv(self, model_input, kv_caches) -> bool: # check if the current run is prefill is_prefill_run = prefill_meta is not None - if self.vllm_config.kv_transfer_config is None: - return False - return self.vllm_config.kv_transfer_config.is_kv_producer and ( not is_profile_run) and is_prefill_run From 69d357ba125a8c4243c25d7d9162f1c93cfddd1f Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 6 Dec 2024 21:30:23 -0500 Subject: [PATCH 255/397] [Core] Cleanup startup logging a bit (#10961) Signed-off-by: Russell Bryant --- vllm/engine/arg_utils.py | 1 + vllm/entrypoints/openai/api_server.py | 8 ++++---- vllm/plugins/__init__.py | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 0b304658f012c..ccd9fac225cba 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -433,6 +433,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'capping to sliding window size') 
parser.add_argument('--use-v2-block-manager', action='store_true', + default=True, help='[DEPRECATED] block manager v1 has been ' 'removed and SelfAttnBlockSpaceManager (i.e. ' 'block manager v2) is now the default. ' diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 6bc31ef83ded4..c7bc30040279c 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -175,8 +175,8 @@ async def build_async_engine_client_from_engine_args( # Select random path for IPC. ipc_path = get_open_zmq_ipc_path() - logger.info("Multiprocessing frontend to use %s for IPC Path.", - ipc_path) + logger.debug("Multiprocessing frontend to use %s for IPC Path.", + ipc_path) # Start RPCServer in separate process (holds the LLMEngine). # the current process might have CUDA context, @@ -249,8 +249,8 @@ def mount_metrics(app: FastAPI): prometheus_multiproc_dir_path = os.getenv("PROMETHEUS_MULTIPROC_DIR", None) if prometheus_multiproc_dir_path is not None: - logger.info("vLLM to use %s as PROMETHEUS_MULTIPROC_DIR", - prometheus_multiproc_dir_path) + logger.debug("vLLM to use %s as PROMETHEUS_MULTIPROC_DIR", + prometheus_multiproc_dir_path) registry = CollectorRegistry() multiprocess.MultiProcessCollector(registry) diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index ae6e5c0a3481f..17f604ea0e202 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -57,7 +57,7 @@ def load_general_plugins(): discovered_plugins = entry_points(group='vllm.general_plugins') if len(discovered_plugins) == 0: - logger.info("No plugins found.") + logger.debug("No plugins found.") return logger.info("Available plugins:") for plugin in discovered_plugins: From acf092d34802b187f27daa8e1626f67552bde193 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Sat, 7 Dec 2024 12:08:54 +0800 Subject: [PATCH 256/397] [Bugfix] Fix test-pipeline.yaml (#10973) Signed-off-by: Jee Jee Li --- .buildkite/test-pipeline.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index bf0de3f69f14e..936e284d9675a 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -237,7 +237,7 @@ steps: source_file_dependencies: - vllm/lora - tests/lora - command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore lora/test_long_context.py lora/test_chatglm3_tp.py lora/test_llama_tp.py + command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py --ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py parallelism: 4 - label: "PyTorch Fullgraph Smoke Test" # 9min From 955fa9533afde0d232e73f079d72239c8a87c636 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sat, 7 Dec 2024 16:50:58 +0800 Subject: [PATCH 257/397] [3/N] Support and implement merged input processor for LLaVA model (#10676) Signed-off-by: DarkLight1337 Co-authored-by: Roger Wang --- tests/multimodal/test_mapper.py | 49 +-- tests/multimodal/test_processing.py | 277 +++++++++++----- .../vllm_add_dummy_model/my_llava.py | 12 +- vllm/inputs/registry.py | 42 ++- vllm/model_executor/models/llava.py | 219 +++++------- vllm/multimodal/base.py | 51 ++- vllm/multimodal/processing.py | 313 +++++++++++------- vllm/multimodal/registry.py | 67 +++- vllm/v1/engine/mm_input_mapper.py | 1 + vllm/v1/engine/processor.py | 16 +- 10 files changed, 626 insertions(+), 421 deletions(-) diff 
--git a/tests/multimodal/test_mapper.py b/tests/multimodal/test_mapper.py index 13ad4a7966b9d..71832acbd17b8 100644 --- a/tests/multimodal/test_mapper.py +++ b/tests/multimodal/test_mapper.py @@ -2,7 +2,7 @@ import numpy as np import pytest -from transformers import CLIPImageProcessor, LlavaNextImageProcessor +from transformers import LlavaNextImageProcessor from vllm.config import ModelConfig from vllm.multimodal import MultiModalRegistry @@ -14,49 +14,6 @@ def mm_registry(): return MultiModalRegistry() -@pytest.mark.parametrize("dtype", ["half", "float"]) -@pytest.mark.parametrize("size_factor", [0.25, 0.5, 1.0]) -def test_clip_image_processor(image_assets, mm_registry, dtype, size_factor): - MODEL_NAME = "llava-hf/llava-1.5-7b-hf" - - hf_processor = CLIPImageProcessor.from_pretrained(MODEL_NAME) - assert isinstance(hf_processor, CLIPImageProcessor) - - model_config = ModelConfig( - model=MODEL_NAME, - task="auto", - tokenizer=MODEL_NAME, - tokenizer_mode="auto", - trust_remote_code=False, - seed=0, - dtype=dtype, - revision=None, - limit_mm_per_prompt={"image": 1}, - ) - - mm_registry.init_mm_limits_per_prompt(model_config) - - for asset in image_assets: - image = rescale_image_size(asset.pil_image, size_factor) - - hf_result = hf_processor.preprocess( - image, - return_tensors="pt", - ) - vllm_result = mm_registry.map_input( - model_config, - {"image": image}, - ) - - assert hf_result.keys() == vllm_result.keys() - for key, hf_tensor in hf_result.items(): - hf_arr: np.ndarray = hf_tensor.numpy() - vllm_arr: np.ndarray = vllm_result[key].numpy() - - assert hf_arr.shape == vllm_arr.shape, f"Failed for key={key}" - assert np.allclose(hf_arr, vllm_arr), f"Failed for key={key}" - - @pytest.mark.parametrize("dtype", ["half", "float"]) @pytest.mark.parametrize("size_factor", [0.25, 0.5, 1.0]) def test_llava_next_image_processor(image_assets, mm_registry, dtype, @@ -107,7 +64,7 @@ def test_llava_next_image_processor(image_assets, mm_registry, dtype, (2, 1, False), (2, 2, True)], ) def test_mm_limits(image_assets, mm_registry, num_images, limit, is_valid): - MODEL_NAME = "llava-hf/llava-1.5-7b-hf" + MODEL_NAME = "llava-hf/llava-v1.6-mistral-7b-hf" model_config = ModelConfig( model=MODEL_NAME, @@ -138,7 +95,7 @@ def test_mm_limits(image_assets, mm_registry, num_images, limit, is_valid): # NOTE: We don't test zero images since the HF processor doesn't support it @pytest.mark.parametrize("num_images", [1, 2]) def test_image_mapper_multi(image_assets, mm_registry, num_images): - MODEL_NAME = "llava-hf/llava-1.5-7b-hf" + MODEL_NAME = "llava-hf/llava-v1.6-mistral-7b-hf" model_config = ModelConfig( model=MODEL_NAME, diff --git a/tests/multimodal/test_processing.py b/tests/multimodal/test_processing.py index b2367060c6c1b..ae668d1dd56c8 100644 --- a/tests/multimodal/test_processing.py +++ b/tests/multimodal/test_processing.py @@ -3,50 +3,15 @@ import pytest from transformers import BatchFeature -from vllm.multimodal.processing import (PromptReplacement, find_text_matches, - find_token_matches, iter_token_matches, - iter_token_runs, replace_text_matches) +from vllm.multimodal.processing import (PromptReplacement, _PlaceholderInfo, + find_text_matches, find_token_matches, + iter_placeholders, iter_token_matches, + replace_text_matches, + replace_token_matches) from vllm.transformers_utils.tokenizer import AnyTokenizer from vllm.utils import full_groupby -# yapf: disable -@pytest.mark.parametrize( - ("token_ids", "expected"), - [ - ([], []), - ( - [32000, 32000, 32000], - [{ "token_id": 32000, 
"start_idx": 0, "length": 3 }], - ), - ( - [9833, 28747, 32000, 32000, 32000, 9833, 28747, 32000, 32000, 918], - [ - { "token_id": 9833, "start_idx": 0, "length": 1 }, - { "token_id": 28747, "start_idx": 1, "length": 1 }, - { "token_id": 32000, "start_idx": 2, "length": 3 }, - { "token_id": 9833, "start_idx": 5, "length": 1 }, - { "token_id": 28747, "start_idx": 6, "length": 1 }, - { "token_id": 32000, "start_idx": 7, "length": 2 }, - { "token_id": 918, "start_idx": 9, "length": 1 }, - ], - ), - ], -) -# yapf: enable -def test_iter_token_runs(token_ids, expected): - result = list(iter_token_runs(token_ids)) - - # Only displayed on error - print("result:", result) - - # Manually constructed results - assert [item._asdict() for item in result] == expected - - # Invariants - assert sum(run_info.length for run_info in result) == len(token_ids) - - # yapf: disable @pytest.mark.parametrize( ("token_ids", "match_ids", "expected"), @@ -170,13 +135,11 @@ def test_find_token_matches(prompt, target_by_key, expected_by_key): # Should not be used since there is nothing to convert to token IDs mock_tokenizer = cast(AnyTokenizer, object()) - result = find_token_matches( - prompt, - [ - PromptReplacement(target, [], 0).bind(key, mock_tokenizer) - for key, target in target_by_key.items() - ], - ) + prompt_repls = [ + PromptReplacement(target, [], 0).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ] + result = find_token_matches(prompt, prompt_repls) # Only displayed on error print("result:", result) @@ -279,13 +242,11 @@ def test_find_text_matches(prompt, target_by_key, expected_by_key): # Should not be used since there is nothing to convert to text mock_tokenizer = cast(AnyTokenizer, object()) - result = find_text_matches( - prompt, - [ - PromptReplacement(target, [], 0).bind(key, mock_tokenizer) - for key, target in target_by_key.items() - ], - ) + prompt_repls = [ + PromptReplacement(target, [], 0).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ] + result = find_text_matches(prompt, prompt_repls) # Only displayed on error print("result:", result) @@ -303,7 +264,7 @@ def test_find_text_matches(prompt, target_by_key, expected_by_key): # yapf: disable @pytest.mark.parametrize( - ("prompt", "target_by_key", "repl_by_key", "expected_by_mm_count"), + ("prompt", "target_by_key", "repl_by_key"), [ ( "Image:Image:!", @@ -322,49 +283,201 @@ def test_find_text_matches(prompt, target_by_key, expected_by_key): # Test multiple repl_count "pattern_3": ("?", 2), }, - { - # Test no replacement - 0: "Image:Image:!", - # Test single replacement - 1: "Image:??", - # Test repeated replacement - 2: "??", - }, ), ] ) +@pytest.mark.parametrize( + ("mm_count", "expected"), + [ + (0, "Image:Image:!"), + (1, "Image:??"), + (2, "??"), + ] +) # yapf: enable def test_find_replace_text( prompt, target_by_key, repl_by_key, - expected_by_mm_count, + mm_count, + expected, ): # Should not be used since there is nothing to convert to text mock_tokenizer = cast(AnyTokenizer, object()) - matches = find_text_matches( + prompt_repls = [ + PromptReplacement(target, *repl_by_key[key]).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ] + matches = find_text_matches(prompt, prompt_repls) + + result = replace_text_matches( prompt, - [ - PromptReplacement(target, *repl_by_key[key]) \ - .bind(key, mock_tokenizer) - for key, target in target_by_key.items() - ], + matches, + {key: list(range(mm_count)) + for key in repl_by_key}, + BatchFeature(), ) - result_by_mm_count = { - 
mm_count: replace_text_matches( - prompt, - matches, - {key: list(range(mm_count)) - for key in repl_by_key}, - BatchFeature(), - ) - for mm_count in expected_by_mm_count - } # Only displayed on error print("matches:", matches) - print("result_by_mm_count:", result_by_mm_count) + print("result:", result) + + # Manually constructed results + assert result == expected + + +# yapf: disable +@pytest.mark.parametrize( + ("prompt", "target_by_key", "repl_by_key"), + [ + # Tokenized test cases of `test_find_replace_text` + # using the vocab of llava-hf/llava-v1.6-mistral-7b-hf + ( + [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918], + { + # We use `` before `Image:` to test matches that + # occur out of order + "pattern_1": [32000], + "pattern_2": [9833, 28747], + "pattern_3": [918], + }, + { + # Test whether target is confused with repl_unit + "pattern_1": ([32000, 32000], 1), + # Test empty repl_unit + "pattern_2": ([], 1), + # Test multiple repl_count + "pattern_3": ([1550], 2), + }, + ), + ] +) +@pytest.mark.parametrize( + ("mm_count", "expected"), + [ + (0, [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918]), + (1, [1, 32000, 32000, 9833, 28747, 32000, 32000, 1550, 1550]), + (2, [1, 32000, 32000, 32000, 32000, 32000, 1550, 1550]), + ] +) +# yapf: enable +def test_find_replace_tokens( + prompt, + target_by_key, + repl_by_key, + mm_count, + expected, +): + # Should not be used since there is nothing to convert to tokens + mock_tokenizer = cast(AnyTokenizer, object()) + + prompt_repls = [ + PromptReplacement(target, *repl_by_key[key]).bind(key, mock_tokenizer) + for key, target in target_by_key.items() + ] + matches = find_token_matches(prompt, prompt_repls) + + result = replace_token_matches( + prompt, + matches, + {key: list(range(mm_count)) + for key in repl_by_key}, + BatchFeature(), + ) + + # Only displayed on error + print("matches:", matches) + print("result:", result) + + # Manually constructed results + assert result == expected + + +# yapf: disable +@pytest.mark.parametrize( + "repl_by_key", + [ + { + "pattern_1": ([32000, 32000], 1), + "pattern_2": ([], 1), + "pattern_3": ([1550], 2), + }, + ], +) +@pytest.mark.parametrize( + ("prompt", "expected"), + [ + ( + [1, 9833, 28747, 32000, 9833, 28747, 32000, 32000, 918], + [ + _PlaceholderInfo( + modality="pattern_1", + start_idx=6, + unit=[32000, 32000], + unit_count=1, + ), + ], + ), + ( + [1, 32000, 32000, 9833, 28747, 32000, 32000, 1550, 1550], + [ + _PlaceholderInfo( + modality="pattern_1", + start_idx=1, + unit=[32000, 32000], + unit_count=1, + ), + _PlaceholderInfo( + modality="pattern_1", + start_idx=5, + unit=[32000, 32000], + unit_count=1, + ), + _PlaceholderInfo( + modality="pattern_3", + start_idx=7, + unit=[1550], + unit_count=2, + ), + ], + ), + ( + [1, 32000, 32000, 32000, 32000, 32000, 1550, 1550], + [ + _PlaceholderInfo( + modality="pattern_1", + start_idx=1, + unit=[32000, 32000], + unit_count=2, + ), + _PlaceholderInfo( + modality="pattern_3", + start_idx=6, + unit=[1550], + unit_count=2, + ), + ], + ), + ] +) +def test_iter_placeholders( + repl_by_key, + prompt, + expected, +): + # Should not be used since there is nothing to convert to tokens + mock_tokenizer = cast(AnyTokenizer, object()) + + prompt_repls = [ + PromptReplacement([], *repl).bind(key, mock_tokenizer) + for key, repl in repl_by_key.items() + ] + + result = list(iter_placeholders(prompt_repls, prompt)) + + # Only displayed on error + print("result:", result) # Manually constructed results - assert result_by_mm_count == expected_by_mm_count + 
assert result == expected diff --git a/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py b/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py index 3ebd7864b8fc8..f2fc0755cae01 100644 --- a/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py +++ b/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py @@ -2,19 +2,17 @@ import torch -from vllm.inputs import INPUT_REGISTRY from vllm.model_executor.models.llava import (LlavaForConditionalGeneration, - dummy_data_for_llava, - get_max_llava_image_tokens, - input_processor_for_llava) + create_metadata_for_llava, + dummy_mm_kwargs_for_llava, + get_max_llava_image_tokens) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY -@MULTIMODAL_REGISTRY.register_image_input_mapper() @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_llava) -@INPUT_REGISTRY.register_input_processor(input_processor_for_llava) +@MULTIMODAL_REGISTRY.register_processor_by_metadata(create_metadata_for_llava, + dummy_mm_kwargs_for_llava) class MyLlava(LlavaForConditionalGeneration): def compute_logits( diff --git a/vllm/inputs/registry.py b/vllm/inputs/registry.py index 85ab4355cc2e4..646554c72481a 100644 --- a/vllm/inputs/registry.py +++ b/vllm/inputs/registry.py @@ -232,19 +232,35 @@ def dummy_data_for_profiling( """ # Avoid circular import from vllm.model_executor.model_loader import get_model_architecture - - model_cls, _ = get_model_architecture(model_config) - if is_encoder_data: - dummy_factory = self._get_dummy_encoder_data_factory(model_cls) + from vllm.multimodal import MultiModalKwargs + from vllm.multimodal.utils import cached_get_tokenizer + + if mm_registry.has_processor(model_config): + tokenizer = cached_get_tokenizer( + model_config.tokenizer, + trust_remote_code=model_config.trust_remote_code, + ) + processor = mm_registry.create_processor(model_config, tokenizer) + + mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) + mm_max_tokens = mm_registry.get_max_tokens_by_modality( + model_config) + + dummy_data = processor.get_dummy_data(seq_len, mm_counts, + mm_max_tokens) else: - dummy_factory = self._get_dummy_data_factory(model_cls) - mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) - mm_processor_kwargs = get_allowed_kwarg_only_overrides( - dummy_factory, overrides=model_config.mm_processor_kwargs) + model_cls, _ = get_model_architecture(model_config) + if is_encoder_data: + dummy_factory = self._get_dummy_encoder_data_factory(model_cls) + else: + dummy_factory = self._get_dummy_data_factory(model_cls) + mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) + mm_processor_kwargs = get_allowed_kwarg_only_overrides( + dummy_factory, overrides=model_config.mm_processor_kwargs) - dummy_data = dummy_factory(InputContext(model_config), seq_len, - _MultiModalCounts(mm_counts), - **mm_processor_kwargs) + dummy_data = dummy_factory(InputContext(model_config), seq_len, + _MultiModalCounts(mm_counts), + **mm_processor_kwargs) # Having more tokens is over-conservative but otherwise fine num_tokens = dummy_data.seq_data.prompt_token_ids @@ -257,7 +273,9 @@ def dummy_data_for_profiling( raise AssertionError( f"Expected at least {seq_len} dummy tokens for profiling, " f"but found {len(num_tokens)} tokens instead.") - if dummy_data.multi_modal_data is not None: + + if (dummy_data.multi_modal_data is not None and + not 
isinstance(dummy_data.multi_modal_data, MultiModalKwargs)): for k, v in dummy_data.multi_modal_data.items(): num_items = len(v) if isinstance(v, list) else 1 num_expected = mm_counts[k] diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index d375c1c9da2a9..953b89f1842af 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -1,17 +1,19 @@ from functools import cached_property +from types import MethodType from typing import (Iterable, List, Literal, Mapping, Optional, Protocol, Set, Tuple, TypedDict, Union) import torch import torch.nn as nn -from PIL import Image -from transformers import (CLIPVisionConfig, LlavaConfig, PixtralVisionConfig, - PretrainedConfig, SiglipVisionConfig) +from PIL.Image import Image +from transformers import (BatchFeature, CLIPVisionConfig, LlavaConfig, + PixtralVisionConfig, PretrainedConfig, + ProcessorMixin, SiglipVisionConfig) +from transformers.models.pixtral import PixtralProcessor from vllm.attention import AttentionMetadata from vllm.config import VllmConfig -from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, - InputContext) +from vllm.inputs import InputContext from vllm.model_executor.layers.activation import get_act_fn from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) @@ -19,21 +21,20 @@ from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY -from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.inputs import MultiModalKwargs, NestedTensors +from vllm.multimodal.processing import (InputProcessingContext, + ModalityProcessingMetadata, + MultiModalProcessingMetadata, + MultiModalProcessor, PromptReplacement) from vllm.sequence import IntermediateTensors -from vllm.utils import is_list_of from .clip import (CLIPVisionModel, dummy_image_for_clip, - dummy_seq_data_for_clip, get_max_clip_image_tokens, - input_processor_for_clip) + get_max_clip_image_tokens) from .interfaces import SupportsMultiModal, SupportsPP from .pixtral import (PixtralHFVisionModel, dummy_image_for_pixtral_hf, - dummy_seq_data_for_pixtral_hf, - get_max_pixtral_hf_image_tokens, - input_processor_for_pixtral_hf) + get_max_pixtral_hf_image_tokens) from .siglip import (SiglipVisionModel, dummy_image_for_siglip, - dummy_seq_data_for_siglip, get_max_siglip_image_tokens, - input_processor_for_siglip) + get_max_siglip_image_tokens) from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, maybe_prefix, merge_multimodal_embeddings) @@ -113,102 +114,86 @@ def get_max_llava_image_tokens(ctx: InputContext): raise ValueError(f"Unexpected select feature strategy: {strategy}") -def dummy_data_for_llava(ctx: InputContext, seq_len: int, - mm_counts: Mapping[str, int]): +def dummy_mm_kwargs_for_llava(ctx: InputProcessingContext, + mm_counts: Mapping[str, int]): hf_config = ctx.get_hf_config(LlavaConfig) vision_config = hf_config.vision_config num_images = mm_counts["image"] - image_feature_size = get_max_llava_image_tokens(ctx) - if isinstance(vision_config, CLIPVisionConfig): - seq_data, ranges = dummy_seq_data_for_clip( - vision_config, - seq_len, - num_images, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) - - mm_data = dummy_image_for_clip(vision_config, num_images) - return DummyData(seq_data, mm_data, ranges) + data = 
dummy_image_for_clip(vision_config, num_images) elif isinstance(vision_config, SiglipVisionConfig): - seq_data, ranges = dummy_seq_data_for_siglip( - vision_config, - seq_len, - num_images, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) - - mm_data = dummy_image_for_siglip(vision_config, num_images) - return DummyData(seq_data, mm_data, ranges) + data = dummy_image_for_siglip(vision_config, num_images) elif isinstance(vision_config, PixtralVisionConfig): - seq_data, ranges = dummy_seq_data_for_pixtral_hf( - vision_config, - seq_len, - num_images, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) - - mm_data = dummy_image_for_pixtral_hf(vision_config, num_images) - return DummyData(seq_data, mm_data, ranges) + data = dummy_image_for_pixtral_hf(vision_config, num_images) + else: + msg = f"Unsupported vision config: {type(vision_config)}" + raise NotImplementedError(msg) - msg = f"Unsupported vision config: {type(vision_config)}" - raise NotImplementedError(msg) + hf_processor = ctx.get_hf_processor() + image_processor = hf_processor.image_processor # type: ignore + hf_inputs = image_processor.preprocess(data['image'], return_tensors="pt") + is_pixtral = isinstance(hf_processor, PixtralProcessor) + return MultiModalKwargs( + **hf_inputs, + is_pixtral=torch.tensor(is_pixtral), + ) -def input_processor_for_llava(ctx: InputContext, inputs: DecoderOnlyInputs): - multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is None or "image" not in multi_modal_data: - return inputs - model_config = ctx.model_config +def create_metadata_for_llava( + ctx: InputProcessingContext) -> MultiModalProcessingMetadata: hf_config = ctx.get_hf_config(LlavaConfig) - vision_config = hf_config.vision_config + image_token_id = hf_config.image_token_index + + def get_repl_count( + mm_items: list[Image], + hf_inputs: BatchFeature, + item_idx: int, + ) -> int: + return get_max_llava_image_tokens(ctx) + + return { + "image": + ModalityProcessingMetadata(prompt_repls=[ + PromptReplacement(target=[image_token_id], + repl_unit=[image_token_id], + repl_count=get_repl_count), + ]), + } - image_data = multi_modal_data["image"] - if isinstance(image_data, Image.Image): - image_feature_size = get_max_llava_image_tokens(ctx) - elif is_list_of(image_data, Image.Image): - image_feature_size = [get_max_llava_image_tokens(ctx) - ] * len(image_data) - elif isinstance(image_data, torch.Tensor): - num_images, image_feature_size, hidden_size = image_data.shape - elif is_list_of(image_data, torch.Tensor): - image_feature_size = [item.shape[1] for item in image_data] - else: - raise TypeError(f"Invalid image type: {type(image_data)}") - if isinstance(vision_config, CLIPVisionConfig): - return input_processor_for_clip( - model_config, - vision_config, - inputs, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) - elif isinstance(vision_config, SiglipVisionConfig): - return input_processor_for_siglip( - model_config, - vision_config, - inputs, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) - elif isinstance(vision_config, PixtralVisionConfig): - # We ignore image_feature_size_override since we have non-uniform - # image sizes for Pixtral - return input_processor_for_pixtral_hf( - model_config, - vision_config, - inputs, - image_token_id=hf_config.image_token_index, - ) +class LlavaProcessor(MultiModalProcessor): - msg = 
f"Unsupported vision config: {type(vision_config)}" - raise NotImplementedError(msg) + def _patch_pixtral_processor(self, hf_processor: PixtralProcessor): + if getattr(hf_processor, "__is_patched__", False): + return # Already patched + + image_processor = hf_processor.image_processor # type: ignore + orig_preprocess = image_processor.preprocess + + def preprocess(__self, *args, **kwargs): + hf_inputs = orig_preprocess(*args, **kwargs) + hf_inputs["is_pixtral"] = torch.tensor(True) + return hf_inputs + + image_processor.preprocess = MethodType(preprocess, image_processor) + + hf_processor.__is_patched__ = True # type: ignore + + def _get_hf_processor(self) -> ProcessorMixin: + hf_processor = self.ctx.get_hf_processor() + + if isinstance(hf_processor, PixtralProcessor): + self._patch_pixtral_processor(hf_processor) + + return hf_processor + + def _get_dummy_mm_kwargs( + self, + mm_counts: Mapping[str, int], + ) -> MultiModalKwargs: + return dummy_mm_kwargs_for_llava(self.ctx, mm_counts) class LlavaLikeConfig(Protocol): @@ -291,10 +276,11 @@ def init_vision_tower_for_llava( raise NotImplementedError(msg) -@MULTIMODAL_REGISTRY.register_image_input_mapper() @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_llava) -@INPUT_REGISTRY.register_input_processor(input_processor_for_llava) +@MULTIMODAL_REGISTRY.register_processor(lambda ctx: LlavaProcessor( + ctx=ctx, + metadata=create_metadata_for_llava(ctx), +)) class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): # BitandBytes specific attributes bitsandbytes_stacked_params_mapping = { @@ -367,38 +353,10 @@ def _validate_pixel_values(self, data: torch.Tensor) -> torch.Tensor: return data - def _validate_image_sizes(self, images: List[torch.Tensor], - sizes: List[torch.Tensor]) -> List[torch.Tensor]: - if not isinstance(sizes, list): - sizes = [sizes] - - total_images = sum(size.numel() // 2 for size in sizes) - if total_images != len(images): - raise ValueError("Mismatch in number of images. " - f"Expected {total_images}, got {len(images)}") - img_idx = 0 - for size in sizes: - # Flatten the size tensor to a list of (height, width) pairs - size = size.view(-1, 2).tolist() - for expected_h, expected_w in size: - if img_idx >= len(images): - raise ValueError("Ran out of images before sizes. " - f"{img_idx} >= {len(images)}") - img = images[img_idx] - if img.shape[-2:] != (expected_h, expected_w): - raise ValueError( - "Image size mismatch. Expected " - f"{(expected_h, expected_w)}, got {img.shape[-2:]}") - if img.shape[-3] != 3: - raise ValueError("Image channel mismatch. Expected 3, " - f"got {img.shape[-3]}") - img_idx += 1 - return images - def _parse_and_validate_image_input( self, **kwargs: object) -> Optional[LlavaImageInputs]: pixel_values = kwargs.pop("pixel_values", None) - image_sizes = kwargs.pop("image_sizes", None) + is_pixtral = kwargs.pop("is_pixtral", torch.tensor([False])) image_embeds = kwargs.pop("image_embeds", None) if pixel_values is None and image_embeds is None: @@ -409,9 +367,8 @@ def _parse_and_validate_image_input( raise ValueError("Incorrect type of pixel values. 
" f"Got type: {type(pixel_values)}") - # Case for models like PixtralHF that have dynamic image sizes - # so we need to produce a list of tensors - if image_sizes is not None: + assert isinstance(is_pixtral, torch.Tensor) + if is_pixtral.any(): images = pixel_values def flatten_to_3d_tensors(item): @@ -434,7 +391,7 @@ def flatten_to_3d_tensors(item): return LlavaImagePixelInputs( type="pixel_values", - data=self._validate_image_sizes(images, image_sizes), + data=images, ) return LlavaImagePixelInputs( diff --git a/vllm/multimodal/base.py b/vllm/multimodal/base.py index f93722523728d..7dba94b885b6d 100644 --- a/vllm/multimodal/base.py +++ b/vllm/multimodal/base.py @@ -226,16 +226,16 @@ def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: """ # Avoid circular import from vllm.model_executor.model_loader import get_model_architecture + from vllm.model_executor.models import supports_multimodal model_cls, _ = get_model_architecture(model_config) - if model_cls not in self._input_mappers: + if not supports_multimodal(model_cls): return 0 max_mm_tokens = self._max_mm_tokens.get(model_cls) if max_mm_tokens is None: - raise KeyError(f"No maximum number of multi-modal tokens is given " - f"for model class {model_cls.__name__} in {self}.") + return 0 if callable(max_mm_tokens): mm_processor_kwargs = get_allowed_kwarg_only_overrides( @@ -326,26 +326,47 @@ def from_seq_group( src_ranges = [] dest_ranges = [] """ - if (not seq_group.multi_modal_data - or not seq_group.multi_modal_placeholders): - return seq_group.multi_modal_data, {} + seq_mm_data = seq_group.multi_modal_data + seq_mm_placeholders = seq_group.multi_modal_placeholders + + if not seq_mm_data or not seq_mm_placeholders: + return seq_mm_data, {} + + # For merged processor, we directly use mm_kwargs as mm_data + if isinstance(seq_mm_data, MultiModalKwargs): + placeholder_maps = dict[str, MultiModalPlaceholderMap]() + + for modality, placeholders in seq_mm_placeholders.items(): + placeholder_map = MultiModalPlaceholderMap() + + if positions: + placeholder_map.append_items_from_seq_group( + positions, + # Dummy, since we don't care about intersecting items + [None] * len(placeholders), + placeholders, + ) + + placeholder_maps[modality] = placeholder_map + + return seq_mm_data, placeholder_maps - mm_data = {**seq_group.multi_modal_data} - placeholder_maps: Dict[str, MultiModalPlaceholderMap] = defaultdict( + mm_data = {**seq_mm_data} + placeholder_maps = defaultdict[str, MultiModalPlaceholderMap]( MultiModalPlaceholderMap) - for ( - modality, - placeholders, - ) in seq_group.multi_modal_placeholders.items(): + for modality, placeholders in seq_mm_placeholders.items(): mm_items = mm_data.pop(modality) if not isinstance(mm_items, list): mm_items = [mm_items] if positions: - intersecting_items = placeholder_maps[ - modality].append_items_from_seq_group( - positions, mm_items, placeholders) + intersecting_items = placeholder_maps[modality] \ + .append_items_from_seq_group( + positions, + mm_items, + placeholders, + ) if intersecting_items: mm_data[modality] = intersecting_items diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 28c8dda581982..4a1737991534f 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -3,14 +3,13 @@ from collections.abc import Callable, ItemsView, Iterable, Mapping, Sequence from dataclasses import dataclass from functools import lru_cache -from itertools import groupby from typing import Any, Generic, NamedTuple, Optional, Protocol, TypeVar, 
Union -import numpy as np -from transformers import BatchFeature +import torch +from transformers import BatchFeature, ProcessorMixin from typing_extensions import TypeAlias, TypedDict -from vllm.inputs import InputProcessingContext +from vllm.inputs import DummyData, InputProcessingContext from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer from vllm.utils import flatten_2d_lists, full_groupby, is_list_of @@ -256,63 +255,6 @@ def to_multi_format(data: MultiModalDataDict) -> dict[str, list[Any]]: return multi_data -class _TokenRun(NamedTuple): - token_id: int - - start_idx: int - length: int - - -def iter_token_runs(token_ids: list[int]) -> Iterable[_TokenRun]: - """ - Yield the starting index and length of each run of tokens that are the same. - """ - start_idx = 0 - - for token_id, it in groupby(token_ids): - length = sum(1 for _ in it) - yield _TokenRun(token_id=token_id, start_idx=start_idx, length=length) - - start_idx += length - - -class _PlaceholderInfo(NamedTuple): - modality: str - offset: int - length: int - - def to_range(self) -> PlaceholderRange: - return PlaceholderRange(offset=self.offset, length=self.length) - - -def iter_placeholders( - prompt_repls: Sequence[_BoundPromptReplacement[Any]], - token_ids: list[int], - *, - min_placeholder_count: int, -) -> Iterable[_PlaceholderInfo]: - """Yield each set of placeholder tokens found in :code:`token_ids`.""" - placeholder_ids_by_modality = { - modality: { - token_id - for prompt_repl in repls - for token_id in prompt_repl.repl_unit.token_ids - } - for modality, repls in full_groupby_modality(prompt_repls) - } - - for run_info in iter_token_runs(token_ids): - if run_info.length > min_placeholder_count: - for (modality, - placeholder_ids) in placeholder_ids_by_modality.items(): - if run_info.token_id in placeholder_ids: - yield _PlaceholderInfo( - modality=modality, - offset=run_info.start_idx, - length=run_info.length, - ) - - class _TokenMatch(NamedTuple): start_idx: int end_idx: int @@ -353,13 +295,9 @@ def start_idx(self) -> int: def end_idx(self) -> int: raise NotImplementedError + @property @abstractmethod - def get_repl( - self, - mm_items: list[_T], - hf_inputs: BatchFeature, - item_idx: int, - ) -> _S: + def repl_unit(self) -> _S: raise NotImplementedError def __repr__(self) -> str: @@ -380,15 +318,9 @@ def start_idx(self) -> int: def end_idx(self) -> int: return self.match.end_idx - def get_repl( - self, - mm_items: list[_T], - hf_inputs: BatchFeature, - item_idx: int, - ) -> list[int]: - prompt_repl = self.prompt_repl - count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) - return prompt_repl.repl_unit.token_ids * count + @property + def repl_unit(self) -> list[int]: + return self.prompt_repl.repl_unit.token_ids @dataclass(repr=False) @@ -404,15 +336,26 @@ def start_idx(self) -> int: def end_idx(self) -> int: return self.match.end() - def get_repl( - self, - mm_items: list[_T], - hf_inputs: BatchFeature, - item_idx: int, - ) -> str: - prompt_repl = self.prompt_repl - count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) - return prompt_repl.repl_unit.text * count + @property + def repl_unit(self) -> str: + return self.prompt_repl.repl_unit.text + + +class _PlaceholderInfo(NamedTuple): + modality: str + start_idx: int + unit: list[int] + unit_count: int + + @property + def length(self) -> int: + return len(self.unit) * self.unit_count + + def to_range(self) -> PlaceholderRange: + return PlaceholderRange( + offset=self.start_idx, + length=self.length, + ) def find_token_matches( @@ 
-447,15 +390,17 @@ def _resolve_matches( Resolve :code:`matches` to ensure that there are no overlapping matches, and sort them such that earlier matches take priority over later ones. """ - num_matches_by_idx = np.zeros(len(prompt), dtype=int) + seen_matches: list[Optional[_PromptReplacementMatch[_T, _S]]] \ + = [None] * len(prompt) + for match in matches: - num_matches_by_idx[match.start_idx:match.end_idx] += 1 + for idx in range(match.start_idx, match.end_idx): + if seen_matches[idx] is not None: + raise ValueError("Found overlapping matches " + f"({seen_matches[idx]} and {match}) " + f"at index={idx} of prompt={prompt}") - duplicate_matches_idxs, = np.nonzero(num_matches_by_idx > 1) - if len(duplicate_matches_idxs) > 0: - raise ValueError("Unable to find a unique replacement " - f"at indices={duplicate_matches_idxs} " - f"of prompt={prompt}") + seen_matches[idx] = match return sorted(matches, key=lambda x: x.start_idx) @@ -480,9 +425,12 @@ def _replace_matches( start_idx = match.start_idx end_idx = match.end_idx - repl_ids = match.get_repl(mm_items, hf_inputs, item_idx) + repl_unit = match.repl_unit + repl_info = match.prompt_repl + repl_count = repl_info.get_count(mm_items, hf_inputs, item_idx) - out_seqs.append(prompt[prev_end_idx:start_idx] + repl_ids) + out_seqs.append(prompt[prev_end_idx:start_idx] + + repl_unit * repl_count) prev_end_idx = end_idx next_idx_by_modality[modality] += 1 @@ -531,7 +479,57 @@ def replace_text_matches( return "".join(texts) -class MultiModalProcessor: +def _merge_placeholder_matches( + matches: Iterable[_PromptReplacementTokenMatch], +) -> Iterable[_PromptReplacementTokenMatch]: + current_match = None + + for match in sorted(matches, key=lambda x: x.start_idx): + if current_match is None: + current_match = match + elif (current_match.prompt_repl == match.prompt_repl + and current_match.end_idx == match.start_idx): + current_match = _PromptReplacementTokenMatch( + current_match.prompt_repl, + match=_TokenMatch(current_match.start_idx, match.end_idx), + ) + else: + yield current_match + current_match = match + + if current_match is not None: + yield current_match + + +def iter_placeholders( + prompt_repls: Sequence[_BoundPromptReplacement[Any]], + prompt: list[int], + *, + min_unit_count: int = 1, +) -> Iterable[_PlaceholderInfo]: + """Yield each set of placeholder tokens found in :code:`token_ids`.""" + if min_unit_count <= 0: + raise ValueError("`min_unit_count` must be a positive integer") + + matches = (_PromptReplacementTokenMatch(prompt_repl, match) + for prompt_repl in prompt_repls + if len(repl_unit := prompt_repl.repl_unit.token_ids) > 0 + for match in iter_token_matches(prompt, repl_unit)) + + for match in _merge_placeholder_matches(matches): + unit = match.repl_unit + placeholder = _PlaceholderInfo( + modality=match.modality, + start_idx=match.start_idx, + unit=unit, + unit_count=(match.end_idx - match.start_idx) // len(unit), + ) + + if placeholder.unit_count >= min_unit_count: + yield placeholder + + +class MultiModalProcessor(ABC): """ Helper class to process multi-modal inputs to be used in vLLM. 
""" @@ -546,6 +544,12 @@ def __init__( self.ctx = ctx self.metadata = metadata + def _get_hf_processor(self) -> ProcessorMixin: + return self.ctx.get_hf_processor() + + def _get_tokenizer(self) -> AnyTokenizer: + return self.ctx.tokenizer + def __call__( self, prompt: str, @@ -562,13 +566,13 @@ def _find_placeholders( # To avoid false positives from multi-input when detecting # whether placeholder tokens have been inserted, in case # the target sequence is a subset of the replacement tokens - min_placeholder_count: int = 16, + min_unit_count: int = 16, ) -> list[_PlaceholderInfo]: return list( iter_placeholders( all_prompt_repls, new_token_ids, - min_placeholder_count=min_placeholder_count, + min_unit_count=min_unit_count, )) def _apply_hf_processor( @@ -577,19 +581,49 @@ def _apply_hf_processor( mm_data: MultiModalDataDict, mm_processor_kwargs: Mapping[str, object], ) -> BatchFeature: - hf_processor = self.ctx.get_hf_processor() + hf_processor = self._get_hf_processor() + + processor_data = dict[str, Any]() + passthrough_data = dict[str, Any]() + for k, v in mm_data.items(): + # TODO: Make a separate modality for embedding inputs + # to avoid confusion + if k in ("image", "video", "audio"): + if isinstance(v, torch.Tensor) and v.ndim == 3: + # Pass through embedding inputs (single) + passthrough_data[f"{k}_embeds"] = [v] + elif is_list_of(v, torch.Tensor) and v[0].ndim == 2: + # Pass through embedding inputs (multi) + passthrough_data[f"{k}_embeds"] = v + else: + # Map keys to plural form, e.g.: image -> images + processor_data[f"{k}s"] = v + else: + processor_data[k] = v + + try: + hf_inputs = hf_processor( + text=prompt, # type: ignore + **processor_data, + **mm_processor_kwargs, + return_tensors="pt", + ) + except Exception as exc: + data = dict(text=prompt, **processor_data) - return hf_processor( - text=prompt, # type: ignore - **mm_data, - **mm_processor_kwargs, - ) + raise RuntimeError( + f"Failed to apply {type(hf_processor).__name__} " + f"on data={data} with kwargs={mm_processor_kwargs}") from exc + + hf_inputs.update(passthrough_data) + + return hf_inputs def _bind_prompt_replacements( self, mm_data: MultiModalDataDict, ) -> list[_BoundPromptReplacement[Any]]: - tokenizer = self.ctx.tokenizer + tokenizer = self._get_tokenizer() return [ prompt_repl.bind(modality, tokenizer) @@ -604,7 +638,7 @@ def _apply_prompt_replacements( token_ids: list[int], prompt_repls: Sequence[_BoundPromptReplacement[Any]], ) -> tuple[list[int], str, list[_PlaceholderInfo]]: - tokenizer = self.ctx.tokenizer + tokenizer = self._get_tokenizer() mm_items = to_multi_format(mm_data) token_matches = find_token_matches(token_ids, prompt_repls) @@ -620,7 +654,7 @@ def _apply_prompt_replacements( # of the search text in the prompt, we instead perform string # replacement on the decoded token IDs, then encode them back. if all( - len(matches) >= len(mm_data[modality]) + len(matches) >= len(mm_items[modality]) for modality, matches in full_groupby_modality(token_matches) ): # yapf: disable token_ids = replace_token_matches( @@ -648,15 +682,6 @@ def _apply_prompt_replacements( placeholders = self._find_placeholders(matched_repls, token_ids) - # Sanity check - assert len(placeholders) == len(matched_repls), dict( - # Log this information for easier debugging - text=text, - token_ids=token_ids, - placeholders=placeholders, - matched_repls=matched_repls, - ) - return token_ids, text, placeholders def apply( @@ -678,7 +703,7 @@ def apply( 3. 
Extract information about the placeholder tokens from the processed token IDs. """ - tokenizer = self.ctx.tokenizer + tokenizer = self._get_tokenizer() hf_inputs = self._apply_hf_processor(prompt_text, mm_data, mm_processor_kwargs) @@ -717,3 +742,59 @@ def apply( mm_kwargs=mm_kwargs, mm_placeholders=mm_placeholders, ) + + @abstractmethod + def _get_dummy_mm_kwargs( + self, + mm_counts: Mapping[str, int], + ) -> MultiModalKwargs: + """ + Build the input that corresponds to `mm_max_tokens` in + :meth:`get_dummy_data`. + """ + raise NotImplementedError + + def get_dummy_data( + self, + seq_len: int, + mm_counts: Mapping[str, int], + mm_max_tokens: Mapping[str, int], + ) -> DummyData: + # Avoid circular import + from vllm.sequence import SequenceData + + tokenizer = self._get_tokenizer() + + mm_placeholders = dict[str, _PlaceholderInfo]() + offset = 0 + + for modality, max_tokens in mm_max_tokens.items(): + if max_tokens == 0: + continue + + metadata = self.metadata[modality] + repl = metadata.prompt_repls[0].bind(modality, tokenizer) + repl_token_ids = repl.repl_unit.token_ids + + placeholders = _PlaceholderInfo( + modality=modality, + start_idx=offset, + unit=repl_token_ids, + unit_count=max_tokens // len(repl_token_ids), + ) + + mm_placeholders[modality] = placeholders + offset += placeholders.length + + prompt_token_ids = flatten_2d_lists( + [p.unit * p.unit_count for p in mm_placeholders.values()]) + prompt_token_ids.extend([0] * (seq_len - len(prompt_token_ids))) + + return DummyData( + seq_data=SequenceData.from_seqs(prompt_token_ids), + multi_modal_data=self._get_dummy_mm_kwargs(mm_counts), + multi_modal_placeholders={ + modality: [p.to_range()] + for modality, p in mm_placeholders.items() + }, + ) diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py index b73daee98bd80..f51da8972d15b 100644 --- a/vllm/multimodal/registry.py +++ b/vllm/multimodal/registry.py @@ -15,7 +15,7 @@ from .base import MultiModalInputMapper, MultiModalPlugin, MultiModalTokensCalc from .image import ImagePlugin from .inputs import MultiModalDataDict, MultiModalKwargs, NestedTensors -from .processing import MultiModalProcessor +from .processing import MultiModalProcessingMetadata, MultiModalProcessor from .video import VideoPlugin if TYPE_CHECKING: @@ -200,9 +200,12 @@ def register_max_image_tokens( """ return self.register_max_multimodal_tokens("image", max_mm_tokens) - def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: + def get_max_tokens_by_modality( + self, + model_config: "ModelConfig", + ) -> Mapping[str, int]: """ - Get the maximum number of multi-modal tokens + Get the maximum number of tokens from each modality for profiling the memory usage of a model. See :meth:`MultiModalPlugin.get_max_multimodal_tokens` for more details. @@ -212,9 +215,23 @@ def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: """ limits_per_plugin = self._limits_by_model[model_config] - return sum((limits_per_plugin[key] * - plugin.get_max_multimodal_tokens(model_config)) - for key, plugin in self._plugins.items()) + return { + key: (limits_per_plugin[key] * + plugin.get_max_multimodal_tokens(model_config)) + for key, plugin in self._plugins.items() + } + + def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: + """ + Get the maximum number of multi-modal tokens + for profiling the memory usage of a model. + + See :meth:`MultiModalPlugin.get_max_multimodal_tokens` for more details. 
+ + Note: + This should be called after :meth:`init_mm_limits_per_prompt`. + """ + return sum(self.get_max_tokens_by_modality(model_config).values()) def init_mm_limits_per_prompt( self, @@ -270,7 +287,8 @@ def register_processor( factory: MultiModalProcessorFactory, ): """ - Register a multi-modal processor to a model class. + Register a multi-modal processor to a model class. The processor + is constructed lazily, hence a factory method should be passed. When the model receives multi-modal data, the provided function is invoked to transform the data into a dictionary of model inputs. @@ -293,6 +311,41 @@ def wrapper(model_cls: N) -> N: return wrapper + def register_processor_by_metadata( + self, + metadata_factory: Callable[[InputProcessingContext], + MultiModalProcessingMetadata], + get_dummy_mm_kwargs: Callable[ + [InputProcessingContext, Mapping[str, int]], MultiModalKwargs], + ): + """ + Convenience method to register a multi-modal processor to a model class + according to a function that constructs its metadata. + + When the model receives multi-modal data, the provided function is + invoked to transform the data into a dictionary of model inputs. + + See also: + - :ref:`input_processing_pipeline` + - :ref:`enabling_multimodal_inputs` + """ + + class ConcreteMultiModalProcessor(MultiModalProcessor): + + def _get_dummy_mm_kwargs( + self, + mm_counts: Mapping[str, int], + ) -> MultiModalKwargs: + return get_dummy_mm_kwargs(self.ctx, mm_counts) + + def factory(ctx: InputProcessingContext): + return ConcreteMultiModalProcessor( + ctx=ctx, + metadata=metadata_factory(ctx), + ) + + return self.register_processor(factory) + def has_processor(self, model_config: "ModelConfig") -> bool: """ Test whether a multi-modal processor is defined for a specific model. 
diff --git a/vllm/v1/engine/mm_input_mapper.py b/vllm/v1/engine/mm_input_mapper.py index 594c973678235..45882f8f076d4 100644 --- a/vllm/v1/engine/mm_input_mapper.py +++ b/vllm/v1/engine/mm_input_mapper.py @@ -12,6 +12,7 @@ def __init__( model_config: ModelConfig, mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY, ): + self.model_config = model_config self.mm_registry = mm_registry self.multi_modal_input_mapper = mm_registry.create_input_mapper( model_config) diff --git a/vllm/v1/engine/processor.py b/vllm/v1/engine/processor.py index 7a1ea2530abda..120fc64969552 100644 --- a/vllm/v1/engine/processor.py +++ b/vllm/v1/engine/processor.py @@ -7,7 +7,8 @@ from vllm.inputs.parse import is_encoder_decoder_inputs from vllm.inputs.preprocess import InputPreprocessor from vllm.lora.request import LoRARequest -from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry +from vllm.multimodal import (MULTIMODAL_REGISTRY, MultiModalKwargs, + MultiModalRegistry) from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams @@ -101,10 +102,15 @@ def process_inputs( self.generation_config_fields, eos_token_id) # Preprocess multi-modal data - mm_inputs = self.mm_input_mapper.process_inputs( - decoder_inputs.multi_modal_data, - decoder_inputs.mm_processor_kwargs) if len( - decoder_inputs.multi_modal_data) > 0 else None + if len(decoder_inputs.multi_modal_data) == 0: + mm_inputs = None + elif isinstance(decoder_inputs.multi_modal_data, MultiModalKwargs): + mm_inputs = [decoder_inputs.multi_modal_data] + else: + mm_inputs = self.mm_input_mapper.process_inputs( + decoder_inputs.multi_modal_data, + decoder_inputs.mm_processor_kwargs, + ) # Make Request for Detokenizer. 
detokenizer_request = DetokenizerRequest( From f13cf9ad5049e386f766014877dee78d2f438799 Mon Sep 17 00:00:00 2001 From: Gregory Shtrasberg <156009573+gshtras@users.noreply.github.com> Date: Sat, 7 Dec 2024 04:03:44 -0500 Subject: [PATCH 258/397] [Build] Fix for the Wswitch-bool clang warning (#10060) Signed-off-by: Gregory Shtrasberg --- csrc/attention/paged_attention_v1.cu | 11 ++++------- csrc/attention/paged_attention_v2.cu | 11 ++++------- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/csrc/attention/paged_attention_v1.cu b/csrc/attention/paged_attention_v1.cu index 741cd0c82dc89..cb1a069942069 100644 --- a/csrc/attention/paged_attention_v1.cu +++ b/csrc/attention/paged_attention_v1.cu @@ -140,13 +140,10 @@ void paged_attention_v1_launcher( blocksparse_block_size, blocksparse_head_sliding_step); #define CALL_V1_LAUNCHER_SPARSITY(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE) \ - switch (is_block_sparse) { \ - case true: \ - CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ - break; \ - case false: \ - CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ - break; \ + if (is_block_sparse) { \ + CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ + } else { \ + CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ } // NOTE(woosuk): To reduce the compilation time, we omitted block sizes diff --git a/csrc/attention/paged_attention_v2.cu b/csrc/attention/paged_attention_v2.cu index 6de8d0bdd5b8d..c457bdb89008e 100644 --- a/csrc/attention/paged_attention_v2.cu +++ b/csrc/attention/paged_attention_v2.cu @@ -147,13 +147,10 @@ void paged_attention_v2_launcher( blocksparse_head_sliding_step); #define CALL_V2_LAUNCHER_SPARSITY(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE) \ - switch (is_block_sparse) { \ - case true: \ - CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ - break; \ - case false: \ - CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ - break; \ + if (is_block_sparse) { \ + CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ + } else { \ + CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ } // NOTE(woosuk): To reduce the compilation time, we omitted block sizes From b26b4cd03c5468c68c3ce328ea6498a5d816870d Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Sat, 7 Dec 2024 18:33:49 +0800 Subject: [PATCH 259/397] [Misc][LoRA] Refactor and clean MergedQKVParallelLinearWithLora implementation (#10958) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/lora/layers.py | 323 ++++++++------------------------------------ 1 file changed, 60 insertions(+), 263 deletions(-) diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 473e4bedf3d60..3e9c2ceb83eac 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -542,10 +542,20 @@ class MergedColumnParallelLinearWithLoRA(ColumnParallelLinearWithLoRA): Both slices must have the same size. 
""" - def __init__(self, base_layer: MergedColumnParallelLinear) -> None: + def __init__( + self, base_layer: Union[MergedColumnParallelLinear, + QKVParallelLinear]) -> None: super().__init__(base_layer) # There are two LoRA layers - self.n_slices = len(self.base_layer.output_sizes) + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + # the output_sizes in MergedColumnParallelLinear is not sharded by tp + # we need to divide it by the tp_size to get correct slices size + output_sizes = self.base_layer.output_sizes + self.output_slices = tuple( + divide(output_size, self.tp_size) for output_size in output_sizes) + self.n_slices = len(self.output_slices) + self.output_ids = (self.tp_rank, ) * self.n_slices def create_lora_weights( self, @@ -559,15 +569,6 @@ def create_lora_weights( """ self.lora_config = lora_config - if not (len(self.base_layer.output_sizes) == self.n_slices == 2 - and self.base_layer.output_sizes[0] - == self.base_layer.output_sizes[1]): - raise ValueError( - "LoRAColumnParallelLinear2Slice requires 2 slices with " - "the same size.") - self.tp_size = get_tensor_model_parallel_world_size() - self.tp_rank = get_tensor_model_parallel_rank() - lora_a_output_size_per_partition = ( lora_config.max_lora_rank if not lora_config.fully_sharded_loras else divide(lora_config.max_lora_rank, self.tp_size)) @@ -585,22 +586,20 @@ def create_lora_weights( torch.zeros( max_loras, 1, - self.output_size // 2, + output_size, lora_config.max_lora_rank, dtype=lora_config.lora_dtype, device=self.device, - ) for _ in range(self.n_slices)) + ) for output_size in self.output_slices) if lora_config.bias_enabled: self.lora_bias_stacked = tuple( torch.zeros( max_loras, 1, - self.output_size // 2, + output_size, dtype=lora_config.lora_dtype, device=self.device, - ) for _ in range(self.n_slices)) - self.output_dim = self.lora_b_stacked[0].shape[2] - self.output_slices = (self.output_dim, self.output_dim) + ) for output_size in self.output_slices) def slice_lora_a( self, lora_a: List[Union[torch.Tensor, None]] @@ -610,27 +609,21 @@ def slice_lora_a( def slice_lora_b( self, lora_b: List[Union[torch.Tensor, None]] ) -> List[Union[torch.Tensor, None]]: - #NOTE: lora_b contains 2 subloras, and each sublora could be None. - shard_size = self.output_dim - start_idx = self.tp_rank * shard_size - end_idx = (self.tp_rank + 1) * shard_size - lora_b = [ - lora_b[0][:, start_idx:end_idx] if lora_b[0] is not None else None, - lora_b[1][:, start_idx:end_idx] if lora_b[1] is not None else None, - ] + for i, (shard_id, shard_size) in enumerate( + zip(self.output_ids, self.output_slices)): + if (lora_b_i := lora_b[i]) is not None: + lora_b[i] = lora_b_i[:, shard_size * shard_id:shard_size * + (shard_id + 1)] return lora_b def slice_bias( self, bias: List[Union[torch.Tensor, None]]) -> List[Union[torch.Tensor, None]]: - # NOTE : each bias could be None. 
- shard_size = self.output_dim - start_idx = self.tp_rank * shard_size - end_idx = (self.tp_rank + 1) * shard_size - bias = [ - bias[0][start_idx:end_idx] if bias[0] is not None else None, - bias[1][start_idx:end_idx] if bias[1] is not None else None - ] + for i, (shard_id, shard_size) in enumerate( + zip(self.output_ids, self.output_slices)): + if (bias_i := bias[i]) is not None: + bias[i] = bias_i[shard_size * shard_id:shard_size * + (shard_id + 1)] return bias def set_lora( @@ -649,30 +642,25 @@ def set_lora( if lora_bias is not None: lora_bias = self.slice_bias(lora_bias) - if lora_a[0] is not None: - self.lora_a_stacked[0][ - index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_( - lora_a[0].T, non_blocking=True) - self.lora_b_stacked[0][ - index, 0, :lora_b[0].shape[1], :lora_b[0].shape[0]].copy_( - lora_b[0].T, non_blocking=True) - if lora_bias is not None and lora_bias[0] is not None: - self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], - self.lora_bias_stacked) - self.lora_bias_stacked[0][index, 0, :lora_bias[0].shape[0]].copy_( - lora_bias[0].T, non_blocking=True) - if lora_a[1] is not None: - self.lora_a_stacked[1][ - index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( - lora_a[1].T, non_blocking=True) - self.lora_b_stacked[1][ - index, 0, :lora_b[1].shape[1], :lora_b[1].shape[0]].copy_( - lora_b[1].T, non_blocking=True) - if lora_bias is not None and lora_bias[1] is not None: + for i in range(self.n_slices): + if (lora_a_i := lora_a[i]) is not None: + self.lora_a_stacked[i][ + index, 0, :lora_a_i.shape[1], :lora_a_i.shape[0]].copy_( + lora_a_i.T, non_blocking=True) + if (lora_b_i := lora_b[i]) is not None: + self.lora_b_stacked[i][ + index, 0, :lora_b_i.shape[1], :lora_b_i.shape[0]].copy_( + lora_b_i.T, non_blocking=True) + + if lora_bias is not None: self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], self.lora_bias_stacked) - self.lora_bias_stacked[1][index, 0, :lora_bias[1].shape[0]].copy_( - lora_bias[1].T, non_blocking=True) + for i in range(self.n_slices): + if (lora_bias_i := lora_bias[i]) is not None: + self.lora_bias_stacked[i][index, + 0, :lora_bias_i.shape[0]].copy_( + lora_bias_i.T, + non_blocking=True) @classmethod @_not_fully_sharded_can_replace @@ -755,8 +743,8 @@ def can_replace_layer(cls, source_layer: nn.Module, packed_modules_list) == 1 -class MergedQKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): - """ColumnParallelLinear layer that is composed of 3 sublayers (slices) +class MergedQKVParallelLinearWithLora(MergedColumnParallelLinearWithLoRA): + """MergedColumnParallelLinear layer that is composed of 3 sublayers (slices) packed together in qkv proj fashion (q_proj + k_proj + v_proj -> qkv_proj). @@ -773,22 +761,6 @@ def __init__(self, base_layer: QKVParallelLinear) -> None: self.tp_size = get_tensor_model_parallel_world_size() self.tp_rank = get_tensor_model_parallel_rank() - def create_lora_weights( - self, - max_loras: int, - lora_config: LoRAConfig, - model_config: Optional[PretrainedConfig] = None, - ) -> None: - """ - The main reason for overloading this function is to handle inconsistent - weight dimensions in qkv lora. 
- """ - self.lora_config = lora_config - - if not (len(self.base_layer.output_sizes) == self.n_slices == 3): - raise ValueError( - "LoRAColumnParallelLinear3Slice requires 3 slices.") - self.q_proj_shard_size = (self.base_layer.num_heads * self.base_layer.head_size) self.kv_proj_shard_size = (self.base_layer.num_kv_heads * @@ -796,203 +768,28 @@ def create_lora_weights( self.q_shard_id = self.tp_rank self.kv_shard_id = self.tp_rank // self.base_layer.num_kv_head_replicas - lora_a_output_size_per_partition = ( - lora_config.max_lora_rank if not lora_config.fully_sharded_loras - else divide(lora_config.max_lora_rank, self.tp_size)) - # q, k, v - self.lora_a_stacked = ( - torch.zeros( - max_loras, - 1, - lora_a_output_size_per_partition, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - lora_a_output_size_per_partition, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - lora_a_output_size_per_partition, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - ) - self.lora_b_stacked = ( - torch.zeros( - max_loras, - 1, - self.q_proj_shard_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - self.kv_proj_shard_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - self.kv_proj_shard_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ), - ) - if lora_config.bias_enabled: - self.lora_bias_stacked = ( - torch.zeros( - max_loras, - 1, - self.q_proj_shard_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - self.kv_proj_shard_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - self.kv_proj_shard_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - ) self.output_slices = ( self.q_proj_shard_size, self.kv_proj_shard_size, self.kv_proj_shard_size, ) - self.packed_indices: Optional[torch.Tensor] = None - self.standard_indices: Optional[torch.Tensor] = None - # lazily initialized. 
- self.indices: torch.Tensor - self.indices_len: List[int] - - def slice_lora_a( - self, lora_a: List[Union[torch.Tensor, None]] - ) -> List[Union[torch.Tensor, None]]: - return lora_a - - def slice_lora_b( - self, lora_b: List[Union[torch.Tensor, None]] - ) -> List[Union[torch.Tensor, None]]: - lora_b_q, lora_b_k, lora_b_v = None, None, None - if lora_b[0] is not None: - lora_b_q = lora_b[0][:, self.q_proj_shard_size * - self.q_shard_id:self.q_proj_shard_size * - (self.q_shard_id + 1), ] - if lora_b[1] is not None: - lora_b_k = lora_b[1][:, self.kv_proj_shard_size * - self.kv_shard_id:self.kv_proj_shard_size * - (self.kv_shard_id + 1), ] - if lora_b[2] is not None: - lora_b_v = lora_b[2][:, self.kv_proj_shard_size * - self.kv_shard_id:self.kv_proj_shard_size * - (self.kv_shard_id + 1), ] - lora_b = [lora_b_q, lora_b_k, lora_b_v] - return lora_b - - def slice_bias( - self, bias: List[Union[torch.Tensor, - None]]) -> List[Union[torch.Tensor, None]]: - bias_q, bias_k, bias_v = bias - if bias_q is not None: - bias_q = bias_q[self.q_proj_shard_size * - self.q_shard_id:self.q_proj_shard_size * - (self.q_shard_id + 1)] - if bias_k is not None: - bias_k = bias_k[self.kv_proj_shard_size * - self.kv_shard_id:self.kv_proj_shard_size * - (self.kv_shard_id + 1)] - if bias_v is not None: - bias_v = bias_v[self.kv_proj_shard_size * - self.kv_shard_id:self.kv_proj_shard_size * - (self.kv_shard_id + 1)] - bias = [bias_q, bias_k, bias_v] - return bias + self.output_ids = ( + self.q_shard_id, + self.kv_shard_id, + self.kv_shard_id, + ) - def set_lora( + def create_lora_weights( self, - index: int, - lora_a: torch.Tensor, - lora_b: torch.Tensor, - embeddings_tensor: Optional[torch.Tensor], - lora_bias: Optional[torch.Tensor] = None, - ): - self.reset_lora(index) - - if self.tp_size > 1: - lora_a = self.slice_lora_a(lora_a) - lora_b = self.slice_lora_b(lora_b) - if lora_bias is not None: - lora_bias = self.slice_bias(lora_bias) - - if lora_b[0] is not None: - lora_b_q = lora_b[0] - self.lora_b_stacked[0][ - index, 0, :lora_b_q.shape[1], :lora_b_q.shape[0]].copy_( - lora_b_q.T, non_blocking=True) - if lora_b[1] is not None: - lora_b_k = lora_b[1] - self.lora_b_stacked[1][ - index, 0, :lora_b_k.shape[1], :lora_b_k.shape[0]].copy_( - lora_b_k.T, non_blocking=True) - if lora_b[2] is not None: - lora_b_v = lora_b[2] - self.lora_b_stacked[2][ - index, 0, :lora_b_v.shape[1], :lora_b_v.shape[0]].copy_( - lora_b_v.T, non_blocking=True) - - if lora_a[0] is not None: - self.lora_a_stacked[0][ - index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_( - lora_a[0].T, non_blocking=True) - if lora_a[1] is not None: - self.lora_a_stacked[1][ - index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( - lora_a[1].T, non_blocking=True) - if lora_a[2] is not None: - self.lora_a_stacked[2][ - index, 0, :lora_a[2].shape[1], :lora_a[2].shape[0]].copy_( - lora_a[2].T, non_blocking=True) - - if lora_bias is not None: - self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], - self.lora_bias_stacked) - if lora_bias[0] is not None: - self.lora_bias_stacked[0][index, - 0, :lora_bias[0].shape[0]].copy_( - lora_bias[0].T, - non_blocking=True) - if lora_bias[1] is not None: - self.lora_bias_stacked[1][index, - 0, :lora_bias[1].shape[0]].copy_( - lora_bias[1].T, - non_blocking=True) - if lora_bias[2] is not None: - self.lora_bias_stacked[2][index, - 0, :lora_bias[2].shape[0]].copy_( - lora_bias[2].T, - non_blocking=True) + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + """ + 
The main reason for overloading this function is to handle inconsistent + weight dimensions in qkv lora. + """ + super().create_lora_weights(max_loras, lora_config, model_config) @classmethod @_not_fully_sharded_can_replace From bf0e382e16065edebbbb414f7889d31523a569e1 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sat, 7 Dec 2024 22:22:52 +0800 Subject: [PATCH 260/397] [Model] Composite weight loading for multimodal Qwen2 (#10944) Signed-off-by: DarkLight1337 --- vllm/config.py | 10 +- vllm/model_executor/model_loader/loader.py | 4 +- vllm/model_executor/model_loader/utils.py | 10 +- vllm/model_executor/models/qwen2.py | 17 +- vllm/model_executor/models/qwen2_audio.py | 117 ++++---------- vllm/model_executor/models/qwen2_vl.py | 179 ++++++++++----------- vllm/model_executor/models/utils.py | 15 +- 7 files changed, 147 insertions(+), 205 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index fe4c85441fced..db7046ab2c22d 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2472,7 +2472,15 @@ def _get_quantization_config( return quant_config return None - def with_hf_config(self, hf_config: PretrainedConfig) -> "VllmConfig": + def with_hf_config( + self, + hf_config: PretrainedConfig, + architectures: Optional[list[str]] = None, + ) -> "VllmConfig": + if architectures is not None: + hf_config = copy.deepcopy(hf_config) + hf_config.architectures = architectures + model_config = copy.deepcopy(self.model_config) model_config.hf_config = hf_config diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index a0ea0e5fad3c2..fdc4c6305bd5e 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -101,12 +101,10 @@ def _initialize_model( vllm_config: VllmConfig, *, prefix: str = "", - architectures: Optional[list[str]] = None, ) -> nn.Module: """Initialize a model with the given configurations.""" model_config = vllm_config.model_config - model_class, _ = get_model_architecture(model_config, - architectures=architectures) + model_class, _ = get_model_architecture(model_config) signatures = inspect.signature(model_class.__init__) all_params = [param.name for param in signatures.parameters.values()] diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 864dd04e79921..cfb89e0f336bc 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -1,6 +1,6 @@ """Utilities for selecting and loading models.""" import contextlib -from typing import Optional, Tuple, Type +from typing import Tuple, Type import torch from torch import nn @@ -20,12 +20,8 @@ def set_default_torch_dtype(dtype: torch.dtype): def get_model_architecture( - model_config: ModelConfig, - *, - architectures: Optional[list[str]] = None, -) -> Tuple[Type[nn.Module], str]: - if architectures is None: - architectures = getattr(model_config.hf_config, "architectures", []) + model_config: ModelConfig) -> Tuple[Type[nn.Module], str]: + architectures = getattr(model_config.hf_config, "architectures", []) # Special handling for quantized Mixtral. # FIXME(woosuk): This is a temporary hack. 
diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 7d4cc4b69e614..3ce4eb5869f21 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -444,14 +444,17 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model = Qwen2Model(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) - if config.tie_word_embeddings: - self.lm_head = self.model.embed_tokens + if get_pp_group().is_last_rank: + if config.tie_word_embeddings: + self.lm_head = self.model.embed_tokens + else: + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config, + prefix=maybe_prefix( + prefix, "lm_head")) else: - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config, - prefix=maybe_prefix( - prefix, "lm_head")) + self.lm_head = PPMissingLayer() self.logits_processor = LogitsProcessor(config.vocab_size) self.sampler = get_sampler() diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index a0605fee82aca..48a2d470414b9 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -19,7 +19,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Qwen2-Audio model compatible with HuggingFace weights.""" -from functools import lru_cache +from functools import cached_property, lru_cache from typing import (Iterable, List, Mapping, Optional, Set, Tuple, TypedDict, Union) @@ -34,12 +34,7 @@ from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) from vllm.logger import init_logger -from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead -from vllm.model_executor.model_loader.weight_utils import ( - default_weight_loader, maybe_remap_kv_scale_name) -from vllm.model_executor.models.qwen2 import Qwen2Model from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.inputs import NestedTensors @@ -47,15 +42,11 @@ from vllm.sequence import IntermediateTensors, SequenceData from .interfaces import SupportsMultiModal, SupportsPP -from .utils import merge_multimodal_embeddings +from .utils import (AutoWeightsLoader, init_vllm_registered_model, + maybe_prefix, merge_multimodal_embeddings) logger = init_logger(__name__) -_KEYS_TO_MODIFY_MAPPING = { - "language_model.lm_head": "lm_head", - "language_model.model": "language_model", -} - # # === Audio Inputs === # class Qwen2AudioInputs(TypedDict): @@ -281,25 +272,23 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.quant_config = quant_config - self.language_model = Qwen2Model( - vllm_config=vllm_config.with_hf_config(config.text_config), - prefix=prefix) - self.unpadded_vocab_size = config.text_config.vocab_size - if config.text_config.tie_word_embeddings: - self.lm_head = self.language_model.embed_tokens - else: - self.lm_head = ParallelLMHead(config.text_config.vocab_size, - config.text_config.hidden_size, - quant_config=quant_config) - logit_scale = getattr(config, "logit_scale", 1.0) - self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, - config.text_config.vocab_size, - logit_scale) - self.sampler = 
get_sampler() + self.language_model = init_vllm_registered_model( + vllm_config=vllm_config, + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + architectures=["Qwen2ForCausalLM"], + ) self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) + @cached_property + def sampler(self): + if hasattr(self.language_model, "sampler"): + return self.language_model.sampler + + return get_sampler() + def _validate_and_reshape_mm_tensor(self, mm_input: Union[torch.Tensor, List[torch.Tensor]], @@ -414,72 +403,30 @@ def forward( multimodal_embeddings) input_ids = None - hidden_states = self.language_model(input_ids, - positions, - kv_caches, - attn_metadata, - intermediate_tensors, - inputs_embeds=inputs_embeds) + hidden_states = self.language_model.model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) return hidden_states - def compute_logits(self, hidden_states: torch.Tensor, - sampling_metadata: SamplingMetadata) -> torch.Tensor: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[torch.Tensor]: + return self.language_model.compute_logits(hidden_states, + sampling_metadata) def sample( self, logits: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[SamplerOutput]: - next_tokens = self.sampler(logits, sampling_metadata) - return next_tokens + return self.language_model.sample(logits, sampling_metadata) def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - params_dict = dict(self.named_parameters(remove_duplicate=False)) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - if (self.config.text_config.tie_word_embeddings - and "lm_head.weight" in name): - continue - for key_to_modify, new_key in _KEYS_TO_MODIFY_MAPPING.items(): - if key_to_modify in name: - name = name.replace(key_to_modify, new_key) - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name or 'audio' in name: - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - # Remapping the name of FP8 kv-scale. 
- name = maybe_remap_kv_scale_name(name, params_dict) - if name is None: - continue - - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params + loader = AutoWeightsLoader(self) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 27175dbae7483..cfc90cdab01e4 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -21,7 +21,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Qwen2-VL model compatible with HuggingFace weights.""" -from functools import partial +from functools import cached_property, partial from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, Optional, Set, Tuple, Type, TypedDict, Union) @@ -40,7 +40,7 @@ from vllm.attention import AttentionMetadata from vllm.config import VllmConfig -from vllm.distributed import get_pp_group, parallel_state +from vllm.distributed import parallel_state from vllm.distributed import utils as dist_utils from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) @@ -49,15 +49,12 @@ from vllm.model_executor.layers.activation import QuickGELU from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) -from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.quantization.gptq import GPTQConfig from vllm.model_executor.layers.quantization.gptq_marlin import ( GPTQMarlinConfig) from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.models.qwen2 import Qwen2Model from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.image import cached_get_image_processor from vllm.multimodal.inputs import (MultiModalData, MultiModalDataDict, @@ -69,9 +66,8 @@ from vllm.transformers_utils.processor import cached_get_processor from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsPP -from .utils import (PPMissingLayer, get_vit_attn_backend, - is_pp_missing_parameter, - make_empty_intermediate_tensors_factory, maybe_prefix) +from .utils import (AutoWeightsLoader, WeightsMapper, get_vit_attn_backend, + init_vllm_registered_model, maybe_prefix) logger = init_logger(__name__) @@ -506,6 +502,8 @@ def __init__( mlp_ratio: float = vision_config.mlp_ratio self.spatial_merge_size = spatial_merge_size + self.num_heads = num_heads + self.embed_dim = embed_dim self.patch_embed = Qwen2VisionPatchEmbed( patch_size=patch_size, @@ -595,6 +593,53 @@ def forward( x = self.merger(x) return x + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ] + params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() + + for name, loaded_weight in weights: + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, 
param_name) + + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + if name.endswith("qkv.weight"): + visual_num_heads = self.num_heads + visual_embed_dim = self.embed_dim + head_size = visual_embed_dim // visual_num_heads + loaded_weight = loaded_weight.view(3, visual_num_heads, + head_size, + visual_embed_dim) + loaded_weight = loaded_weight.transpose(0, 1) + loaded_weight = loaded_weight.reshape(-1, visual_embed_dim) + elif name.endswith("qkv.bias"): + visual_num_heads = self.num_heads + visual_embed_dim = self.embed_dim + head_size = visual_embed_dim // visual_num_heads + loaded_weight = loaded_weight.view(3, visual_num_heads, + head_size) + loaded_weight = loaded_weight.transpose(0, 1) + loaded_weight = loaded_weight.reshape(-1) + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + # === Vision input helpers === # @@ -1082,27 +1127,21 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): prefix=maybe_prefix(prefix, "visual"), ) - self.model = Qwen2Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + self.language_model = init_vllm_registered_model( + vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "language_model"), + architectures=["Qwen2ForCausalLM"], + ) - if get_pp_group().is_last_rank: - if config.tie_word_embeddings: - self.lm_head = self.model.embed_tokens - else: - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config, - prefix=maybe_prefix( - prefix, "lm_head")) - else: - self.lm_head = PPMissingLayer() + self.make_empty_intermediate_tensors = ( + self.language_model.make_empty_intermediate_tensors) - self.logits_processor = LogitsProcessor(config.vocab_size) - self.sampler = get_sampler() + @cached_property + def sampler(self): + if hasattr(self.language_model, "sampler"): + return self.language_model.sampler - self.make_empty_intermediate_tensors = ( - make_empty_intermediate_tensors_factory( - ["hidden_states", "residual"], config.hidden_size)) + return get_sampler() def _maybe_ignore_quant_config(self, quant_config: QuantizationConfig): # GPTQ configs do not have a list of ignored modules, however AutoGPTQ @@ -1261,7 +1300,7 @@ def get_input_embeddings( multimodal_embeddings: Optional[List[Tuple[NestedTensors, str]]] = None, ) -> torch.Tensor: - inputs_embeds = self.model.get_input_embeddings(input_ids) + inputs_embeds = self.language_model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: for embeddings, modality in multimodal_embeddings: if modality == "image": @@ -1330,7 +1369,7 @@ def forward( multimodal_embeddings) input_ids = None - hidden_states = self.model( + hidden_states = self.language_model.model( input_ids=input_ids, positions=positions, kv_caches=kv_caches, @@ -1340,80 +1379,28 @@ def forward( ) return hidden_states - def compute_logits(self, hidden_states: torch.Tensor, - sampling_metadata: SamplingMetadata) -> torch.Tensor: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[torch.Tensor]: + return self.language_model.compute_logits(hidden_states, + sampling_metadata) def sample( self, logits: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[SamplerOutput]: - 
next_tokens = self.sampler(logits, sampling_metadata) - return next_tokens + return self.language_model.sample(logits, sampling_metadata) def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "up_proj", 1), - ("gate_up_proj", "gate_proj", 0), - ] - params_dict = dict(self.named_parameters(remove_duplicate=False)) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - if self.config.tie_word_embeddings and "lm_head.weight" in name: - continue - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - if "visual" in name and name.endswith("qkv.weight"): - visual_num_heads = self.config.vision_config.num_heads - visual_embed_dim = self.config.vision_config.embed_dim - head_size = visual_embed_dim // visual_num_heads - loaded_weight = loaded_weight.view(3, visual_num_heads, - head_size, - visual_embed_dim) - loaded_weight = loaded_weight.transpose(0, 1) - loaded_weight = loaded_weight.reshape(-1, visual_embed_dim) - elif "visual" in name and name.endswith("qkv.bias"): - visual_num_heads = self.config.vision_config.num_heads - visual_embed_dim = self.config.vision_config.embed_dim - head_size = visual_embed_dim // visual_num_heads - loaded_weight = loaded_weight.view(3, visual_num_heads, - head_size) - loaded_weight = loaded_weight.transpose(0, 1) - loaded_weight = loaded_weight.reshape(-1) - try: - # Skip loading extra bias for GPTQ models. 
- if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - except KeyError: - raise ValueError(f"Unexpected weight: {name}") from None - - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + "lm_head.": "language_model.lm_head.", + "model.": "language_model.model.", + }) + + loader = AutoWeightsLoader(self) + return loader.load_weights(weights, mapper=hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 7a1e1f9bf2be4..5ec44955dbd80 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -17,7 +17,7 @@ from vllm.multimodal import MultiModalPlaceholderMap, NestedTensors from vllm.platforms import _Backend, current_platform from vllm.sequence import IntermediateTensors -from vllm.utils import is_pin_memory_available +from vllm.utils import is_pin_memory_available, print_warning_once logger = init_logger(__name__) @@ -251,12 +251,15 @@ def init_vllm_registered_model( """ from vllm.model_executor.model_loader.loader import _initialize_model + if hf_config is None and architectures is not None: + # So that the architectures field is overridden + hf_config = vllm_config.model_config.hf_config + if hf_config is not None: - vllm_config = vllm_config.with_hf_config(hf_config) + vllm_config = vllm_config.with_hf_config(hf_config, + architectures=architectures) - return _initialize_model(vllm_config=vllm_config, - prefix=prefix, - architectures=architectures) + return _initialize_model(vllm_config=vllm_config, prefix=prefix) @overload @@ -592,7 +595,7 @@ def get_vit_attn_backend(support_fa: bool = False) -> _Backend: if is_flash_attn_2_available(): selected_backend = _Backend.FLASH_ATTN else: - logger.warning( + print_warning_once( "Current `vllm-flash-attn` has a bug inside vision module, " "so we use xformers backend instead. You can run " "`pip install flash-attn` to use flash-attention backend.") From 1c768fe53713ef333d74a6645e6a59fb7516134f Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 8 Dec 2024 00:58:02 +0800 Subject: [PATCH 261/397] [Doc] Explicitly state that InternVL 2.5 is supported (#10978) Signed-off-by: DarkLight1337 --- docs/source/models/supported_models.rst | 4 ++-- examples/offline_inference_vision_language.py | 2 +- examples/offline_inference_vision_language_multi_image.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 5b416e04da745..d915def588e08 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -547,9 +547,9 @@ Text Generation - ✅︎ - * - :code:`InternVLChatModel` - - InternVL2 + - InternVL 2.5, Mono-InternVL, InternVL 2.0 - T + I\ :sup:`E+` - - :code:`OpenGVLab/Mono-InternVL-2B`, :code:`OpenGVLab/InternVL2-4B`, :code:`OpenGVLab/InternVL2-8B`, etc. + - :code:`OpenGVLab/InternVL2_5-4B`, :code:`OpenGVLab/Mono-InternVL-2B`, :code:`OpenGVLab/InternVL2-4B`, etc. 
- - ✅︎ * - :code:`LlavaForConditionalGeneration` diff --git a/examples/offline_inference_vision_language.py b/examples/offline_inference_vision_language.py index f08f22eec164a..56209c3c36ed4 100644 --- a/examples/offline_inference_vision_language.py +++ b/examples/offline_inference_vision_language.py @@ -223,7 +223,7 @@ def run_internvl(question: str, modality: str): # Stop tokens for InternVL # models variants may have different stop tokens # please refer to the model card for the correct "stop words": - # https://huggingface.co/OpenGVLab/InternVL2-2B#service + # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] return llm, prompt, stop_token_ids diff --git a/examples/offline_inference_vision_language_multi_image.py b/examples/offline_inference_vision_language_multi_image.py index 788b604cfd4a0..928bbef54eab7 100644 --- a/examples/offline_inference_vision_language_multi_image.py +++ b/examples/offline_inference_vision_language_multi_image.py @@ -165,7 +165,7 @@ def load_internvl(question: str, image_urls: List[str]) -> ModelRequestData: # Stop tokens for InternVL # models variants may have different stop tokens # please refer to the model card for the correct "stop words": - # https://huggingface.co/OpenGVLab/InternVL2-2B#service + # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"] stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] From 39e227c7ae3149eb8345ea1a1ffee672ef76c09a Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 8 Dec 2024 01:10:05 +0800 Subject: [PATCH 262/397] [Model] Update multi-modal processor to support Mantis(LLaVA) model (#10711) Signed-off-by: DarkLight1337 --- .buildkite/test-pipeline.yaml | 2 + docs/source/models/supported_models.rst | 6 +- examples/offline_inference_vision_language.py | 17 +++++ requirements-test.in | 3 - .../vision_language/test_models.py | 30 +++++--- .../vision_language/vlm_utils/core.py | 20 ++++-- .../vision_language/vlm_utils/model_utils.py | 35 +++++++++- .../vision_language/vlm_utils/types.py | 19 ++++-- tests/models/registry.py | 1 + .../vllm_add_dummy_model/my_llava.py | 6 +- vllm/model_executor/models/llava.py | 68 ++++++++++++++++--- vllm/model_executor/models/registry.py | 1 + vllm/multimodal/processing.py | 4 +- vllm/multimodal/registry.py | 41 +---------- 14 files changed, 175 insertions(+), 78 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 936e284d9675a..8f57006214c88 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -362,6 +362,7 @@ steps: - tests/models/embedding/vision_language - tests/models/encoder_decoder/vision_language commands: + - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git - pytest -v -s models/decoder_only/audio_language -m 'core_model or quant_model' - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'core_model or quant_model' - pytest -v -s models/embedding/vision_language -m core_model @@ -377,6 +378,7 @@ steps: - tests/models/embedding/vision_language - tests/models/encoder_decoder/vision_language commands: + - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git - pytest -v -s models/decoder_only/audio_language -m 'not core_model and not quant_model' # HACK - run 
phi3v tests separately to sidestep this transformers bug # https://github.com/huggingface/transformers/issues/34307 diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index d915def588e08..c9b3fa8485ff1 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -555,7 +555,7 @@ Text Generation * - :code:`LlavaForConditionalGeneration` - LLaVA-1.5 - T + I\ :sup:`E+` - - :code:`llava-hf/llava-1.5-7b-hf`, :code:`llava-hf/llava-1.5-13b-hf`, etc. + - :code:`llava-hf/llava-1.5-7b-hf`, :code:`TIGER-Lab/Mantis-8B-siglip-llama3` (see note), etc. - - ✅︎ * - :code:`LlavaNextForConditionalGeneration` @@ -664,6 +664,10 @@ Text Generation .. note:: vLLM currently only supports adding LoRA to the language backbone of multimodal models. +.. note:: + To use :code:`TIGER-Lab/Mantis-8B-siglip-llama3`, you have to install their GitHub repo (:code:`pip install git+https://github.com/TIGER-AI-Lab/Mantis.git`) + and pass :code:`--hf_overrides '{"architectures": ["MantisForConditionalGeneration"]}'` when running vLLM. + .. note:: The official :code:`openbmb/MiniCPM-V-2` doesn't work yet, so we need to use a fork (:code:`HwwwH/MiniCPM-V-2`) for now. For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630 diff --git a/examples/offline_inference_vision_language.py b/examples/offline_inference_vision_language.py index 56209c3c36ed4..c6a274ee5894b 100644 --- a/examples/offline_inference_vision_language.py +++ b/examples/offline_inference_vision_language.py @@ -419,6 +419,22 @@ def run_aria(question: str, modality: str): return llm, prompt, stop_token_ids +# Mantis +def run_mantis(question: str, modality: str): + assert modality == "image" + + llama3_template = '<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' # noqa: E501 + prompt = llama3_template.format(f"{question}\n") + + llm = LLM( + model="TIGER-Lab/Mantis-8B-siglip-llama3", + max_model_len=4096, + hf_overrides={"architectures": ["MantisForConditionalGeneration"]}, + ) + stop_token_ids = [128009] + return llm, prompt, stop_token_ids + + model_example_map = { "llava": run_llava, "llava-next": run_llava_next, @@ -441,6 +457,7 @@ def run_aria(question: str, modality: str): "glm4v": run_glm4v, "idefics3": run_idefics3, "aria": run_aria, + "mantis": run_mantis, } diff --git a/requirements-test.in b/requirements-test.in index 44972866ddc4b..c0b228148ab31 100644 --- a/requirements-test.in +++ b/requirements-test.in @@ -24,9 +24,6 @@ mistral_common[opencv] >= 1.5.0 # required for pixtral test datamodel_code_generator # required for minicpm3 test lm-eval[api]==0.4.4 # required for model evaluation test -# TODO: Add this after fully implementing llava(mantis) -# git+https://github.com/TIGER-AI-Lab/Mantis.git # required for llava(mantis) test - # quantization bitsandbytes>=0.44.0 buildkite-test-collector==0.1.9 diff --git a/tests/models/decoder_only/vision_language/test_models.py b/tests/models/decoder_only/vision_language/test_models.py index 924f19c4448b8..ed8f34a677f84 100644 --- a/tests/models/decoder_only/vision_language/test_models.py +++ b/tests/models/decoder_only/vision_language/test_models.py @@ -34,7 +34,7 @@ "dtype": "half", "max_tokens": 5, "tensor_parallel_size": 2, - "model_kwargs": {"device_map": "auto"}, + "hf_model_kwargs": {"device_map": "auto"}, "image_size_factors": [(.25, 0.5, 1.0)], "distributed_executor_backend": ( "ray", @@ -108,7 +108,7 @@ "cherry_blossom": 
"What is in the picture?", }), auto_cls=AutoModelForVision2Seq, - postprocess_inputs=model_utils.get_key_type_post_processor( + postprocess_inputs=model_utils.cast_dtype_post_processor( "pixel_values" ), vllm_output_post_proc=model_utils.paligemma_vllm_to_hf_output, @@ -151,7 +151,7 @@ "cherry_blossom": "Please infer the season with reason.", }), multi_image_prompt="Describe the two images shortly.", # noqa: E501 - postprocess_inputs=model_utils.get_key_type_post_processor("pixel_values"), + postprocess_inputs=model_utils.cast_dtype_post_processor("pixel_values"), stop_str=["<|im_end|>"], image_size_factors=[(0.10, 0.15)], max_tokens=64, @@ -177,7 +177,7 @@ prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:", max_model_len=4096, auto_cls=AutoModelForVision2Seq, - postprocess_inputs=model_utils.get_key_type_post_processor( + postprocess_inputs=model_utils.cast_dtype_post_processor( "pixel_values" ), # For chameleon, we only compare the sequences @@ -281,7 +281,7 @@ prompt_formatter=lambda vid_prompt: f"<|im_start|>user\n{vid_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501 num_video_frames=16, max_model_len=16384, - postprocess_inputs=model_utils.get_key_type_post_processor( + postprocess_inputs=model_utils.cast_dtype_post_processor( "pixel_values_videos" ), auto_cls=AutoModelForVision2Seq, @@ -306,6 +306,20 @@ vllm_output_post_proc=model_utils.llava_video_vllm_to_hf_output, image_sizes=[((1669, 2560), (2560, 1669), (183, 488), (488, 183))], ), + "mantis": VLMTestInfo( + models=["TIGER-Lab/Mantis-8B-siglip-llama3"], + test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE), + prompt_formatter=lambda img_prompt: f"<|start_header_id|>user<|end_header_id|>\n\n{img_prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", # noqa: E501 + max_model_len=4096, + postprocess_inputs=model_utils.cast_dtype_post_processor( + "pixel_values" + ), + vllm_runner_kwargs={"hf_overrides": {"architectures": ["MantisForConditionalGeneration"]}}, # noqa: E501 + get_stop_token_ids=lambda tok: [128009], + auto_cls=AutoModelForVision2Seq, + vllm_output_post_proc=model_utils.mantis_vllm_to_hf_output, + patch_hf_runner=model_utils.mantis_patch_hf_runner, + ), "minicpmv_25": VLMTestInfo( models=["openbmb/MiniCPM-Llama3-V-2_5"], test_type=VLMTestType.IMAGE, @@ -342,7 +356,7 @@ # max_num_seqs=2, # task="generate", # # use eager mode for hf runner since phi3v didn't work with flash_attn - # model_kwargs={"_attn_implementation": "eager"}, + # hf_model_kwargs={"_attn_implementation": "eager"}, # use_tokenizer_eos=True, # vllm_output_post_proc=model_utils.phi3v_vllm_to_hf_output, # num_logprobs=10, @@ -373,7 +387,7 @@ prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:", max_model_len=4096, auto_cls=AutoModelForVision2Seq, - postprocess_inputs=model_utils.get_key_type_post_processor( + postprocess_inputs=model_utils.cast_dtype_post_processor( "pixel_values" ), vllm_output_post_proc = lambda vllm_output, model: vllm_output[:2], @@ -438,7 +452,7 @@ test_type=VLMTestType.CUSTOM_INPUTS, max_model_len=16384, max_num_seqs=2, - postprocess_inputs=model_utils.get_key_type_post_processor( + postprocess_inputs=model_utils.cast_dtype_post_processor( "pixel_values" ), auto_cls=AutoModelForVision2Seq, diff --git a/tests/models/decoder_only/vision_language/vlm_utils/core.py b/tests/models/decoder_only/vision_language/vlm_utils/core.py index 88349ef9a3a69..54b7b0733210f 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/core.py +++ 
b/tests/models/decoder_only/vision_language/vlm_utils/core.py @@ -3,9 +3,11 @@ import torch from PIL.Image import Image -from transformers import AutoTokenizer, BatchEncoding +from transformers import AutoTokenizer, BatchEncoding, PreTrainedTokenizerBase from transformers.models.auto.auto_factory import _BaseAutoModelClass +from vllm.config import TaskOption + from .....conftest import HfRunner, VllmRunner from .types import RunnerOutput @@ -28,13 +30,15 @@ def run_test( use_tokenizer_eos: bool, postprocess_inputs: Callable[[BatchEncoding], BatchEncoding], comparator: Callable[..., None], - get_stop_token_ids: Optional[Callable[[AutoTokenizer], List[int]]], + get_stop_token_ids: Optional[Callable[[PreTrainedTokenizerBase], + List[int]]], stop_str: Optional[List[str]], tokenizer_mode: str, limit_mm_per_prompt: Dict[str, int], - model_kwargs: Optional[Dict[str, Any]], + vllm_runner_kwargs: Optional[Dict[str, Any]], + hf_model_kwargs: Optional[Dict[str, Any]], patch_hf_runner: Optional[Callable[[HfRunner], HfRunner]], - task: str = "auto", + task: TaskOption = "auto", runner_mm_key: str = "images", distributed_executor_backend: Optional[str] = None, tensor_parallel_size: int = 1, @@ -58,6 +62,9 @@ def run_test( if stop_str: vllm_kwargs["stop"] = stop_str + if vllm_runner_kwargs is None: + vllm_runner_kwargs = {} + with vllm_runner(model, tokenizer_mode=tokenizer_mode, max_model_len=max_model_len, @@ -67,7 +74,8 @@ def run_test( tensor_parallel_size=tensor_parallel_size, distributed_executor_backend=distributed_executor_backend, enforce_eager=enforce_eager, - task=task) as vllm_model: + task=task, + **vllm_runner_kwargs) as vllm_model: for prompts, media in vllm_inputs: vllm_kwargs[runner_mm_key] = media vllm_output = vllm_model.generate_greedy_logprobs( @@ -78,7 +86,7 @@ def run_test( dtype=dtype, auto_cls=auto_cls, postprocess_inputs=postprocess_inputs, - model_kwargs=model_kwargs) + model_kwargs=hf_model_kwargs) # Some models need to patch things like the model processor, e.g., internvl if patch_hf_runner is not None: diff --git a/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py b/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py index 15f15dd7d8030..3eca8fb9dcb1a 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py +++ b/tests/models/decoder_only/vision_language/vlm_utils/model_utils.py @@ -126,6 +126,16 @@ def llava_onevision_vllm_to_hf_output(vllm_output: RunnerOutput, return hf_output_ids, hf_output_str, out_logprobs +def mantis_vllm_to_hf_output(vllm_output: RunnerOutput, + model: str) -> RunnerOutput: + """Sanitize vllm output [mantis] to compare with hf output.""" + output_ids, output_str, out_logprobs = vllm_output + + hf_output_str = output_str + "<|eot_id|>" + + return output_ids, hf_output_str, out_logprobs + + def phi3v_vllm_to_hf_output(vllm_output: RunnerOutput, model: str) -> RunnerOutput: """Sanitize vllm output [phi3v] to be comparable with hf output.""" @@ -184,7 +194,7 @@ def get_llava_embeddings(image_assets: _ImageAssets): ####### postprocessors to run on HF BatchEncoding -def get_key_type_post_processor( +def cast_dtype_post_processor( hf_inp_key: str) -> Callable[[BatchEncoding, str], BatchEncoding]: """Gets a handle to a post processor which converts a given key into a target data type.""" @@ -418,3 +428,26 @@ def _internvl_generate( ) return outputs + + +def mantis_patch_hf_runner(hf_model: HfRunner) -> HfRunner: + from mantis.models.mllava import MLlavaProcessor + + hf_model.processor = 
MLlavaProcessor.from_pretrained(hf_model.model_name) + + orig_generate = hf_model.model.generate + tokenizer = hf_model.processor.tokenizer + + def _generate(self, *args, **kwargs): + return orig_generate( + *args, + **kwargs, + eos_token_id=[ + tokenizer.eos_token_id, + tokenizer.convert_tokens_to_ids("<|eot_id|>"), + ], + ) + + hf_model.model.generate = types.MethodType(_generate, hf_model.model) + + return hf_model diff --git a/tests/models/decoder_only/vision_language/vlm_utils/types.py b/tests/models/decoder_only/vision_language/vlm_utils/types.py index d410fa8c653ce..e2e0c6390fcb9 100644 --- a/tests/models/decoder_only/vision_language/vlm_utils/types.py +++ b/tests/models/decoder_only/vision_language/vlm_utils/types.py @@ -7,9 +7,11 @@ import torch from PIL.Image import Image from pytest import MarkDecorator -from transformers import AutoModelForCausalLM, AutoTokenizer, BatchEncoding +from transformers import (AutoModelForCausalLM, BatchEncoding, + PreTrainedTokenizerBase) from transformers.models.auto.auto_factory import _BaseAutoModelClass +from vllm.config import TaskOption from vllm.sequence import SampleLogprobs from vllm.utils import identity @@ -66,7 +68,7 @@ class ImageSizeWrapper(NamedTuple): class VLMTestInfo(NamedTuple): """Holds the configuration for 1+ tests for one model architecture.""" - models: Union[List[str]] + models: List[str] test_type: Union[VLMTestType, Iterable[VLMTestType]] # Should be None only if this is a CUSTOM_INPUTS test @@ -92,18 +94,20 @@ class VLMTestInfo(NamedTuple): enforce_eager: bool = True max_model_len: int = 1024 max_num_seqs: int = 256 - task: str = "auto" + task: TaskOption = "auto" tensor_parallel_size: int = 1 + vllm_runner_kwargs: Optional[Dict[str, Any]] = None # Optional callable which gets a list of token IDs from the model tokenizer - get_stop_token_ids: Optional[Callable[[AutoTokenizer], List[int]]] = None + get_stop_token_ids: Optional[Callable[[PreTrainedTokenizerBase], + List[int]]] = None # Optional list of strings to stop generation, useful when stop tokens are # not special tokens in the tokenizer stop_str: Optional[List[str]] = None # Exposed options for HF runner - model_kwargs: Optional[Dict[str, Any]] = None - # Indicates we should explicitly pass the EOS from the tokeniezr + hf_model_kwargs: Optional[Dict[str, Any]] = None + # Indicates we should explicitly pass the EOS from the tokenizer use_tokenizer_eos: bool = False auto_cls: Type[_BaseAutoModelClass] = AutoModelForCausalLM # Callable to pass to the HF runner to run on inputs; for now, we also pass @@ -164,6 +168,7 @@ def get_non_parametrized_runner_kwargs(self): "max_num_seqs": self.max_num_seqs, "task": self.task, "tensor_parallel_size": self.tensor_parallel_size, + "vllm_runner_kwargs": self.vllm_runner_kwargs, "hf_output_post_proc": self.hf_output_post_proc, "vllm_output_post_proc": self.vllm_output_post_proc, "auto_cls": self.auto_cls, @@ -171,8 +176,8 @@ def get_non_parametrized_runner_kwargs(self): "postprocess_inputs": self.postprocess_inputs, "comparator": self.comparator, "get_stop_token_ids": self.get_stop_token_ids, + "hf_model_kwargs": self.hf_model_kwargs, "stop_str": self.stop_str, - "model_kwargs": self.model_kwargs, "patch_hf_runner": self.patch_hf_runner, "tokenizer_mode": self.tokenizer_mode } diff --git a/tests/models/registry.py b/tests/models/registry.py index 461f453d8b1c3..a89518820045f 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -176,6 +176,7 @@ class _HfExamplesInfo: "LlavaNextForConditionalGeneration": 
_HfExamplesInfo("llava-hf/llava-v1.6-mistral-7b-hf"), # noqa: E501 "LlavaNextVideoForConditionalGeneration": _HfExamplesInfo("llava-hf/LLaVA-NeXT-Video-7B-hf"), # noqa: E501 "LlavaOnevisionForConditionalGeneration": _HfExamplesInfo("llava-hf/llava-onevision-qwen2-0.5b-ov-hf"), # noqa: E501 + "MantisForConditionalGeneration": _HfExamplesInfo("TIGER-Lab/Mantis-8B-siglip-llama3"), # noqa: E501 "MiniCPMV": _HfExamplesInfo("openbmb/MiniCPM-Llama3-V-2_5", trust_remote_code=True), "MolmoForCausalLM": _HfExamplesInfo("allenai/Molmo-7B-D-0924", diff --git a/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py b/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py index f2fc0755cae01..2f4194a63fc25 100644 --- a/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py +++ b/tests/plugins/vllm_add_dummy_model/vllm_add_dummy_model/my_llava.py @@ -3,16 +3,14 @@ import torch from vllm.model_executor.models.llava import (LlavaForConditionalGeneration, - create_metadata_for_llava, - dummy_mm_kwargs_for_llava, + LlavaProcessor, get_max_llava_image_tokens) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) -@MULTIMODAL_REGISTRY.register_processor_by_metadata(create_metadata_for_llava, - dummy_mm_kwargs_for_llava) +@MULTIMODAL_REGISTRY.register_processor(LlavaProcessor) class MyLlava(LlavaForConditionalGeneration): def compute_logits( diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 953b89f1842af..65c6bd07bfff0 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -22,10 +22,11 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import MultiModalKwargs, NestedTensors -from vllm.multimodal.processing import (InputProcessingContext, +from vllm.multimodal.processing import (BaseMultiModalProcessor, + InputProcessingContext, ModalityProcessingMetadata, MultiModalProcessingMetadata, - MultiModalProcessor, PromptReplacement) + PromptReplacement) from vllm.sequence import IntermediateTensors from .clip import (CLIPVisionModel, dummy_image_for_clip, @@ -163,7 +164,13 @@ def get_repl_count( } -class LlavaProcessor(MultiModalProcessor): +class LlavaProcessor(BaseMultiModalProcessor): + + def __init__(self, ctx: InputProcessingContext) -> None: + super().__init__( + ctx=ctx, + metadata=create_metadata_for_llava(ctx), + ) def _patch_pixtral_processor(self, hf_processor: PixtralProcessor): if getattr(hf_processor, "__is_patched__", False): @@ -193,7 +200,30 @@ def _get_dummy_mm_kwargs( self, mm_counts: Mapping[str, int], ) -> MultiModalKwargs: - return dummy_mm_kwargs_for_llava(self.ctx, mm_counts) + hf_config = self.ctx.get_hf_config(LlavaConfig) + vision_config = hf_config.vision_config + num_images = mm_counts["image"] + + if isinstance(vision_config, CLIPVisionConfig): + data = dummy_image_for_clip(vision_config, num_images) + elif isinstance(vision_config, SiglipVisionConfig): + data = dummy_image_for_siglip(vision_config, num_images) + elif isinstance(vision_config, PixtralVisionConfig): + data = dummy_image_for_pixtral_hf(vision_config, num_images) + else: + msg = f"Unsupported vision config: {type(vision_config)}" + raise NotImplementedError(msg) + + hf_processor = self._get_hf_processor() + image_processor = hf_processor.image_processor # type: ignore + 
hf_inputs = image_processor.preprocess(data['image'], + return_tensors="pt") + is_pixtral = isinstance(hf_processor, PixtralProcessor) + + return MultiModalKwargs( + **hf_inputs, + is_pixtral=torch.tensor(is_pixtral), + ) class LlavaLikeConfig(Protocol): @@ -277,10 +307,7 @@ def init_vision_tower_for_llava( @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) -@MULTIMODAL_REGISTRY.register_processor(lambda ctx: LlavaProcessor( - ctx=ctx, - metadata=create_metadata_for_llava(ctx), -)) +@MULTIMODAL_REGISTRY.register_processor(LlavaProcessor) class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): # BitandBytes specific attributes bitsandbytes_stacked_params_mapping = { @@ -559,3 +586,28 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) return loader.load_weights(weights) + + +class MantisProcessor(LlavaProcessor): + + def _get_hf_processor(self) -> ProcessorMixin: + try: + from mantis.models.mllava import MLlavaProcessor + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + "You need to `pip install " + "git+https://github.com/TIGER-AI-Lab/Mantis.git` " + "to use this model") from exc + + processor = MLlavaProcessor.from_pretrained( + self.ctx.model_config.tokenizer) + assert isinstance(processor, ProcessorMixin) + return processor + + +# To use this model, please use +# `--hf_overrides '{"architectures": ["MantisForConditionalGeneration"]}'` +@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) +@MULTIMODAL_REGISTRY.register_processor(MantisProcessor) +class MantisForConditionalGeneration(LlavaForConditionalGeneration): + pass diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index c66fbce018a62..e69596aa915b5 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -152,6 +152,7 @@ "LlavaNextForConditionalGeneration": ("llava_next", "LlavaNextForConditionalGeneration"), # noqa: E501 "LlavaNextVideoForConditionalGeneration": ("llava_next_video", "LlavaNextVideoForConditionalGeneration"), # noqa: E501 "LlavaOnevisionForConditionalGeneration": ("llava_onevision", "LlavaOnevisionForConditionalGeneration"), # noqa: E501 + "MantisForConditionalGeneration": ("llava", "MantisForConditionalGeneration"), # noqa: E501 "MiniCPMV": ("minicpmv", "MiniCPMV"), "MolmoForCausalLM": ("molmo", "MolmoForCausalLM"), "NVLM_D": ("nvlm_d", "NVLM_D_Model"), diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 4a1737991534f..c3a95d60e6fe6 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -529,9 +529,9 @@ def iter_placeholders( yield placeholder -class MultiModalProcessor(ABC): +class BaseMultiModalProcessor(ABC): """ - Helper class to process multi-modal inputs to be used in vLLM. + Abstract base class to process multi-modal inputs to be used in vLLM. 
""" def __init__( diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py index f51da8972d15b..6ab6c0fe2f12e 100644 --- a/vllm/multimodal/registry.py +++ b/vllm/multimodal/registry.py @@ -15,7 +15,7 @@ from .base import MultiModalInputMapper, MultiModalPlugin, MultiModalTokensCalc from .image import ImagePlugin from .inputs import MultiModalDataDict, MultiModalKwargs, NestedTensors -from .processing import MultiModalProcessingMetadata, MultiModalProcessor +from .processing import BaseMultiModalProcessor from .video import VideoPlugin if TYPE_CHECKING: @@ -26,7 +26,7 @@ N = TypeVar("N", bound=Type[nn.Module]) MultiModalProcessorFactory: TypeAlias = Callable[[InputProcessingContext], - MultiModalProcessor] + BaseMultiModalProcessor] """ Constructs a :class:`MultiModalProcessor` instance from the context. @@ -311,41 +311,6 @@ def wrapper(model_cls: N) -> N: return wrapper - def register_processor_by_metadata( - self, - metadata_factory: Callable[[InputProcessingContext], - MultiModalProcessingMetadata], - get_dummy_mm_kwargs: Callable[ - [InputProcessingContext, Mapping[str, int]], MultiModalKwargs], - ): - """ - Convenience method to register a multi-modal processor to a model class - according to a function that constructs its metadata. - - When the model receives multi-modal data, the provided function is - invoked to transform the data into a dictionary of model inputs. - - See also: - - :ref:`input_processing_pipeline` - - :ref:`enabling_multimodal_inputs` - """ - - class ConcreteMultiModalProcessor(MultiModalProcessor): - - def _get_dummy_mm_kwargs( - self, - mm_counts: Mapping[str, int], - ) -> MultiModalKwargs: - return get_dummy_mm_kwargs(self.ctx, mm_counts) - - def factory(ctx: InputProcessingContext): - return ConcreteMultiModalProcessor( - ctx=ctx, - metadata=metadata_factory(ctx), - ) - - return self.register_processor(factory) - def has_processor(self, model_config: "ModelConfig") -> bool: """ Test whether a multi-modal processor is defined for a specific model. @@ -360,7 +325,7 @@ def create_processor( self, model_config: "ModelConfig", tokenizer: AnyTokenizer, - ) -> MultiModalProcessor: + ) -> BaseMultiModalProcessor: """ Create a multi-modal processor for a specific model and tokenizer. """ From c889d5888bf6bbfbe3f4ea55bf27ce84a239c3d0 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 8 Dec 2024 01:20:49 +0800 Subject: [PATCH 263/397] [Doc] Explicitly state that PP isn't compatible with speculative decoding yet (#10975) Signed-off-by: DarkLight1337 --- docs/source/usage/spec_decode.rst | 3 +++ tests/distributed/test_pipeline_parallel.py | 16 +++++++++++++--- vllm/model_executor/models/exaone.py | 3 ++- vllm/model_executor/models/granite.py | 5 +++-- vllm/model_executor/models/llama.py | 3 ++- vllm/model_executor/models/nemotron.py | 4 +++- vllm/model_executor/models/solar.py | 3 ++- vllm/spec_decode/spec_decode_worker.py | 4 ++++ 8 files changed, 32 insertions(+), 9 deletions(-) diff --git a/docs/source/usage/spec_decode.rst b/docs/source/usage/spec_decode.rst index 67e8ede7654b7..f1f1917f974bb 100644 --- a/docs/source/usage/spec_decode.rst +++ b/docs/source/usage/spec_decode.rst @@ -8,6 +8,9 @@ Speculative decoding not usually yield inter-token latency reductions for all prompt datasets or sampling parameters. The work to optimize it is ongoing and can be followed in `this issue. `_ +.. warning:: + Currently, speculative decoding in vLLM is not compatible with pipeline parallelism. + This document shows how to use `Speculative Decoding `_ with vLLM. 
Speculative decoding is a technique which improves inter-token latency in memory-bound LLM inference. diff --git a/tests/distributed/test_pipeline_parallel.py b/tests/distributed/test_pipeline_parallel.py index 386877e0e0a2c..b818ca921fcb0 100644 --- a/tests/distributed/test_pipeline_parallel.py +++ b/tests/distributed/test_pipeline_parallel.py @@ -247,9 +247,19 @@ def _compare_tp( *, method: Literal["generate", "encode"], ): - tp_size, pp_size, eager_mode, chunked_prefill = parallel_setup - multi_node_only, trust_remote_code, tokenizer_mode, \ - load_format, hf_overrides = test_options + ( + tp_size, + pp_size, + eager_mode, + chunked_prefill, + ) = parallel_setup + ( + multi_node_only, + trust_remote_code, + tokenizer_mode, + load_format, + hf_overrides, + ) = test_options if num_gpus_available < tp_size * pp_size: pytest.skip(f"Need at least {tp_size} x {pp_size} GPUs") diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index 5ca26d53a17e7..0398f0943a70a 100644 --- a/vllm/model_executor/models/exaone.py +++ b/vllm/model_executor/models/exaone.py @@ -473,10 +473,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index bd2394e71c973..f9e0443b9a508 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -400,16 +400,17 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.lm_head.weight = self.model.embed_tokens.weight logit_scale = getattr(config, "logit_scale", 1.0) - if hasattr(config, "logits_scaling"): logit_scale /= config.logits_scaling + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, scale=logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.sampler = get_sampler() + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 31dfb235ae877..733b1bc7d80ac 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -540,10 +540,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index c7b4c22b6896b..34cb9981c167b 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -435,9 +435,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + + self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/solar.py 
b/vllm/model_executor/models/solar.py index f58710d215056..caae0b65d7d10 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -443,10 +443,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) diff --git a/vllm/spec_decode/spec_decode_worker.py b/vllm/spec_decode/spec_decode_worker.py index ced7f53827665..2689802161987 100644 --- a/vllm/spec_decode/spec_decode_worker.py +++ b/vllm/spec_decode/spec_decode_worker.py @@ -54,6 +54,10 @@ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker": speculative_config: SpeculativeConfig = vllm_config.speculative_config assert speculative_config is not None + if vllm_config.parallel_config.pipeline_parallel_size > 1: + raise NotImplementedError("Speculative decoding is currently " + "incompatible with pipeline parallelism") + draft_worker_kwargs = kwargs.copy() kwargs["model_runner_cls"] = TargetModelRunner From 78029b34ed1be46baf06f92c9e971ea1961d0867 Mon Sep 17 00:00:00 2001 From: zhou fan <1247714429@qq.com> Date: Sun, 8 Dec 2024 01:21:18 +0800 Subject: [PATCH 264/397] [BugFix][Kernel]: fix illegal memory access in causal_conv1d when conv_states is None (#10928) Signed-off-by: xffxff <1247714429@qq.com> --- csrc/mamba/causal_conv1d/causal_conv1d.cu | 2 +- tests/kernels/test_causal_conv1d.py | 39 +++++++++++++---------- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/csrc/mamba/causal_conv1d/causal_conv1d.cu b/csrc/mamba/causal_conv1d/causal_conv1d.cu index 498d069c05f0d..dd1e6de2e0180 100644 --- a/csrc/mamba/causal_conv1d/causal_conv1d.cu +++ b/csrc/mamba/causal_conv1d/causal_conv1d.cu @@ -424,7 +424,7 @@ void causal_conv1d_fwd_kernel(ConvParamsBase params) { // and the one before it (chunk = n_chunks - 1 and chunk = n_chunks - 2), // (which occurs when `final_state_position` is a non-positivie index) // we load the correct data from smem_exchange from both chunks, the last chunk iteration and the one before it - if (final_state_position < 0 && seqlen > kWidth){ + if (conv_states != nullptr && final_state_position < 0 && seqlen > kWidth){ input_t vals_load[kNElts] = {0}; if ((chunk == n_chunks - 2) && (tidx == kNThreads - 1)){ // chunk = n_chunks - 2, a segment of the final state sits in the last index diff --git a/tests/kernels/test_causal_conv1d.py b/tests/kernels/test_causal_conv1d.py index f9b11018288be..51be2425d7dd7 100644 --- a/tests/kernels/test_causal_conv1d.py +++ b/tests/kernels/test_causal_conv1d.py @@ -149,13 +149,14 @@ def causal_conv1d_opcheck_fn(x: torch.Tensor, @pytest.mark.parametrize("itype", [torch.bfloat16, torch.float]) @pytest.mark.parametrize("silu_activation", [True]) @pytest.mark.parametrize("has_bias", [True]) +@pytest.mark.parametrize("has_initial_state", [True, False]) @pytest.mark.parametrize("width", [4]) @pytest.mark.parametrize( 'seqlen', [1, 8, 16, 32, 64, 128, 256, 512, 784, 1024, 1025, 2048, 4096]) @pytest.mark.parametrize('dim', [64]) @pytest.mark.parametrize('batch', [1]) def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, - itype): + has_initial_state, itype): device = "cuda" rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3) if itype == torch.bfloat16: @@ -167,11 +168,18 @@ def test_causal_conv1d(batch, 
dim, seqlen, width, has_bias, silu_activation, weight = torch.randn(dim, width, device=device, dtype=itype) bias = torch.randn(dim, device=device, dtype=itype) if has_bias else None - initial_states = torch.randn(batch, - dim, - width - 1, - device=device, - dtype=itype) + if has_initial_state: + initial_states = torch.randn(batch, + dim, + width - 1, + device=device, + dtype=itype) + has_initial_state_tensor = torch.ones(batch, + dtype=torch.bool, + device=x.device) + else: + initial_states = None + has_initial_state_tensor = None x_ref = x.clone() weight_ref = weight.clone() bias_ref = bias.clone() if bias is not None else None @@ -183,9 +191,7 @@ def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, bias, activation=activation, conv_states=initial_states, - has_initial_state=torch.ones(batch, - dtype=torch.bool, - device=x.device)) + has_initial_state=has_initial_state_tensor) out_ref, final_states_ref = causal_conv1d_ref( x_ref, weight_ref, @@ -193,11 +199,12 @@ def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, initial_states=initial_states_ref, return_final_states=True, activation=activation) - assert initial_states is not None and final_states_ref is not None - assert torch.allclose(initial_states, - final_states_ref, - rtol=rtol, - atol=atol) + if has_initial_state: + assert initial_states is not None and final_states_ref is not None + assert torch.allclose(initial_states, + final_states_ref, + rtol=rtol, + atol=atol) assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) causal_conv1d_opcheck_fn(x, @@ -205,9 +212,7 @@ def test_causal_conv1d(batch, dim, seqlen, width, has_bias, silu_activation, bias, activation=activation, conv_states=initial_states, - has_initial_state=torch.ones(batch, - dtype=torch.bool, - device=x.device)) + has_initial_state=has_initial_state_tensor) @pytest.mark.parametrize("itype", [torch.bfloat16]) From 1b62745b1d00153c5e99879edaf0c2d7ceb4e2c6 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 7 Dec 2024 09:33:45 -0800 Subject: [PATCH 265/397] [core][executor] simplify instance id (#10976) Signed-off-by: youkaichao --- vllm/config.py | 7 ++++++- vllm/envs.py | 6 ------ vllm/executor/cpu_executor.py | 6 +----- vllm/executor/multiproc_gpu_executor.py | 5 +---- vllm/executor/ray_gpu_executor.py | 7 +------ vllm/executor/ray_hpu_executor.py | 7 +------ vllm/executor/ray_tpu_executor.py | 6 +----- vllm/executor/ray_xpu_executor.py | 6 +----- vllm/utils.py | 25 +++++++++---------------- vllm/worker/worker_base.py | 2 +- 10 files changed, 22 insertions(+), 55 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index db7046ab2c22d..d1c4f995ad015 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -27,7 +27,8 @@ get_hf_text_config, get_pooling_config, get_sentence_transformer_tokenizer_config, is_encoder_decoder, uses_mrope) from vllm.utils import (GiB_bytes, cuda_device_count_stateless, get_cpu_memory, - print_warning_once, resolve_obj_by_qualname) + print_warning_once, random_uuid, + resolve_obj_by_qualname) if TYPE_CHECKING: from ray.util.placement_group import PlacementGroup @@ -2408,6 +2409,7 @@ class VllmConfig: init=True) # type: ignore kv_transfer_config: KVTransferConfig = field(default=None, init=True) # type: ignore + instance_id: str = "" @staticmethod def get_graph_batch_size(batch_size: int) -> int: @@ -2573,6 +2575,9 @@ def __post_init__(self): current_platform.check_and_update_config(self) + if not self.instance_id: + self.instance_id = random_uuid()[:5] + def __str__(self): return 
("model=%r, speculative_config=%r, tokenizer=%r, " "skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, " diff --git a/vllm/envs.py b/vllm/envs.py index 28797ac1e4af2..ab12a7b48dc53 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -8,7 +8,6 @@ VLLM_RPC_BASE_PATH: str = tempfile.gettempdir() VLLM_USE_MODELSCOPE: bool = False VLLM_RINGBUFFER_WARNING_INTERVAL: int = 60 - VLLM_INSTANCE_ID: Optional[str] = None VLLM_NCCL_SO_PATH: Optional[str] = None LD_LIBRARY_PATH: Optional[str] = None VLLM_USE_TRITON_FLASH_ATTN: bool = False @@ -175,11 +174,6 @@ def get_default_config_root(): "VLLM_USE_MODELSCOPE": lambda: os.environ.get("VLLM_USE_MODELSCOPE", "False").lower() == "true", - # Instance id represents an instance of the VLLM. All processes in the same - # instance should have the same instance id. - "VLLM_INSTANCE_ID": - lambda: os.environ.get("VLLM_INSTANCE_ID", None), - # Interval in seconds to log a warning message when the ring buffer is full "VLLM_RINGBUFFER_WARNING_INTERVAL": lambda: int(os.environ.get("VLLM_RINGBUFFER_WARNING_INTERVAL", "60")), diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 6b4cb5a9a1d61..2816b5c5c1f88 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -10,8 +10,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sequence import ExecuteModelRequest -from vllm.utils import (get_distributed_init_method, get_open_port, - get_vllm_instance_id, make_async) +from vllm.utils import get_distributed_init_method, get_open_port, make_async from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -31,9 +30,6 @@ def _init_executor(self) -> None: # Environment variables for CPU executor # - # Ensure that VLLM_INSTANCE_ID is set, to be inherited by workers - os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id() - # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" diff --git a/vllm/executor/multiproc_gpu_executor.py b/vllm/executor/multiproc_gpu_executor.py index a6c05a71d2b6f..c450209f0eb91 100644 --- a/vllm/executor/multiproc_gpu_executor.py +++ b/vllm/executor/multiproc_gpu_executor.py @@ -16,7 +16,7 @@ from vllm.triton_utils.importing import HAS_TRITON from vllm.utils import (_run_task_with_lock, cuda_device_count_stateless, cuda_is_initialized, get_distributed_init_method, - get_open_port, get_vllm_instance_id, make_async, + get_open_port, make_async, update_environment_variables) if HAS_TRITON: @@ -37,9 +37,6 @@ def _init_executor(self) -> None: world_size = self.parallel_config.world_size tensor_parallel_size = self.parallel_config.tensor_parallel_size - # Ensure that VLLM_INSTANCE_ID is set, to be inherited by workers - os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id() - # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 6542b18ae70b1..6554cda6b637b 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -15,8 +15,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (_run_task_with_lock, get_distributed_init_method, - get_ip, get_open_port, get_vllm_instance_id, - make_async) + get_ip, get_open_port, make_async) if ray is not None: from 
ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -220,14 +219,10 @@ def sort_by_driver_then_worker_ip(worker): " environment variable, make sure it is unique for" " each node.") - VLLM_INSTANCE_ID = get_vllm_instance_id() - # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ "CUDA_VISIBLE_DEVICES": ",".join(map(str, node_gpus[node_id])), - "VLLM_INSTANCE_ID": - VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), **({ diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py index a74328e5aa272..91c84d9214a60 100644 --- a/vllm/executor/ray_hpu_executor.py +++ b/vllm/executor/ray_hpu_executor.py @@ -15,8 +15,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (_run_task_with_lock, get_distributed_init_method, - get_ip, get_open_port, get_vllm_instance_id, - make_async) + get_ip, get_open_port, make_async) if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -196,12 +195,8 @@ def sort_by_driver_then_worker_ip(worker): "environment variable, make sure it is unique for" " each node.") - VLLM_INSTANCE_ID = get_vllm_instance_id() - # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ - "VLLM_INSTANCE_ID": - VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), }, ) for (node_id, _) in worker_node_and_gpu_ids] diff --git a/vllm/executor/ray_tpu_executor.py b/vllm/executor/ray_tpu_executor.py index c227b5e283c68..3ee59397bf4c9 100644 --- a/vllm/executor/ray_tpu_executor.py +++ b/vllm/executor/ray_tpu_executor.py @@ -13,7 +13,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, - get_vllm_instance_id, make_async) + make_async) if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -144,12 +144,8 @@ def sort_by_driver_then_worker_ip(worker): for i, (node_id, _) in enumerate(worker_node_and_gpu_ids): node_workers[node_id].append(i) - VLLM_INSTANCE_ID = get_vllm_instance_id() - # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ - "VLLM_INSTANCE_ID": - VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), }, ) for _ in worker_node_and_gpu_ids] diff --git a/vllm/executor/ray_xpu_executor.py b/vllm/executor/ray_xpu_executor.py index 2b1cdc09b0a9f..61f5d6a65e999 100644 --- a/vllm/executor/ray_xpu_executor.py +++ b/vllm/executor/ray_xpu_executor.py @@ -5,7 +5,7 @@ from vllm.executor.ray_gpu_executor import RayGPUExecutor, RayGPUExecutorAsync from vllm.executor.xpu_executor import XPUExecutor from vllm.logger import init_logger -from vllm.utils import get_vllm_instance_id, make_async +from vllm.utils import make_async logger = init_logger(__name__) @@ -17,12 +17,8 @@ def _get_env_vars_to_be_updated(self): worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", use_dummy_driver=True) - VLLM_INSTANCE_ID = get_vllm_instance_id() - # Set environment variables for the driver and workers. 
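Restated outside the diff: patch 265 moves the per-instance identifier off the VLLM_INSTANCE_ID environment variable and onto the config object itself. A minimal sketch with a stand-in dataclass (the real VllmConfig carries far more fields), mirroring the random_uuid()[:5] default added in __post_init__:

    import uuid
    from dataclasses import dataclass

    def random_uuid() -> str:
        return str(uuid.uuid4().hex)

    @dataclass
    class VllmConfigSketch:
        instance_id: str = ""

        def __post_init__(self):
            if not self.instance_id:
                # Same default as the patch: a short random id per engine instance.
                self.instance_id = random_uuid()[:5]

    print(VllmConfigSketch().instance_id)  # e.g. '3f9a1'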
all_args_to_update_environment_variables = [({ - "VLLM_INSTANCE_ID": - VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), }, ) for (_, _) in worker_node_and_gpu_ids] diff --git a/vllm/utils.py b/vllm/utils.py index 6cee4847e57b4..1f19d9eacd16d 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -24,9 +24,9 @@ from collections.abc import Iterable, Mapping from functools import lru_cache, partial, wraps from platform import uname -from typing import (Any, AsyncGenerator, Awaitable, Callable, Dict, Generic, - Hashable, List, Literal, Optional, OrderedDict, Set, Tuple, - Type, TypeVar, Union, overload) +from typing import (TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Callable, + Dict, Generic, Hashable, List, Literal, Optional, + OrderedDict, Set, Tuple, Type, TypeVar, Union, overload) from uuid import uuid4 import numpy as np @@ -43,6 +43,9 @@ from vllm.logger import enable_trace_function_call, init_logger from vllm.platforms import current_platform +if TYPE_CHECKING: + from vllm.config import VllmConfig + logger = init_logger(__name__) # Exception strings for non-implemented encoder/decoder scenarios @@ -335,17 +338,6 @@ def random_uuid() -> str: return str(uuid.uuid4().hex) -@lru_cache(maxsize=None) -def get_vllm_instance_id() -> str: - """ - If the environment variable VLLM_INSTANCE_ID is set, return it. - Otherwise, return a random UUID. - Instance id represents an instance of the VLLM. All processes in the same - instance should have the same instance id. - """ - return envs.VLLM_INSTANCE_ID or f"vllm-instance-{random_uuid()}" - - @lru_cache(maxsize=None) def in_wsl() -> bool: # Reference: https://github.com/microsoft/WSL/issues/4071 @@ -997,7 +989,7 @@ def find_nccl_library() -> str: return so_file -def enable_trace_function_call_for_thread() -> None: +def enable_trace_function_call_for_thread(vllm_config: "VllmConfig") -> None: """Set up function tracing for the current thread, if enabled via the VLLM_TRACE_FUNCTION environment variable """ @@ -1009,7 +1001,8 @@ def enable_trace_function_call_for_thread() -> None: filename = (f"VLLM_TRACE_FUNCTION_for_process_{os.getpid()}" f"_thread_{threading.get_ident()}_" f"at_{datetime.datetime.now()}.log").replace(" ", "_") - log_path = os.path.join(tmp_dir, "vllm", get_vllm_instance_id(), + log_path = os.path.join(tmp_dir, "vllm", + f"vllm-instance-{vllm_config.instance_id}", filename) os.makedirs(os.path.dirname(log_path), exist_ok=True) enable_trace_function_call(log_path) diff --git a/vllm/worker/worker_base.py b/vllm/worker/worker_base.py index 7c0bc5a678956..6d00102e0a324 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -439,7 +439,7 @@ def init_worker(self, *args, **kwargs): Here we inject some common logic before initializing the worker. Arguments are passed to the worker class constructor. 
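With get_vllm_instance_id() gone, the function-trace log directory is keyed by the config's instance_id instead of a process-wide value. A self-contained sketch of the path construction, following the strings in the patched enable_trace_function_call_for_thread; the instance id passed in is made up:

    import datetime
    import os
    import tempfile
    import threading

    def trace_log_path(instance_id: str) -> str:
        tmp_dir = tempfile.gettempdir()
        filename = (f"VLLM_TRACE_FUNCTION_for_process_{os.getpid()}"
                    f"_thread_{threading.get_ident()}_"
                    f"at_{datetime.datetime.now()}.log").replace(" ", "_")
        # One sub-directory per engine instance, keyed by instance_id.
        return os.path.join(tmp_dir, "vllm",
                            f"vllm-instance-{instance_id}", filename)

    print(trace_log_path("3f9a1"))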
""" - enable_trace_function_call_for_thread() + enable_trace_function_call_for_thread(self.vllm_config) # see https://github.com/NVIDIA/nccl/issues/1234 os.environ['NCCL_CUMEM_ENABLE'] = '0' From 7be15d9356a10c6ae3537565548e4f8bf46f35dd Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sat, 7 Dec 2024 12:06:08 -0800 Subject: [PATCH 266/397] [core][misc] remove use_dummy driver for _run_workers (#10920) Signed-off-by: youkaichao --- vllm/executor/ray_gpu_executor.py | 27 ++++++++++++--------------- vllm/executor/ray_hpu_executor.py | 28 ++++++++++++---------------- vllm/executor/ray_tpu_executor.py | 21 ++++++++++----------- vllm/executor/ray_xpu_executor.py | 11 +++++++++-- 4 files changed, 43 insertions(+), 44 deletions(-) diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 6554cda6b637b..4263fb27265f6 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -188,8 +188,14 @@ def sort_by_driver_then_worker_ip(worker): self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) # Get the set of GPU IDs used on each node. - worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", - use_dummy_driver=True) + worker_node_and_gpu_ids = [] + for worker in [self.driver_dummy_worker] + self.workers: + if worker is None: + # driver_dummy_worker can be None when using ray spmd worker. + continue + worker_node_and_gpu_ids.append( + ray.get(worker.get_node_and_gpu_ids.remote()) \ + ) # type: ignore node_workers = defaultdict(list) # node id -> list of worker ranks node_gpus = defaultdict(list) # node id -> list of gpu ids @@ -329,7 +335,6 @@ def _run_workers( async_run_tensor_parallel_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, - use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, **kwargs, ) -> Any: @@ -389,18 +394,10 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. - if not use_dummy_driver: - driver_worker_output = [ - self.driver_worker.execute_method(method, *driver_args, - **driver_kwargs) - ] - else: - assert self.driver_dummy_worker is not None - driver_worker_output = [ - ray.get( - self.driver_dummy_worker.execute_method.remote( - method, *driver_args, **driver_kwargs)) - ] + driver_worker_output = [ + self.driver_worker.execute_method(method, *driver_args, + **driver_kwargs) + ] # Get the results of the ray workers. if self.workers: diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py index 91c84d9214a60..f3025cb537ab8 100644 --- a/vllm/executor/ray_hpu_executor.py +++ b/vllm/executor/ray_hpu_executor.py @@ -163,9 +163,14 @@ def sort_by_driver_then_worker_ip(worker): # node will be placed first. self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) - # Get the set of GPU IDs used on each node. - worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", - use_dummy_driver=True) + worker_node_and_gpu_ids = [] + for worker in [self.driver_dummy_worker] + self.workers: + if worker is None: + # driver_dummy_worker can be None when using ray spmd worker. 
+ continue + worker_node_and_gpu_ids.append( + ray.get(worker.get_node_and_gpu_ids.remote()) \ + ) # type: ignore node_workers = defaultdict(list) # node id -> list of worker ranks node_gpus = defaultdict(list) # node id -> list of gpu ids @@ -296,7 +301,6 @@ def _run_workers( async_run_tensor_parallel_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, - use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, **kwargs, ) -> Any: @@ -356,18 +360,10 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. - if not use_dummy_driver: - driver_worker_output = [ - self.driver_worker.execute_method(method, *driver_args, - **driver_kwargs) - ] - else: - assert self.driver_dummy_worker is not None - driver_worker_output = [ - ray.get( - self.driver_dummy_worker.execute_method.remote( - method, *driver_args, **driver_kwargs)) - ] + driver_worker_output = [ + self.driver_worker.execute_method(method, *driver_args, + **driver_kwargs) + ] # Get the results of the ray workers. if self.workers: diff --git a/vllm/executor/ray_tpu_executor.py b/vllm/executor/ray_tpu_executor.py index 3ee59397bf4c9..5118c13934f0d 100644 --- a/vllm/executor/ray_tpu_executor.py +++ b/vllm/executor/ray_tpu_executor.py @@ -137,8 +137,14 @@ def sort_by_driver_then_worker_ip(worker): self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) # Get the set of TPU IDs used on each node. - worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", - use_dummy_driver=True) + worker_node_and_gpu_ids = [] + for worker in [self.driver_dummy_worker] + self.workers: + if worker is None: + # driver_dummy_worker can be None when using ray spmd worker. + continue + worker_node_and_gpu_ids.append( + ray.get(worker.get_node_and_gpu_ids.remote()) \ + ) # type: ignore node_workers = defaultdict(list) for i, (node_id, _) in enumerate(worker_node_and_gpu_ids): @@ -199,7 +205,6 @@ def _run_workers( async_run_remote_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, - use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, use_ray_compiled_dag: bool = False, **kwargs, @@ -241,14 +246,8 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. - if not use_dummy_driver: - driver_worker_output = self.driver_worker.execute_method( - method, *driver_args, **driver_kwargs) - else: - assert self.driver_dummy_worker is not None - driver_worker_output = ray.get( - self.driver_dummy_worker.execute_method.remote( - method, *driver_args, **driver_kwargs)) + driver_worker_output = self.driver_worker.execute_method( + method, *driver_args, **driver_kwargs) # Get the results of the ray workers. 
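The gather pattern that patch 266 repeats across the Ray executors, pulled out as a single helper for clarity. The worker handles are placeholders; only the control flow is taken from the diff:

    import ray

    def collect_node_and_gpu_ids(driver_dummy_worker, workers):
        # Replaces the old _run_workers("get_node_and_gpu_ids",
        # use_dummy_driver=True) round trip with a direct loop.
        results = []
        for worker in [driver_dummy_worker] + workers:
            if worker is None:
                # driver_dummy_worker can be None when using ray spmd worker.
                continue
            results.append(ray.get(worker.get_node_and_gpu_ids.remote()))
        return results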
if self.workers: ray_worker_outputs = ray.get(ray_worker_outputs) diff --git a/vllm/executor/ray_xpu_executor.py b/vllm/executor/ray_xpu_executor.py index 61f5d6a65e999..d2086f5fef26c 100644 --- a/vllm/executor/ray_xpu_executor.py +++ b/vllm/executor/ray_xpu_executor.py @@ -1,6 +1,8 @@ import asyncio from typing import List, Optional +import ray + import vllm.envs as envs from vllm.executor.ray_gpu_executor import RayGPUExecutor, RayGPUExecutorAsync from vllm.executor.xpu_executor import XPUExecutor @@ -14,8 +16,13 @@ class RayXPUExecutor(RayGPUExecutor, XPUExecutor): def _get_env_vars_to_be_updated(self): # Get the set of GPU IDs used on each node. - worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", - use_dummy_driver=True) + worker_node_and_gpu_ids = [] + for worker in [self.driver_dummy_worker] + self.workers: + if worker is None: + # driver_dummy_worker can be None when using ray spmd worker. + continue + worker_node_and_gpu_ids.append( + ray.get(worker.get_node_and_gpu_ids.remote())) # type: ignore # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ From fd57d2b5347e8fe6da9287553d4b5a3aaf2e6693 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 8 Dec 2024 03:05:21 -0800 Subject: [PATCH 267/397] [torch.compile] allow candidate compile sizes (#10984) Signed-off-by: youkaichao --- tests/engine/test_arg_utils.py | 8 +++---- vllm/config.py | 44 +++++++++++++++++----------------- vllm/engine/arg_utils.py | 5 +--- vllm/entrypoints/llm.py | 6 +---- 4 files changed, 28 insertions(+), 35 deletions(-) diff --git a/tests/engine/test_arg_utils.py b/tests/engine/test_arg_utils.py index de78d41ad12eb..4e269de9fc40b 100644 --- a/tests/engine/test_arg_utils.py +++ b/tests/engine/test_arg_utils.py @@ -50,12 +50,12 @@ def test_compilation_config(): args = parser.parse_args(["-O=3"]) assert args.compilation_config.level == 3 - # set to json - args = parser.parse_args(["--compilation-config", '{"level": 3}']) + # set to string form of a dict + args = parser.parse_args(["--compilation-config", "{'level': 3}"]) assert args.compilation_config.level == 3 - # set to json - args = parser.parse_args(['--compilation-config={"level": 3}']) + # set to string form of a dict + args = parser.parse_args(["--compilation-config={'level': 3}"]) assert args.compilation_config.level == 3 diff --git a/vllm/config.py b/vllm/config.py index d1c4f995ad015..164622b5af34e 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1,3 +1,4 @@ +import ast import copy import enum import hashlib @@ -2191,14 +2192,10 @@ class CompilationConfig(BaseModel): - use_inductor: whether to use inductor compilation. - False: inductor compilation is not used. graph runs in eager. - True: inductor compilation is used. one graph for symbolic shape - is compiled. In addition, compile for different sizes specified - in inductor_compile_sizes, using configurations + is compiled. In addition, compile for cudagraph sizes that are + in candidate_compile_sizes, using configurations in inductor_compile_config. - - inductor_compile_sizes: sizes to compile for inductor. - - inductor_specialize_for_cudagraph_no_more_than: an optional integer - to specialize inductor for cudagraph sizes no more than the - specified size. It is useful when we want to specialize inductor - with a subset of cudagraph sizes. + - candidate_compile_sizes: sizes to compile for inductor. - inductor_compile_config: additional configurations for inductor. - None: use default configurations. 
- inductor_passes: additional passes for inductor. It is a dictionary @@ -2227,8 +2224,7 @@ class CompilationConfig(BaseModel): ]) use_inductor: bool = True - inductor_specialize_for_cudagraph_no_more_than: Optional[int] = None - inductor_compile_sizes: Optional[List[int]] = Field(default=None) + candidate_compile_sizes: Optional[List[int]] = Field(default=None) inductor_compile_config: Dict = Field(default_factory=dict) inductor_passes: Dict[str, str] = Field(default_factory=dict) @@ -2294,7 +2290,9 @@ def from_cli(cls, cli_value: str) -> "CompilationConfig": """Parse the CLI value for the compilation config.""" if cli_value in ["0", "1", "2", "3"]: return cls(level=int(cli_value)) - return CompilationConfig.model_validate_json(cli_value) + # do not use `eval`, it is dangerous and can execute arbitrary code + dict_value = ast.literal_eval(cli_value) + return CompilationConfig.model_validate(dict_value) def model_post_init(self, __context: Any) -> None: @@ -2355,18 +2353,20 @@ def init_with_cudagraph_sizes(self, sizes_to_specialize: List[int]): logger.info(("cudagraph sizes specified by model runner" " %s is overridden by config %s"), sizes_to_specialize, self.cudagraph_capture_sizes) - if self.inductor_specialize_for_cudagraph_no_more_than is not None: - assert self.inductor_compile_sizes is None, ( - "inductor_compile_sizes should be None when " - "inductor_specialize_for_cudagraph_no_more_than is not None") - self.compile_sizes = [ - x for x in self.capture_sizes - if x <= self.inductor_specialize_for_cudagraph_no_more_than - ] - else: - if self.inductor_compile_sizes is None: - self.inductor_compile_sizes = [] - self.compile_sizes = self.inductor_compile_sizes + + if self.candidate_compile_sizes is None: + self.candidate_compile_sizes = [] + self.compile_sizes = [ + x for x in self.candidate_compile_sizes if x in self.capture_sizes + ] + ignored_sizes = [ + x for x in self.candidate_compile_sizes + if x not in self.capture_sizes + ] + if ignored_sizes: + logger.warning(("candidate_compile_sizes %s are ignored " + "because they are not cudagraph capture sizes."), + ignored_sizes) # sort to make sure cudagraph capture sizes are in descending order self.capture_sizes.sort(reverse=True) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index ccd9fac225cba..96c11ec2b4f9e 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -209,12 +209,9 @@ def __post_init__(self): # support `EngineArgs(compilation_config={...})` # without having to manually construct a # CompilationConfig object - if isinstance(self.compilation_config, (int)): + if isinstance(self.compilation_config, (int, dict)): self.compilation_config = CompilationConfig.from_cli( str(self.compilation_config)) - elif isinstance(self.compilation_config, (dict)): - self.compilation_config = CompilationConfig.from_cli( - json.dumps(self.compilation_config)) # Setup plugins from vllm.plugins import load_general_plugins diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 65fa9873df28c..8de30ccd18a11 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1,5 +1,4 @@ import itertools -import json import warnings from contextlib import contextmanager from typing import (Any, ClassVar, Dict, List, Optional, Sequence, Tuple, Type, @@ -186,12 +185,9 @@ def __init__( kwargs["disable_log_stats"] = True if compilation_config is not None: - if isinstance(compilation_config, (int)): + if isinstance(compilation_config, (int, dict)): compilation_config_instance = 
CompilationConfig.from_cli( str(compilation_config)) - elif isinstance(compilation_config, (dict)): - compilation_config_instance = CompilationConfig.from_cli( - json.dumps(compilation_config)) else: compilation_config_instance = compilation_config else: From a11f3265282c712d1d9fa75368e2a8c40019fbb7 Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Sun, 8 Dec 2024 04:50:51 -0800 Subject: [PATCH 268/397] [V1] Initial support of multimodal models for V1 re-arch (#10699) Signed-off-by: Roger Wang --- vllm/engine/arg_utils.py | 16 +-- vllm/model_executor/models/interfaces.py | 5 + vllm/model_executor/models/internvl.py | 68 ++++++++++--- vllm/model_executor/models/molmo.py | 72 ++++++++++++-- vllm/model_executor/models/pixtral.py | 121 +++++++++++++++++------ vllm/model_executor/models/utils.py | 28 +++++- vllm/multimodal/inputs.py | 3 +- vllm/multimodal/utils.py | 10 +- vllm/v1/core/scheduler.py | 4 +- vllm/v1/engine/llm_engine.py | 24 ++++- vllm/v1/engine/mm_input_mapper.py | 2 +- 11 files changed, 284 insertions(+), 69 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 96c11ec2b4f9e..3db069ec64ee4 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1050,9 +1050,12 @@ def create_engine_config(self, # long context (> 32K) models. This is to avoid OOM errors in the # initial memory profiling phase. - # Chunked prefill is currently disabled for multimodal models by - # default. - if use_long_context and not model_config.is_multimodal_model: + # For multimodal models, chunked prefill is disabled by default in + # V0, but enabled by design in V1 + if model_config.is_multimodal_model: + self.enable_chunked_prefill = bool(envs.VLLM_USE_V1) + + elif use_long_context: is_gpu = device_config.device_type == "cuda" use_sliding_window = (model_config.get_sliding_window() is not None) @@ -1241,12 +1244,9 @@ def _override_v1_engine_config(self, engine_config: VllmConfig) -> None: Override the EngineConfig's configs based on the usage context for V1. """ assert envs.VLLM_USE_V1, "V1 is not enabled" - # TODO (ywang96): Enable APC by default when VLM supports it. if engine_config.model_config.is_multimodal_model: - logger.warning( - "Prefix caching is currently not supported for multimodal " - "models and has been disabled.") - engine_config.cache_config.enable_prefix_caching = False + # TODO (ywang96): Enable APC by default when VLM supports it. + assert not engine_config.cache_config.enable_prefix_caching @dataclass diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index 01a381381ccec..c3979eab905db 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -36,6 +36,11 @@ def get_multimodal_embeddings(self, **kwargs) -> Optional[T]: """ Returns multimodal embeddings generated from multimodal kwargs to be merged with text embeddings. + + The output embeddings must be one of the following formats: + - A list or tuple of 2D tensors, where each tensor corresponds to + each input image. + - A single 3D tensor, with the batch dimension grouping the 2D tensors. """ ... 
diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index d5a7781fecfc3..42c769f79e202 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -26,7 +26,7 @@ InternVisionPatchModel) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs -from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.inputs import NestedTensors, PlaceholderRange from vllm.multimodal.utils import cached_get_tokenizer from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of @@ -52,12 +52,18 @@ class InternVLImagePixelInputs(TypedDict): Shape: `(batch_size * num_images * (1 + num_patches), num_channels, height, width)` """ + patches_per_image: List[int] + """ + List of number of total patches for each image in the batch. + """ class InternVLImageEmbeddingInputs(TypedDict): type: Literal["image_embeds"] - data: torch.Tensor - """Shape: `(batch_size * num_images, image_feature_size, hidden_size)` + data: NestedTensors + """ + A tensor of shape `(num_images, total_image_feature_size, hidden_size)` + or a list of tensors of shape `(total_image_feature_size, hidden_size)` `hidden_size` must match the hidden size of language model backbone. """ @@ -349,10 +355,32 @@ def input_processor( new_prompt = self._expand_image_prompt(prompt, image_feature_sizes, num_patches) new_prompt_token_ids = tokenizer.encode(new_prompt) + img_context_token_id = tokenizer.encode(self.img_context_token, + add_special_tokens=False) + assert len(img_context_token_id) == 1, \ + (f"Invalid image token '{self.img_context_token}': A valid image " + f"token encodes to a single token ID, got {img_context_token_id}.") + img_context_token_id = img_context_token_id[0] + + # Get precise tracking of placeholder positions + token_idx = image_idx = 0 + placeholder_ranges = [] + while token_idx < len(new_prompt_token_ids): + if new_prompt_token_ids[token_idx] == img_context_token_id: + curr_image_featue_size = image_feature_sizes[image_idx] + placeholder_ranges.append( + PlaceholderRange(offset=token_idx, + length=curr_image_featue_size)) + image_idx += 1 + token_idx += curr_image_featue_size + else: + token_idx += 1 - return token_inputs(prompt=prompt, - prompt_token_ids=new_prompt_token_ids, - multi_modal_data=multi_modal_data) + return token_inputs( + prompt=prompt, + prompt_token_ids=new_prompt_token_ids, + multi_modal_data=multi_modal_data, + multi_modal_placeholders={"image": placeholder_ranges}) def input_mapper( self, @@ -614,26 +642,46 @@ def _parse_and_validate_image_input( if not isinstance(pixel_values, (torch.Tensor, list)): raise ValueError("Incorrect type of pixel values. " f"Got type: {type(pixel_values)}") + + patches_per_image = [] + for request_pixel_values in pixel_values: + for image_pixel_values in request_pixel_values: + patches_per_image.append(image_pixel_values.shape[0]) # We need to flatten (B, N, P) to (B*N*P), # so we call flatten_bn twice. 
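The placeholder bookkeeping added to the InternVL input processor can be restated as a small self-contained sketch; plain dicts stand in for PlaceholderRange, and the token ids and feature sizes are invented for the example:

    def track_image_placeholders(token_ids, image_token_id, image_feature_sizes):
        # One (offset, length) entry per image, in prompt order. Assumes the
        # prompt has already been expanded so each image occupies
        # image_feature_sizes[i] consecutive image-context tokens.
        placeholder_ranges = []
        token_idx = image_idx = 0
        while token_idx < len(token_ids):
            if token_ids[token_idx] == image_token_id:
                feature_size = image_feature_sizes[image_idx]
                placeholder_ranges.append({"offset": token_idx,
                                           "length": feature_size})
                image_idx += 1
                token_idx += feature_size
            else:
                token_idx += 1
        return placeholder_ranges

    # Two images occupying 3 and 2 positions of the expanded prompt.
    print(track_image_placeholders([1, 9, 9, 9, 2, 9, 9, 3], 9, [3, 2]))
    # [{'offset': 1, 'length': 3}, {'offset': 5, 'length': 2}]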
return InternVLImagePixelInputs( type="pixel_values", data=self._validate_pixel_values( flatten_bn(flatten_bn(pixel_values), concat=True)), - ) + patches_per_image=patches_per_image) raise AssertionError("This line should be unreachable.") def _process_image_input( self, image_input: InternVLImageInputs, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor]: if image_input["type"] == "image_embeds": return image_input["data"] assert self.vision_model is not None + image_embeds = self.extract_feature(image_input["data"]) + patches_per_image = image_input["patches_per_image"] + if len(patches_per_image) == 1: + image_embeds = image_embeds.unsqueeze(0) + return image_embeds + + # NOTE: Image embeddings are split into separate tensors for each image + # by the size of each embedding. + feature_size = image_embeds.shape[1] + image_embeds = image_embeds.view(-1, + self.config.text_config.hidden_size) + image_feature_sizes = [ + num_patches * feature_size for num_patches in patches_per_image + ] + image_embeds = image_embeds.split(image_feature_sizes) return image_embeds def _set_visual_token_mask(self, input_ids: torch.Tensor) -> torch.Tensor: @@ -696,13 +744,11 @@ def forward( "inputs_embeds": inputs_embeds, } + # Only required if the model is mono-architecture if self.visual_token_mask is not None: - # overwrite visual_token_mask and img_context_token_id back to None, - # so that this doesn't need to depend on encoder output forward_kwargs.update( {"visual_token_mask": self.visual_token_mask}) self.visual_token_mask = None - self.img_context_token_id = None hidden_states = self.language_model.model(**forward_kwargs) return hidden_states diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index d1fcbd167c199..a328b5a2aeea7 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -37,7 +37,7 @@ ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs -from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.inputs import NestedTensors, PlaceholderRange from vllm.multimodal.utils import cached_get_tokenizer from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, SequenceData) @@ -46,12 +46,16 @@ from .interfaces import SupportsMultiModal, SupportsPP from .utils import (AutoWeightsLoader, WeightsMapper, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, - maybe_prefix) + maybe_prefix, merge_multimodal_embeddings) # TODO: hard-coded for now. Consider making it configurable. 
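A torch-only sketch of the per-image split performed by the patched _process_image_input: flattened vision features are cut back into one tensor per image using patches_per_image. The shapes below are arbitrary toy values:

    import torch

    hidden_size = 8
    feature_size = 4            # embedding rows produced per patch
    patches_per_image = [2, 3]  # two images with different patch counts

    # Encoder output flattened to one row per (patch, position) pair.
    total_rows = sum(patches_per_image) * feature_size
    image_embeds = torch.randn(total_rows, hidden_size)

    split_sizes = [n * feature_size for n in patches_per_image]
    per_image = image_embeds.split(split_sizes)
    print([t.shape for t in per_image])
    # [torch.Size([8, 8]), torch.Size([12, 8])]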
VIT_LAYERS = [-2, -9] NUM_PREFIX_TOKENS = 1 ADDITIONAL_VOCAB_SIZE = 128 +DEFAULT_IMAGE_PATCH_TOKEN_ID = 152066 +DEFAULT_IM_START_TOKEN_ID = 152067 +DEFAULT_IM_END_TOKEN_ID = 152064 +DEFAULT_IM_COL_TOKEN_ID = 152065 class MolmoImageInputs(TypedDict): @@ -75,6 +79,11 @@ class MolmoImageInputs(TypedDict): `(batch_size, num_crops, num_patch)` """ + image_start_end: Tuple[int, int] + """Starting and ending index of placeholder + tokens + """ + @dataclass class VisionBackboneConfig: @@ -918,6 +927,8 @@ def image_input_mapper_for_molmo( ctx: InputContext, data: object, ): + if isinstance(data, list): + data = data[0] return MultiModalKwargs(data) @@ -967,7 +978,22 @@ def dummy_data_for_molmo(ctx: InputContext, seq_len: int, if "image_masks" in out: dummy_imgdata["image_masks"] = out["image_masks"] dummy_imgdata["seq_len"] = torch.tensor(seq_len, dtype=torch.long) - return DummyData(dummy_seqdata, {"image": dummy_imgdata}) + size = 0 + offset = -1 + for i in range(len(token_ids)): + if token_ids[i] in (DEFAULT_IMAGE_PATCH_TOKEN_ID, + DEFAULT_IM_START_TOKEN_ID, DEFAULT_IM_END_TOKEN_ID, + DEFAULT_IM_COL_TOKEN_ID): + if offset < 0: + offset = i + size += 1 + dummy_imgdata["image_start_end"] = (offset, offset + size) + return DummyData(seq_data=dummy_seqdata, + multi_modal_data={"image": dummy_imgdata}, + multi_modal_placeholders={ + "image": + [PlaceholderRange(offset=offset, length=size)] + }) def pad_images( @@ -1055,19 +1081,34 @@ def input_processor_for_molmo(ctx: InputContext, inputs: DecoderOnlyInputs): if image_masks is not None: image_data["image_masks"] = image_masks - image_data["seq_len"] = torch.tensor(len(out["input_ids"]), + new_prompt_token_ids = out["input_ids"].tolist() + image_data["seq_len"] = torch.tensor(len(new_prompt_token_ids), dtype=torch.long) multi_modal_data = dict(image=image_data) + size = 0 + offset = -1 + for i in range(len(new_prompt_token_ids)): + if new_prompt_token_ids[i] in (DEFAULT_IMAGE_PATCH_TOKEN_ID, + DEFAULT_IM_START_TOKEN_ID, + DEFAULT_IM_END_TOKEN_ID, + DEFAULT_IM_COL_TOKEN_ID): + if offset < 0: + offset = i + size += 1 + image_data["image_start_end"] = (offset, offset + size) prompt = inputs.get("prompt") if prompt is None: - prompt = tokenizer.decode(out["input_ids"]) + prompt = tokenizer.decode(new_prompt_token_ids) return token_inputs( - prompt_token_ids=out["input_ids"], + prompt_token_ids=new_prompt_token_ids, prompt=prompt, multi_modal_data=multi_modal_data, + multi_modal_placeholders={ + "image": [PlaceholderRange(offset=offset, length=size)] + }, ) @@ -1113,6 +1154,7 @@ def _parse_and_validate_image_input( ) -> Optional[MolmoImageInputs]: images = kwargs.pop("images", None) image_masks = kwargs.pop("image_masks", None) + image_start_end = kwargs.pop("image_start_end", None) if images is None: return None @@ -1130,6 +1172,7 @@ def _parse_and_validate_image_input( image_input_idx=image_input_idx, seq_len=seq_len, image_masks=image_masks, + image_start_end=image_start_end, ) def _process_image_input( @@ -1178,9 +1221,16 @@ def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: # Note: In this original implementation from AI2, the final # vision_embeddings will be always be the same length - # of input embedddings, which is not very efficient. - # TODO(ywang96): see if this can be optimized. + # of input embeddings. vision_embeddings = torch.einsum('nd,nm->md', image_features, mat) + + # Split by the sizes of the input sequences. For each full embedding, + # extract the actual vision embeddings to be merged. 
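Molmo now records where its image tokens sit in the prompt. The offset/size scan that the patch adds in two places (dummy data and the input processor) condenses to one helper; the token id constants mirror the DEFAULT_* values introduced at the top of the file:

    # patch, im_start, im_end, im_col token ids, as defined in the patch.
    MOLMO_IMAGE_TOKEN_IDS = (152066, 152067, 152064, 152065)

    def image_start_end(token_ids):
        # Returns (offset, offset + size): the first image-token index and the
        # count of image-related tokens, assuming they are contiguous.
        size = 0
        offset = -1
        for i, tok in enumerate(token_ids):
            if tok in MOLMO_IMAGE_TOKEN_IDS:
                if offset < 0:
                    offset = i
                size += 1
        return offset, offset + size

    print(image_start_end([1, 2, 152067, 152066, 152066, 152064, 5]))
    # (2, 6)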
+ vision_embeddings = list(vision_embeddings.split(seq_len.tolist())) + for i in range(len(vision_embeddings)): + start, end = image_input['image_start_end'][i] + vision_embeddings[i] = vision_embeddings[i][start:end] + return vision_embeddings def get_input_embeddings( @@ -1190,7 +1240,11 @@ def get_input_embeddings( ) -> torch.Tensor: inputs_embeds = self.model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: - inputs_embeds = inputs_embeds + multimodal_embeddings + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, [ + DEFAULT_IMAGE_PATCH_TOKEN_ID, DEFAULT_IM_START_TOKEN_ID, + DEFAULT_IM_END_TOKEN_ID, DEFAULT_IM_COL_TOKEN_ID + ]) return inputs_embeds def forward( diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 215727cadd954..c6786c363ab4a 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -48,6 +48,9 @@ except ImportError: USE_XFORMERS_OPS = False +PIXTRAL_IMAGE_BREAK_ID = 12 +PIXTRAL_IMAGE_END_ID = 13 + def get_max_pixtral_image_tokens(ctx: InputContext): tokenizer = cached_get_tokenizer( @@ -68,7 +71,6 @@ def dummy_data_for_pixtral(ctx: InputContext, seq_len: int, tokenizer_mode=ctx.model_config.tokenizer_mode) mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder - patch_size = mm_encoder.mm_config.image_patch_size image_token_id = mm_encoder.special_ids.img mm_config = ctx.model_config.multimodal_config @@ -78,8 +80,8 @@ def dummy_data_for_pixtral(ctx: InputContext, seq_len: int, size = 256 image = Image.new("RGB", (size, size), color=0) - image_feature_size = (size**2) // (patch_size**2) - + encoding = tokenizer.instruct.mm_encoder(ImageChunk(image=image)) + image_feature_size = len(encoding.tokens) num_image_tokens = image_feature_size * num_images seq_data = SequenceData.from_prompt_token_counts( (image_token_id, num_image_tokens), @@ -101,14 +103,13 @@ def input_mapper_for_pixtral(ctx: InputContext, Args: ctx: Context of the loaded model. - data: data potentially containing image/image embeddings to be mapped - to pixel_values in .forward() for a visual QWenLMHeadModel model. + data: data potentially containing PIL images to be processed + and mapped to `images`. Returns: MultiModalKwargs containing the stacked normalized images tensor or image embeddings. 
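The split-then-slice step added just above in Molmo's get_multimodal_embeddings, shown on toy tensors: full-sequence embeddings are split per request, then trimmed to the image-token span recorded in image_start_end:

    import torch

    hidden = 4
    seq_len = torch.tensor([6, 5])      # per-request sequence lengths
    image_start_end = [(1, 4), (0, 3)]  # image-token spans per request

    # One embedding row per input position, both sequences back to back.
    vision_embeddings = torch.randn(int(seq_len.sum()), hidden)

    per_request = list(vision_embeddings.split(seq_len.tolist()))
    for i, (start, end) in enumerate(image_start_end):
        per_request[i] = per_request[i][start:end]

    print([t.shape for t in per_request])
    # [torch.Size([3, 4]), torch.Size([3, 4])]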
""" - # Early exit if we have provided an image to a language only Qwen model model_config = ctx.model_config tokenizer = cached_get_tokenizer( model_config.tokenizer, tokenizer_mode=model_config.tokenizer_mode) @@ -116,35 +117,67 @@ def input_mapper_for_pixtral(ctx: InputContext, data_list = data if isinstance(data, list) else [data] images = [] + image_tokens_list = [] for image_data in data_list: image = ImageChunk(image=image_data) encoding = tokenizer.instruct.mm_encoder(image) image = torch.from_numpy(encoding.image).to(device="cuda", dtype=torch.float16) images.append(image) + image_tokens_list.append(encoding.tokens) - return MultiModalKwargs({"images": images}) + image_tokens = torch.tensor([ + token_id for image_tokens in image_tokens_list + for token_id in image_tokens + ]) + return MultiModalKwargs({"images": images, "image_tokens": image_tokens}) def input_processor_for_pixtral(ctx: InputContext, inputs: DecoderOnlyInputs): multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is not None and "image" in multi_modal_data: - tokenizer = cached_get_tokenizer( - ctx.model_config.tokenizer, - tokenizer_mode=ctx.model_config.tokenizer_mode) - - mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder - image_token_id = mm_encoder.special_ids.img + if multi_modal_data is None or "image" not in multi_modal_data: + return inputs - if image_token_id not in inputs['prompt_token_ids']: - raise ValueError( - f"You've passed {inputs=} without {image_token_id=}" - " Make sure to process your input via mistral_common's" - " tokenizer or pass a chat completion request. For more" - " For more info, see: " - "https://github.com/vllm-project/vllm/issues/8411.") + prompt_token_ids = inputs.get("prompt_token_ids") + prompt = inputs.get("prompt") + tokenizer = cached_get_tokenizer( + ctx.model_config.tokenizer, + tokenizer_mode=ctx.model_config.tokenizer_mode) - return inputs + mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder + image_token_id = mm_encoder.special_ids.img + image_break_id = mm_encoder.special_ids.img_break + image_end_id = mm_encoder.special_ids.img_end + + if image_token_id not in inputs['prompt_token_ids']: + raise ValueError( + f"You've passed {inputs=} without {image_token_id=}" + " Make sure to process your input via mistral_common's" + " tokenizer or pass a chat completion request. 
For more" + " For more info, see: " + "https://github.com/vllm-project/vllm/issues/8411.") + + # Get precise tracking of placeholder positions + placeholder_ranges = [] + curr_offset = -1 + curr_length = 0 + for i in range(len(prompt_token_ids)): + if prompt_token_ids[i] in (image_token_id, image_break_id): + if curr_offset < 0: + curr_offset = i + curr_length += 1 + elif prompt_token_ids[i] == image_end_id: + curr_length += 1 + placeholder_ranges.append( + PlaceholderRange(offset=curr_offset, length=curr_length)) + curr_offset = -1 + curr_length = 0 + else: + pass + return token_inputs(prompt=prompt, + prompt_token_ids=prompt_token_ids, + multi_modal_data=multi_modal_data, + multi_modal_placeholders={"image": placeholder_ranges}) @MULTIMODAL_REGISTRY.register_image_input_mapper(input_mapper_for_pixtral) @@ -192,11 +225,29 @@ def sampler(self): return get_sampler() def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: - image_input = self._parse_and_validate_image_input(**kwargs) + image_input, image_tokens = self._parse_and_validate_image_input( + **kwargs) if image_input is None: return None + vision_embeddings = self._process_image_input(image_input) - return vision_embeddings + + # NOTE: We patch the outputs of the vision encoder with embeddings + # from `[IMG_BREAK]` and `[IMG_END]` tokens. + image_embeds = self.language_model.get_input_embeddings(image_tokens) + image_token_mask = image_tokens == self.vision_args.image_token_id + image_embeds[image_token_mask] = vision_embeddings + + # NOTE: Image embeddings are split into separate tensors for each image + # by the indices of `[IMG_END]` token. + split_indices = torch.where( + image_tokens == PIXTRAL_IMAGE_END_ID)[0] + 1 + if len(split_indices) <= 1: + # Do not split, return as tensor of shape [1, fs, hs] + return image_embeds.unsqueeze(0) + + image_embeds = image_embeds.tensor_split(split_indices.cpu()) + return image_embeds def get_input_embeddings( self, @@ -206,8 +257,10 @@ def get_input_embeddings( inputs_embeds = self.language_model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, multimodal_embeddings, - self.vision_args.image_token_id) + input_ids, inputs_embeds, multimodal_embeddings, [ + self.vision_args.image_token_id, PIXTRAL_IMAGE_END_ID, + PIXTRAL_IMAGE_BREAK_ID + ]) return inputs_embeds def forward( @@ -245,10 +298,11 @@ def forward( def _parse_and_validate_image_input( self, images: Optional[Union[List[List[torch.Tensor]], List[torch.Tensor], - torch.Tensor]] = None + torch.Tensor]] = None, + image_tokens: Optional[torch.Tensor] = None, ) -> Optional[List[torch.Tensor]]: if images is None: - return None + return None, None if isinstance(images, torch.Tensor): # if passed as batch take all images @@ -267,7 +321,16 @@ def _parse_and_validate_image_input( images = flatten_images - return images + if isinstance(image_tokens, torch.Tensor): + # image_tokens are batched + image_tokens = image_tokens.flatten() + elif isinstance(image_tokens, list): + # image_tokens are of different lengths thus passed as a list + image_tokens = torch.cat(image_tokens) + + assert image_tokens.dim() == 1 + + return images, image_tokens def _process_image_input(self, image_input: List[torch.Tensor]) -> torch.Tensor: diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 5ec44955dbd80..269b66806adf4 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ 
-409,16 +409,42 @@ def merge_multimodal_embeddings( input_ids: torch.Tensor, inputs_embeds: torch.Tensor, multimodal_embeddings: NestedTensors, - placeholder_token_id: int, + placeholder_token_id: Union[int, List[int]], ) -> torch.Tensor: """ Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the positions in ``inputs_embeds`` corresponding to placeholder tokens in ``input_ids``. + + ``placeholder_token_id`` can be a list of token ids (e.g, token ids + of img_start, img_break, and img_end tokens) when needed: This means + the order of these tokens in the ``input_ids`` MUST MATCH the order of + their embeddings in ``multimodal_embeddings`` since we need to + slice-merge instead of individually scattering. + + For example, if input_ids is "TTTTTSIIIBIIIBIIIETTT", where + - T is text token + - S is image start token + - I is image embedding token + - B is image break token + - E is image end token. + + Then the image embeddings (that correspond to I's) from vision encoder + must be padded with embeddings of S, B, and E in the same order of + input_ids for a correct embedding merge. Note: This updates ``inputs_embeds`` in place. """ + if isinstance(placeholder_token_id, list): + placeholder_token_id = torch.tensor(placeholder_token_id, + device=input_ids.device) + return _merge_multimodal_embeddings( + inputs_embeds, + torch.isin(input_ids, placeholder_token_id), + multimodal_embeddings, + ) + return _merge_multimodal_embeddings( inputs_embeds, (input_ids == placeholder_token_id), diff --git a/vllm/multimodal/inputs.py b/vllm/multimodal/inputs.py index 640c7c04b8817..229a8fbdf5831 100644 --- a/vllm/multimodal/inputs.py +++ b/vllm/multimodal/inputs.py @@ -96,7 +96,8 @@ class PlaceholderRange(TypedDict): """The length of the placeholder.""" -NestedTensors = Union[List["NestedTensors"], List[torch.Tensor], torch.Tensor] +NestedTensors = Union[List["NestedTensors"], List[torch.Tensor], torch.Tensor, + Tuple[torch.Tensor, ...]] """ Uses a list instead of a tensor if the dimensions of each element do not match. """ diff --git a/vllm/multimodal/utils.py b/vllm/multimodal/utils.py index d4333b7519b47..c898ca4e6573e 100644 --- a/vllm/multimodal/utils.py +++ b/vllm/multimodal/utils.py @@ -535,11 +535,13 @@ def repeat_and_pad_placeholder_tokens( return new_prompt, new_token_ids, placeholder_ranges -def consecutive_placeholder_ranges(num_items: int, - item_size: int) -> List[PlaceholderRange]: +def consecutive_placeholder_ranges( + num_items: int, + item_size: int, + initial_offset: int = 0) -> List[PlaceholderRange]: """Returns a list of consecutive PlaceholderRanges of a fixed size""" return [ - PlaceholderRange(offset=i * item_size, length=item_size) - for i in range(num_items) + PlaceholderRange(offset=initial_offset + i * item_size, + length=item_size) for i in range(num_items) ] diff --git a/vllm/v1/core/scheduler.py b/vllm/v1/core/scheduler.py index f1f26f4e8d443..1203d35fc985f 100644 --- a/vllm/v1/core/scheduler.py +++ b/vllm/v1/core/scheduler.py @@ -73,12 +73,12 @@ def __init__( # has the Transformer architecture (e.g., ViT). # FIXME(woosuk): Below are placeholder values. We need to calculate the # actual values from the configurations. - self.max_num_encoder_input_tokens = 2048 + self.max_num_encoder_input_tokens = 16384 # NOTE(woosuk): For the models without encoder (e.g., text-only models), # the encoder cache will not be initialized and used, regardless of # the cache size. This is because the memory space for the encoder cache # is preallocated in the profiling run. 
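A small torch sketch of the multi-token merge path that merge_multimodal_embeddings gains here: every position whose id is in the placeholder set (image, break and end tokens alike) is overwritten, in order, by a row of the multimodal embeddings. Ids and sizes are toy values:

    import torch

    hidden = 4
    # 100 = image token, 101 = image break, 102 = image end (toy ids).
    input_ids = torch.tensor([7, 100, 100, 101, 100, 100, 102, 7])
    placeholder_ids = torch.tensor([100, 101, 102])

    inputs_embeds = torch.zeros(len(input_ids), hidden)
    mask = torch.isin(input_ids, placeholder_ids)

    # The multimodal embeddings must already be padded/ordered to line up
    # with every masked position (image rows plus break/end rows).
    mm_embeds = torch.ones(int(mask.sum()), hidden)
    inputs_embeds[mask] = mm_embeds
    print(mask.tolist())
    # [False, True, True, True, True, True, True, False]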
- self.encoder_cache_manager = EncoderCacheManager(cache_size=2048) + self.encoder_cache_manager = EncoderCacheManager(cache_size=16384) def schedule(self) -> "SchedulerOutput": # NOTE(woosuk) on the scheduling algorithm: diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index 312c0242a45dd..994e68669108e 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -1,5 +1,7 @@ from typing import Dict, List, Mapping, Optional, Type, Union +from typing_extensions import TypeVar + from vllm.config import VllmConfig from vllm.engine.arg_utils import EngineArgs from vllm.engine.metrics_types import StatLoggerBase @@ -12,7 +14,8 @@ from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams -from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs +from vllm.transformers_utils.tokenizer_group import ( + BaseTokenizerGroup, init_tokenizer_from_configs) from vllm.usage.usage_lib import UsageContext from vllm.v1.engine.core_client import EngineCoreClient from vllm.v1.engine.detokenizer import Detokenizer @@ -21,6 +24,8 @@ logger = init_logger(__name__) +_G = TypeVar("_G", bound=BaseTokenizerGroup, default=BaseTokenizerGroup) + class LLMEngine: """Legacy LLMEngine for backwards compatibility.""" @@ -169,5 +174,18 @@ def start_profile(self): def stop_profile(self): self.engine_core.profile(False) - def get_tokenizer_group(self, group_type): - pass + def get_tokenizer_group( + self, + group_type: Type[_G] = BaseTokenizerGroup, + ) -> _G: + tokenizer_group = self.tokenizer + + if tokenizer_group is None: + raise ValueError("Unable to get tokenizer because " + "skip_tokenizer_init is True") + if not isinstance(tokenizer_group, group_type): + raise TypeError("Invalid type of tokenizer group. " + f"Expected type: {group_type}, but " + f"found type: {type(tokenizer_group)}") + + return tokenizer_group diff --git a/vllm/v1/engine/mm_input_mapper.py b/vllm/v1/engine/mm_input_mapper.py index 45882f8f076d4..7ad6882b04520 100644 --- a/vllm/v1/engine/mm_input_mapper.py +++ b/vllm/v1/engine/mm_input_mapper.py @@ -33,7 +33,7 @@ def process_inputs( num_images = len(image_inputs) for i in range(num_images): mm_input = self.multi_modal_input_mapper( - {"image": [image_inputs[i]]}, + {"image": image_inputs[i]}, mm_processor_kwargs=mm_processor_kwargs, ) mm_inputs.append(mm_input) From 43b05fa314e90e551d87211e8bdde2e2bb5a0bdc Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 8 Dec 2024 11:18:18 -0800 Subject: [PATCH 269/397] [torch.compile][misc] fix comments (#10993) Signed-off-by: youkaichao --- vllm/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 164622b5af34e..38cf642b23cda 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2177,8 +2177,8 @@ class CompilationConfig(BaseModel): TODO: move outside cudagraph logic into compilation. torch.compile will handle cudagraph capture logic in the future. - cudagraph_capture_sizes: sizes to capture cudagraph. - - None: capture sizes are inferred from compilation context. - - List[int]: capture sizes are specified. + - None (default): capture sizes are inferred from vllm config. + - List[int]: capture sizes are specified as given. - cudagraph_num_of_warmups: number of warmup runs for cudagraph. It means the first several runs will be treated as warmup runs. 
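Patches 267 and 269 both touch CompilationConfig. A toy re-implementation of the two user-facing pieces (CLI parsing and compile-size filtering), not the real class:

    import ast

    def parse_compilation_config(cli_value: str) -> dict:
        # Mirrors CompilationConfig.from_cli: "3" selects a level, while a
        # dict literal such as "{'level': 3}" goes through ast.literal_eval
        # (eval is deliberately avoided).
        if cli_value in ("0", "1", "2", "3"):
            return {"level": int(cli_value)}
        return ast.literal_eval(cli_value)

    def resolve_compile_sizes(candidate_compile_sizes, capture_sizes):
        # Only candidates that are also cudagraph capture sizes get compiled;
        # the rest are ignored (the real code logs a warning for them).
        compile_sizes = [x for x in candidate_compile_sizes if x in capture_sizes]
        ignored = [x for x in candidate_compile_sizes if x not in capture_sizes]
        return compile_sizes, ignored

    print(parse_compilation_config("{'level': 3}"))        # {'level': 3}
    print(resolve_compile_sizes([1, 2, 7], [1, 2, 4, 8]))  # ([1, 2], [7])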
Only after that, the execution will be recorded, and the recorded From 46004e83a2e0b908f28099d93171bfb4934e4722 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 8 Dec 2024 17:28:27 -0800 Subject: [PATCH 270/397] [misc] clean up and unify logging (#10999) Signed-off-by: youkaichao --- vllm/config.py | 73 ++++++++++++++++++--------------------- vllm/engine/llm_engine.py | 54 ++--------------------------- 2 files changed, 37 insertions(+), 90 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 38cf642b23cda..7fbe04eaaf4f8 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2579,45 +2579,40 @@ def __post_init__(self): self.instance_id = random_uuid()[:5] def __str__(self): - return ("model=%r, speculative_config=%r, tokenizer=%r, " - "skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, " - "override_neuron_config=%s, tokenizer_revision=%s, " - "trust_remote_code=%s, dtype=%s, max_seq_len=%d, " - "download_dir=%r, load_format=%s, tensor_parallel_size=%d, " - "pipeline_parallel_size=%d, " - "disable_custom_all_reduce=%s, quantization=%s, " - "enforce_eager=%s, kv_cache_dtype=%s, " - "quantization_param_path=%s, device_config=%s, " - "decoding_config=%r, observability_config=%r, " - "seed=%d, served_model_name=%s, " - "num_scheduler_steps=%d, enable_prefix_caching=%s, " - "use_async_output_proc=%s, mm_processor_kwargs=%s") % \ - (self.model_config.model, self.speculative_config, - self.model_config.tokenizer, - self.model_config.skip_tokenizer_init, - self.model_config.tokenizer_mode, - self.model_config.revision, - self.model_config.override_neuron_config, - self.model_config.tokenizer_revision, - self.model_config.trust_remote_code, - self.model_config.dtype, - self.model_config.max_model_len, - self.load_config.download_dir, - self.load_config.load_format, - self.parallel_config.tensor_parallel_size, - self.parallel_config.pipeline_parallel_size, - self.parallel_config.disable_custom_all_reduce, - self.model_config.quantization, - self.model_config.enforce_eager, - self.cache_config.cache_dtype, - self.model_config.quantization_param_path, - self.device_config.device, self.decoding_config, - self.observability_config, self.model_config.seed, - self.model_config.served_model_name, - self.scheduler_config.num_scheduler_steps, - self.cache_config.enable_prefix_caching, - self.model_config.use_async_output_proc, - self.model_config.mm_processor_kwargs) + return ( + f"model={self.model_config.model!r}," + f" speculative_config={self.speculative_config!r}," + f" tokenizer={self.model_config.tokenizer!r}, " + f"skip_tokenizer_init={self.model_config.skip_tokenizer_init}," + f" tokenizer_mode={self.model_config.tokenizer_mode}, " + f"revision={self.model_config.revision}, " + f"override_neuron_config={self.model_config.override_neuron_config}," + f" tokenizer_revision={self.model_config.tokenizer_revision}, " + f"trust_remote_code={self.model_config.trust_remote_code}, " + f"dtype={self.model_config.dtype}, " + f"max_seq_len={self.model_config.max_model_len}," + f" download_dir={self.load_config.download_dir!r}, " + f"load_format={self.load_config.load_format}, " + f"tensor_parallel_size={self.parallel_config.tensor_parallel_size}," + f" pipeline_parallel_size={self.parallel_config.pipeline_parallel_size}, " # noqa + f"disable_custom_all_reduce={self.parallel_config.disable_custom_all_reduce}, " # noqa + f"quantization={self.model_config.quantization}, " + f"enforce_eager={self.model_config.enforce_eager}, " + f"kv_cache_dtype={self.cache_config.cache_dtype}, " + 
f"quantization_param_path={self.model_config.quantization_param_path}," + f" device_config={self.device_config.device}, " + f"decoding_config={self.decoding_config!r}, " + f"observability_config={self.observability_config!r}, " + f"seed={self.model_config.seed}, " + f"served_model_name={self.model_config.served_model_name}, " + f"num_scheduler_steps={self.scheduler_config.num_scheduler_steps}, " + f"multi_step_stream_outputs={self.scheduler_config.multi_step_stream_outputs}, " # noqa + f"enable_prefix_caching={self.cache_config.enable_prefix_caching}, " + f"chunked_prefill_enabled={self.scheduler_config.chunked_prefill_enabled}, " # noqa + f"use_async_output_proc={self.model_config.use_async_output_proc}, " + f"mm_processor_kwargs={self.model_config.mm_processor_kwargs}, " + f"pooler_config={self.model_config.pooler_config!r}," + f" compilation_config={self.compilation_config!r}") _current_vllm_config: Optional[VllmConfig] = None diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 26a8c94099a11..560f84a008291 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -247,60 +247,12 @@ def __init__( ) logger.info( - "Initializing an LLM engine (v%s) with config: " - "model=%r, speculative_config=%r, tokenizer=%r, " - "skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, " - "override_neuron_config=%s, tokenizer_revision=%s, " - "trust_remote_code=%s, dtype=%s, max_seq_len=%d, " - "download_dir=%r, load_format=%s, tensor_parallel_size=%d, " - "pipeline_parallel_size=%d, " - "disable_custom_all_reduce=%s, quantization=%s, " - "enforce_eager=%s, kv_cache_dtype=%s, " - "quantization_param_path=%s, device_config=%s, " - "decoding_config=%r, observability_config=%r, " - "seed=%d, served_model_name=%s, " - "num_scheduler_steps=%d, chunked_prefill_enabled=%s " - "multi_step_stream_outputs=%s, enable_prefix_caching=%s, " - "use_async_output_proc=%s, use_cached_outputs=%s, " - "mm_processor_kwargs=%s, pooler_config=%r," - "compilation_config=%r", + "Initializing an LLM engine (v%s) with config: %r," + "use_cached_outputs=%s, ", VLLM_VERSION, - self.model_config.model, - self.speculative_config, - self.model_config.tokenizer, - self.model_config.skip_tokenizer_init, - self.model_config.tokenizer_mode, - self.model_config.revision, - self.model_config.override_neuron_config, - self.model_config.tokenizer_revision, - self.model_config.trust_remote_code, - self.model_config.dtype, - self.model_config.max_model_len, - self.load_config.download_dir, - self.load_config.load_format, - self.parallel_config.tensor_parallel_size, - self.parallel_config.pipeline_parallel_size, - self.parallel_config.disable_custom_all_reduce, - self.model_config.quantization, - self.model_config.enforce_eager, - self.cache_config.cache_dtype, - self.model_config.quantization_param_path, - self.device_config.device, - self.decoding_config, - self.observability_config, - self.model_config.seed, - self.model_config.served_model_name, - self.scheduler_config.num_scheduler_steps, - self.scheduler_config.chunked_prefill_enabled, - self.scheduler_config.multi_step_stream_outputs, - self.cache_config.enable_prefix_caching, - self.model_config.use_async_output_proc, + vllm_config, use_cached_outputs, - self.model_config.mm_processor_kwargs, - self.model_config.pooler_config, - vllm_config.compilation_config, ) - # TODO(woosuk): Print more configs in debug mode. 
self.log_stats = log_stats self.use_cached_outputs = use_cached_outputs From af7c4a92e654684066e61518d6ed90feda983635 Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Sun, 8 Dec 2024 22:29:16 -0800 Subject: [PATCH 271/397] [Doc][V1] Add V1 support column for multimodal models (#10998) Signed-off-by: Roger Wang --- docs/source/models/supported_models.rst | 26 ++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index c9b3fa8485ff1..4e5b10967e3bb 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -495,7 +495,7 @@ Text Generation --------------- .. list-table:: - :widths: 25 25 15 25 5 5 + :widths: 25 25 15 20 5 5 5 :header-rows: 1 * - Architecture @@ -504,47 +504,55 @@ Text Generation - Example HF Models - :ref:`LoRA ` - :ref:`PP ` + - V1 * - :code:`AriaForConditionalGeneration` - Aria - T + I - :code:`rhymes-ai/Aria` - - ✅︎ + - * - :code:`Blip2ForConditionalGeneration` - BLIP-2 - T + I\ :sup:`E` - :code:`Salesforce/blip2-opt-2.7b`, :code:`Salesforce/blip2-opt-6.7b`, etc. - - ✅︎ + - * - :code:`ChameleonForConditionalGeneration` - Chameleon - T + I - :code:`facebook/chameleon-7b` etc. - - ✅︎ + - * - :code:`FuyuForCausalLM` - Fuyu - T + I - :code:`adept/fuyu-8b` etc. - - ✅︎ + - * - :code:`ChatGLMModel` - GLM-4V - T + I - :code:`THUDM/glm-4v-9b` etc. - ✅︎ - ✅︎ + - * - :code:`H2OVLChatModel` - H2OVL - T + I\ :sup:`E+` - :code:`h2oai/h2ovl-mississippi-800m`, :code:`h2oai/h2ovl-mississippi-2b`, etc. - - ✅︎ + - * - :code:`Idefics3ForConditionalGeneration` - Idefics3 - T + I - :code:`HuggingFaceM4/Idefics3-8B-Llama3` etc. - ✅︎ + - - * - :code:`InternVLChatModel` - InternVL 2.5, Mono-InternVL, InternVL 2.0 @@ -552,96 +560,112 @@ Text Generation - :code:`OpenGVLab/InternVL2_5-4B`, :code:`OpenGVLab/Mono-InternVL-2B`, :code:`OpenGVLab/InternVL2-4B`, etc. - - ✅︎ + - ✅︎ * - :code:`LlavaForConditionalGeneration` - LLaVA-1.5 - T + I\ :sup:`E+` - :code:`llava-hf/llava-1.5-7b-hf`, :code:`TIGER-Lab/Mantis-8B-siglip-llama3` (see note), etc. - - ✅︎ + - ✅︎ * - :code:`LlavaNextForConditionalGeneration` - LLaVA-NeXT - T + I\ :sup:`E+` - :code:`llava-hf/llava-v1.6-mistral-7b-hf`, :code:`llava-hf/llava-v1.6-vicuna-7b-hf`, etc. - - ✅︎ + - * - :code:`LlavaNextVideoForConditionalGeneration` - LLaVA-NeXT-Video - T + V - :code:`llava-hf/LLaVA-NeXT-Video-7B-hf`, etc. - - ✅︎ + - * - :code:`LlavaOnevisionForConditionalGeneration` - LLaVA-Onevision - T + I\ :sup:`+` + V\ :sup:`+` - :code:`llava-hf/llava-onevision-qwen2-7b-ov-hf`, :code:`llava-hf/llava-onevision-qwen2-0.5b-ov-hf`, etc. - - ✅︎ + - * - :code:`MiniCPMV` - MiniCPM-V - T + I\ :sup:`E+` - :code:`openbmb/MiniCPM-V-2` (see note), :code:`openbmb/MiniCPM-Llama3-V-2_5`, :code:`openbmb/MiniCPM-V-2_6`, etc. - ✅︎ - ✅︎ + - * - :code:`MllamaForConditionalGeneration` - Llama 3.2 - T + I\ :sup:`+` - :code:`meta-llama/Llama-3.2-90B-Vision-Instruct`, :code:`meta-llama/Llama-3.2-11B-Vision`, etc. - - + - * - :code:`MolmoForCausalLM` - Molmo - T + I - :code:`allenai/Molmo-7B-D-0924`, :code:`allenai/Molmo-72B-0924`, etc. - - ✅︎ + - ✅︎ * - :code:`NVLM_D_Model` - NVLM-D 1.0 - T + I\ :sup:`E+` - :code:`nvidia/NVLM-D-72B`, etc. - - ✅︎ + - ✅︎ * - :code:`PaliGemmaForConditionalGeneration` - PaliGemma - T + I\ :sup:`E` - :code:`google/paligemma-3b-pt-224`, :code:`google/paligemma-3b-mix-224`, etc. 
- - ✅︎ + - * - :code:`Phi3VForCausalLM` - Phi-3-Vision, Phi-3.5-Vision - T + I\ :sup:`E+` - :code:`microsoft/Phi-3-vision-128k-instruct`, :code:`microsoft/Phi-3.5-vision-instruct` etc. - - ✅︎ + - ✅︎ * - :code:`PixtralForConditionalGeneration` - Pixtral - T + I\ :sup:`+` - :code:`mistralai/Pixtral-12B-2409`, :code:`mistral-community/pixtral-12b` etc. - - ✅︎ + - ✅︎ * - :code:`QWenLMHeadModel` - Qwen-VL - T + I\ :sup:`E+` - :code:`Qwen/Qwen-VL`, :code:`Qwen/Qwen-VL-Chat`, etc. - ✅︎ - ✅︎ + - * - :code:`Qwen2AudioForConditionalGeneration` - Qwen2-Audio - T + A\ :sup:`+` - :code:`Qwen/Qwen2-Audio-7B-Instruct` - - ✅︎ + - * - :code:`Qwen2VLForConditionalGeneration` - Qwen2-VL - T + I\ :sup:`E+` + V\ :sup:`E+` - :code:`Qwen/Qwen2-VL-2B-Instruct`, :code:`Qwen/Qwen2-VL-7B-Instruct`, :code:`Qwen/Qwen2-VL-72B-Instruct`, etc. - ✅︎ - ✅︎ + - * - :code:`UltravoxModel` - Ultravox - T + A\ :sup:`E+` - :code:`fixie-ai/ultravox-v0_3` - - ✅︎ + - | :sup:`E` Pre-computed embeddings can be inputted for this modality. | :sup:`+` Multiple items can be inputted per text prompt for this modality. From d1c2e15eb31ef12e688ce0cb71895f88eaf4cd4f Mon Sep 17 00:00:00 2001 From: youkaichao Date: Sun, 8 Dec 2024 23:09:04 -0800 Subject: [PATCH 272/397] [torch.compile] add dynamo time tracking (#11005) Signed-off-by: youkaichao --- vllm/compilation/backends.py | 6 ++++++ vllm/compilation/decorators.py | 6 +++--- vllm/compilation/monitor.py | 9 +++++++-- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 1206424ae1e3f..f002a8ff905b1 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -265,7 +265,13 @@ def configure_post_pass(self): def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: + # when dynamo calls the backend, it means the bytecode + # transform and analysis are done compilation_counter.num_graphs_seen += 1 + from .monitor import torch_compile_start_time + dynamo_time = time.time() - torch_compile_start_time + logger.info("Dynamo bytecode transform time: %.2f s", dynamo_time) + self.compilation_configs.compilation_time += dynamo_time # we control the compilation process, each instance can only be # called once diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index a32dced57e5b3..938430fe2a501 100644 --- a/vllm/compilation/decorators.py +++ b/vllm/compilation/decorators.py @@ -145,6 +145,7 @@ def _support_torch_compile( def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): old_init(self, vllm_config=vllm_config, prefix=prefix, **kwargs) + self.vllm_config = vllm_config # for CompilationLevel.DYNAMO_AS_IS , the upper level model runner # will handle the compilation, so we don't need to do anything here. 
self.do_not_compile = \ @@ -157,9 +158,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = '', **kwargs): TorchCompileWrapperWithCustomDispatcher.__init__( self, compilation_level=vllm_config.compilation_config.level) - if vllm_config.compilation_config.level == CompilationLevel.PIECEWISE: - start_monitoring_torch_compile(vllm_config.compilation_config) - cls.__init__ = __init__ def __call__(self, *args, **kwargs): @@ -186,6 +184,8 @@ def __call__(self, *args, **kwargs): raise ValueError( "Unsupported dynamic dimensions" f" {dims} for argument {k} with type {type(arg)}.") + # here, it is the starting point of the `torch.compile` process + start_monitoring_torch_compile(self.vllm_config.compilation_config) # if we don't use custom dispatcher, we can directly call the # compiled function and let torch.compile handle the dispatching, diff --git a/vllm/compilation/monitor.py b/vllm/compilation/monitor.py index f718e46423212..3348674b09af2 100644 --- a/vllm/compilation/monitor.py +++ b/vllm/compilation/monitor.py @@ -1,14 +1,19 @@ +import time + from vllm.config import CompilationConfig, CompilationLevel from vllm.logger import init_logger logger = init_logger(__name__) +torch_compile_start_time: float = 0.0 + def start_monitoring_torch_compile(compilation_config: CompilationConfig): - pass + global torch_compile_start_time + torch_compile_start_time = time.time() def end_monitoring_torch_compile(compilation_config: CompilationConfig): if compilation_config.level == CompilationLevel.PIECEWISE: - logger.info("graph compilation takes %.2f s in total", + logger.info("torch.compile takes %.2f s in total", compilation_config.compilation_time) From c690357928fd2812f450bfb0c3629a816f5e9a55 Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Mon, 9 Dec 2024 08:27:10 -0800 Subject: [PATCH 273/397] [V1] Fix Detokenizer loading in `AsyncLLM` (#10997) Signed-off-by: Roger Wang --- vllm/v1/engine/async_llm.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 4ef372fd8464b..0bcccda2bf329 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -65,7 +65,12 @@ def __init__( input_registry) # Detokenizer (converts EngineCoreOutputs --> RequestOutput). - self.detokenizer = Detokenizer(vllm_config.model_config.tokenizer) + self.detokenizer = Detokenizer( + tokenizer_name=vllm_config.model_config.tokenizer, + tokenizer_mode=vllm_config.model_config.tokenizer_mode, + trust_remote_code=vllm_config.model_config.trust_remote_code, + revision=vllm_config.model_config.tokenizer_revision, + ) # EngineCore (starts the engine in background process). 
self.engine_core = EngineCoreClient.make_client( From e691b26f6fae5a3a1c220d15f20de83c7d78ed51 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 9 Dec 2024 11:44:27 -0500 Subject: [PATCH 274/397] [Core] Require xgrammar >= 0.1.6 (#11021) Signed-off-by: Russell Bryant --- requirements-common.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-common.txt b/requirements-common.txt index 72fb020a82c4e..112528880c0ac 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -19,7 +19,7 @@ prometheus-fastapi-instrumentator >= 7.0.0 tiktoken >= 0.6.0 # Required for DBRX tokenizer lm-format-enforcer >= 0.10.9, < 0.11 outlines >= 0.0.43, < 0.1 -xgrammar >= 0.1.5; platform_machine == "x86_64" +xgrammar >= 0.1.6; platform_machine == "x86_64" typing_extensions >= 4.10 filelock >= 3.16.1 # need to contain https://github.com/tox-dev/filelock/pull/317 partial-json-parser # used for parsing partial JSON outputs From aea2fc38c3b31b9a8ea7d1cffb8f37a2da6f6075 Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Tue, 10 Dec 2024 01:24:46 +0800 Subject: [PATCH 275/397] [Platform] Move `async output` check to platform (#10768) Signed-off-by: wangxiyuan --- vllm/config.py | 17 +++-------------- vllm/platforms/cpu.py | 6 +++++- vllm/platforms/cuda.py | 12 +++++++++++- vllm/platforms/hpu.py | 6 +++++- vllm/platforms/interface.py | 11 +++++++++++ vllm/platforms/neuron.py | 6 +++++- vllm/platforms/openvino.py | 6 +++++- vllm/platforms/rocm.py | 12 +++++++++++- vllm/platforms/tpu.py | 6 +++++- vllm/platforms/xpu.py | 6 +++++- 10 files changed, 66 insertions(+), 22 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 7fbe04eaaf4f8..29f0839dcabba 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -513,11 +513,10 @@ def verify_async_output_proc(self, parallel_config, speculative_config, # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid - if device_config.device_type not in ("cuda", "tpu", "xpu", "hpu"): + if not current_platform.is_async_output_supported(self.enforce_eager): logger.warning( - "Async output processing is only supported for CUDA, TPU, XPU " - "and HPU." - "Disabling it for other platforms.") + "Async output processing is not supported on the " + "current platform type %s.", current_platform.device_type) self.use_async_output_proc = False return @@ -527,16 +526,6 @@ def verify_async_output_proc(self, parallel_config, speculative_config, self.use_async_output_proc = False return - # Reminder: Please update docs/source/usage/compatibility_matrix.rst - # If the feature combo become valid - if device_config.device_type == "cuda" and self.enforce_eager: - logger.warning( - "To see benefits of async output processing, enable CUDA " - "graph. 
Since, enforce-eager is enabled, async output " - "processor cannot be used") - self.use_async_output_proc = not self.enforce_eager - return - # Async postprocessor is not necessary with embedding mode # since there is no token generation if self.task == "embedding": diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 680ee74129739..e5142b985d1f2 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import psutil import torch @@ -37,6 +37,10 @@ def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: def get_device_total_memory(cls, device_id: int = 0) -> int: return psutil.virtual_memory().total + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return False + @classmethod def inference_mode(cls): return torch.no_grad() diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 846a1869da228..edaf377b501df 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -4,7 +4,7 @@ import os from functools import lru_cache, wraps -from typing import TYPE_CHECKING, Callable, List, TypeVar +from typing import TYPE_CHECKING, Callable, List, Optional, TypeVar import pynvml import torch @@ -88,6 +88,16 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_device_total_memory(cls, device_id: int = 0) -> int: raise NotImplementedError + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + if enforce_eager: + logger.warning( + "To see benefits of async output processing, enable CUDA " + "graph. Since, enforce-eager is enabled, async output " + "processor cannot be used") + return False + return True + @classmethod def is_full_nvlink(cls, device_ids: List[int]) -> bool: raise NotImplementedError diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 10aaa6d54962c..7f22bee3eaa74 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -20,6 +20,10 @@ class HpuPlatform(Platform): def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: return _Backend.HPU_ATTN + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return True + @staticmethod def inference_mode(): return torch.no_grad() diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 0be7df7941b8b..db06d2c18e681 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -6,11 +6,15 @@ import numpy as np import torch +from vllm.logger import init_logger + if TYPE_CHECKING: from vllm.config import VllmConfig else: VllmConfig = None +logger = init_logger(__name__) + class _Backend(enum.Enum): FLASH_ATTN = enum.auto() @@ -147,6 +151,13 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: """Get the total memory of a device in bytes.""" raise NotImplementedError + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + """ + Check if the current platform supports async output. + """ + raise NotImplementedError + @classmethod def inference_mode(cls): """A device-specific wrapper of `torch.inference_mode`. 
diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 87655ea198303..1e5c4bddfa24f 100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional from .interface import Platform, PlatformEnum @@ -18,6 +18,10 @@ class NeuronPlatform(Platform): def get_device_name(cls, device_id: int = 0) -> str: return "neuron" + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return False + @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index 29b61e955d9ab..e0f8e8b4b49fe 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -37,6 +37,10 @@ def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: def get_device_name(self, device_id: int = 0) -> str: return "openvino" + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return False + @classmethod def inference_mode(self): return torch.inference_mode(mode=True) diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 3c14fbc179f69..66674e3ebe91f 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -1,6 +1,6 @@ import os from functools import lru_cache -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -72,6 +72,16 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.cuda.get_device_properties(device_id) return device_props.total_memory + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + if enforce_eager: + logger.warning( + "To see benefits of async output processing, enable CUDA " + "graph. 
Since, enforce-eager is enabled, async output " + "processor cannot be used") + return False + return True + @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index b138f7e1c54c5..10d874349f36b 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -35,6 +35,10 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_device_total_memory(cls, device_id: int = 0) -> int: raise NotImplementedError + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return True + @classmethod def inference_mode(cls): return torch.no_grad() diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 9665786f4c499..11dbd04d55671 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -41,6 +41,10 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.xpu.get_device_properties(device_id) return device_props.total_memory + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return True + @staticmethod def inference_mode(): return torch.no_grad() From 25b79d9fd38e2c53ce281be23241d8939ec7320c Mon Sep 17 00:00:00 2001 From: Varun Sundar Rabindranath Date: Mon, 9 Dec 2024 12:33:41 -0500 Subject: [PATCH 276/397] [V1] Input Batch Relocation (#10962) Signed-off-by: Varun Sundar Rabindranath Co-authored-by: Varun Sundar Rabindranath --- vllm/v1/worker/gpu_input_batch.py | 280 +++++++++++++++++++++++++++++ vllm/v1/worker/gpu_model_runner.py | 273 +--------------------------- 2 files changed, 283 insertions(+), 270 deletions(-) create mode 100644 vllm/v1/worker/gpu_input_batch.py diff --git a/vllm/v1/worker/gpu_input_batch.py b/vllm/v1/worker/gpu_input_batch.py new file mode 100644 index 0000000000000..457784bb0287c --- /dev/null +++ b/vllm/v1/worker/gpu_input_batch.py @@ -0,0 +1,280 @@ +# Datastructures defining an input batch + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Set + +import numpy as np +import torch + +from vllm.multimodal import MultiModalKwargs +from vllm.sampling_params import SamplingParams, SamplingType +from vllm.v1.sample.metadata import SamplingMetadata + +if TYPE_CHECKING: + from vllm.multimodal.inputs import PlaceholderRange + + +@dataclass +class CachedRequestState: + + req_id: str + prompt_token_ids: List[int] + prompt: Optional[str] + mm_inputs: List[MultiModalKwargs] + mm_positions: List["PlaceholderRange"] + sampling_params: SamplingParams + generator: Optional[torch.Generator] + + block_ids: List[int] + num_computed_tokens: int + output_token_ids: List[int] + + @property + def num_tokens(self) -> int: + return len(self.prompt_token_ids) + len(self.output_token_ids) + + +class InputBatch: + + def __init__( + self, + max_num_reqs: int, + max_model_len: int, + max_num_blocks_per_req: int, + device: torch.device, + pin_memory: bool, + ): + self.max_num_reqs = max_num_reqs + self.max_model_len = max_model_len + self.max_num_blocks_per_req = max_num_blocks_per_req + self.device = device + self.pin_memory = pin_memory + + self.req_ids: List[Optional[str]] = [None] * max_num_reqs + self.req_id_to_index: Dict[str, int] = {} + + self.token_ids_cpu = np.empty((max_num_reqs, 
max_model_len), + dtype=np.int32) + self.num_computed_tokens_cpu = np.empty(max_num_reqs, dtype=np.int32) + + # Attention-related. + self.block_table = torch.zeros((max_num_reqs, max_num_blocks_per_req), + device=self.device, + dtype=torch.int32) + self.block_table_cpu_tensor = torch.zeros( + (max_num_reqs, max_num_blocks_per_req), + device="cpu", + dtype=torch.int32, + pin_memory=pin_memory, + ) + self.block_table_cpu = self.block_table_cpu_tensor.numpy() + + # Sampling-related. + self.temperature = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.temperature_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.temperature_cpu = self.temperature_cpu_tensor.numpy() + self.greedy_reqs: Set[str] = set() + self.random_reqs: Set[str] = set() + + self.top_p = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.top_p_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.top_p_cpu = self.top_p_cpu_tensor.numpy() + self.top_p_reqs: Set[str] = set() + + self.top_k = torch.empty((max_num_reqs, ), + dtype=torch.int32, + device=device) + self.top_k_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.int32, + device="cpu", + pin_memory=pin_memory) + self.top_k_cpu = self.top_k_cpu_tensor.numpy() + self.top_k_reqs: Set[str] = set() + + # req_index -> generator + self.generators: Dict[int, torch.Generator] = {} + + self.num_logprobs: Dict[str, int] = {} + self.prompt_logprob_reqs: Set[str] = set() + + def add_request( + self, + request: "CachedRequestState", + req_index: Optional[int] = None, + ) -> None: + if req_index is None: + req_index = self.num_reqs + assert req_index < self.max_num_reqs + + req_id = request.req_id + self.req_ids[req_index] = req_id + self.req_id_to_index[req_id] = req_index + + # Copy the prompt token ids and output token ids. 
+ num_prompt_tokens = len(request.prompt_token_ids) + self.token_ids_cpu[ + req_index, :num_prompt_tokens] = request.prompt_token_ids + start_idx = num_prompt_tokens + end_idx = start_idx + len(request.output_token_ids) + self.token_ids_cpu[req_index, + start_idx:end_idx] = request.output_token_ids + + self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens + num_blocks = len(request.block_ids) + self.block_table_cpu[req_index, :num_blocks] = request.block_ids + + sampling_params = request.sampling_params + self.temperature_cpu[req_index] = sampling_params.temperature + if sampling_params.sampling_type == SamplingType.GREEDY: + self.greedy_reqs.add(req_id) + else: + self.random_reqs.add(req_id) + + self.top_p_cpu[req_index] = sampling_params.top_p + if sampling_params.top_p < 1: + self.top_p_reqs.add(req_id) + self.top_k_cpu[req_index] = sampling_params.top_k + if sampling_params.top_k > 0: + self.top_k_reqs.add(req_id) + + self.generators[req_index] = request.generator + + num_logprobs = sampling_params.logprobs + if num_logprobs is not None and num_logprobs > 0: + self.num_logprobs[req_id] = num_logprobs + if sampling_params.prompt_logprobs: + self.prompt_logprob_reqs.add(req_id) + + def remove_request(self, req_id: str) -> Optional[int]: + req_index = self.req_id_to_index.pop(req_id, None) + if req_index is None: + return None + self.req_ids[req_index] = None + + self.greedy_reqs.discard(req_id) + self.random_reqs.discard(req_id) + self.top_p_reqs.discard(req_id) + self.top_k_reqs.discard(req_id) + self.generators.pop(req_index, None) + self.num_logprobs.pop(req_id, None) + self.prompt_logprob_reqs.discard(req_id) + return req_index + + def clear(self) -> None: + self.req_ids = [None] * self.max_num_reqs + self.req_id_to_index.clear() + self.greedy_reqs.clear() + self.random_reqs.clear() + self.top_p_reqs.clear() + self.top_k_reqs.clear() + self.generators.clear() + self.num_logprobs.clear() + self.prompt_logprob_reqs.clear() + + def condense(self, empty_req_indices: List[int]) -> None: + if self.num_reqs == 0: + # The batched states are empty. + return + + # NOTE(woosuk): This function assumes that the empty_req_indices + # is sorted in descending order. + last_req_index = self.num_reqs + len(empty_req_indices) - 1 + while empty_req_indices: + # Find the largest non-empty index. + while last_req_index in empty_req_indices: + last_req_index -= 1 + + # Find the smallest empty index. + empty_index = empty_req_indices.pop() + if empty_index >= last_req_index: + break + + # Swap the states. + req_id = self.req_ids[last_req_index] + self.req_ids[empty_index] = req_id + self.req_ids[last_req_index] = None + self.req_id_to_index[req_id] = empty_index + + # TODO(woosuk): Optimize the copy of token_ids_cpu and + # block_table_cpu. + self.token_ids_cpu[empty_index] = self.token_ids_cpu[ + last_req_index] + self.num_computed_tokens_cpu[ + empty_index] = self.num_computed_tokens_cpu[last_req_index] + self.block_table_cpu[empty_index] = self.block_table_cpu[ + last_req_index] + self.temperature_cpu[empty_index] = self.temperature_cpu[ + last_req_index] + self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index] + self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index] + generator = self.generators.pop(last_req_index, None) + if generator is not None: + self.generators[empty_index] = generator + + # Decrement last_req_index since it is now empty. 
+ last_req_index -= 1 + + def make_sampling_metadata( + self, + skip_copy: bool = False, + ) -> SamplingMetadata: + if not skip_copy: + self.temperature[:self.num_reqs].copy_( + self.temperature_cpu_tensor[:self.num_reqs], non_blocking=True) + self.top_p[:self.num_reqs].copy_( + self.top_p_cpu_tensor[:self.num_reqs], non_blocking=True) + self.top_k[:self.num_reqs].copy_( + self.top_k_cpu_tensor[:self.num_reqs], non_blocking=True) + return SamplingMetadata( + temperature=self.temperature[:self.num_reqs], + all_greedy=self.all_greedy, + all_random=self.all_random, + top_p=self.top_p[:self.num_reqs], + top_k=self.top_k[:self.num_reqs], + no_top_p=self.no_top_p, + no_top_k=self.no_top_k, + generators=self.generators, + max_num_logprobs=self.max_num_logprobs, + ) + + @property + def num_reqs(self) -> int: + return len(self.req_id_to_index) + + @property + def all_greedy(self) -> bool: + return len(self.random_reqs) == 0 + + @property + def all_random(self) -> bool: + return len(self.greedy_reqs) == 0 + + @property + def no_top_p(self) -> bool: + return len(self.top_p_reqs) == 0 + + @property + def no_top_k(self) -> bool: + return len(self.top_k_reqs) == 0 + + @property + def max_num_logprobs(self) -> int: + return max(self.num_logprobs.values()) if self.num_logprobs else 0 + + @property + def no_logprob(self) -> bool: + return len(self.num_logprobs) == 0 + + @property + def no_prompt_logprob(self) -> bool: + return len(self.prompt_logprob_reqs) == 0 diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index e8d964a722f60..7f95be06188e3 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1,7 +1,6 @@ import gc import time -from dataclasses import dataclass -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple import numpy as np import torch @@ -15,16 +14,16 @@ from vllm.logger import init_logger from vllm.model_executor.model_loader import get_model from vllm.multimodal import MultiModalKwargs -from vllm.sampling_params import SamplingParams, SamplingType +from vllm.sampling_params import SamplingType from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, cdiv, is_pin_memory_available) from vllm.v1.attention.backends.flash_attn import (FlashAttentionBackend, FlashAttentionMetadata) from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.sample.metadata import SamplingMetadata +from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch if TYPE_CHECKING: - from vllm.multimodal.inputs import PlaceholderRange from vllm.v1.core.scheduler import SchedulerOutput logger = init_logger(__name__) @@ -609,269 +608,3 @@ def _get_padded_batch_size(self, batch_size: int) -> Optional[int]: if batch_size <= size: return size return None - - -@dataclass -class CachedRequestState: - - req_id: str - prompt_token_ids: List[int] - prompt: Optional[str] - mm_inputs: List[MultiModalKwargs] - mm_positions: List["PlaceholderRange"] - sampling_params: SamplingParams - generator: Optional[torch.Generator] - - block_ids: List[int] - num_computed_tokens: int - output_token_ids: List[int] - - @property - def num_tokens(self) -> int: - return len(self.prompt_token_ids) + len(self.output_token_ids) - - -class InputBatch: - - def __init__( - self, - max_num_reqs: int, - max_model_len: int, - max_num_blocks_per_req: int, - device: torch.device, - pin_memory: bool, - ): - self.max_num_reqs = max_num_reqs - self.max_model_len = max_model_len - 
self.max_num_blocks_per_req = max_num_blocks_per_req - self.device = device - self.pin_memory = pin_memory - - self.req_ids: List[Optional[str]] = [None] * max_num_reqs - self.req_id_to_index: Dict[str, int] = {} - - self.token_ids_cpu = np.empty((max_num_reqs, max_model_len), - dtype=np.int32) - self.num_computed_tokens_cpu = np.empty(max_num_reqs, dtype=np.int32) - - # Attention-related. - self.block_table = torch.zeros((max_num_reqs, max_num_blocks_per_req), - device=self.device, - dtype=torch.int32) - self.block_table_cpu_tensor = torch.zeros( - (max_num_reqs, max_num_blocks_per_req), - device="cpu", - dtype=torch.int32, - pin_memory=pin_memory, - ) - self.block_table_cpu = self.block_table_cpu_tensor.numpy() - - # Sampling-related. - self.temperature = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device=device) - self.temperature_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device="cpu", - pin_memory=pin_memory) - self.temperature_cpu = self.temperature_cpu_tensor.numpy() - self.greedy_reqs: Set[str] = set() - self.random_reqs: Set[str] = set() - - self.top_p = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device=device) - self.top_p_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device="cpu", - pin_memory=pin_memory) - self.top_p_cpu = self.top_p_cpu_tensor.numpy() - self.top_p_reqs: Set[str] = set() - - self.top_k = torch.empty((max_num_reqs, ), - dtype=torch.int32, - device=device) - self.top_k_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.int32, - device="cpu", - pin_memory=pin_memory) - self.top_k_cpu = self.top_k_cpu_tensor.numpy() - self.top_k_reqs: Set[str] = set() - - # req_index -> generator - self.generators: Dict[int, torch.Generator] = {} - - self.num_logprobs: Dict[str, int] = {} - self.prompt_logprob_reqs: Set[str] = set() - - def add_request( - self, - request: "CachedRequestState", - req_index: Optional[int] = None, - ) -> None: - if req_index is None: - req_index = self.num_reqs - assert req_index < self.max_num_reqs - - req_id = request.req_id - self.req_ids[req_index] = req_id - self.req_id_to_index[req_id] = req_index - - # Copy the prompt token ids and output token ids. 
- num_prompt_tokens = len(request.prompt_token_ids) - self.token_ids_cpu[ - req_index, :num_prompt_tokens] = request.prompt_token_ids - start_idx = num_prompt_tokens - end_idx = start_idx + len(request.output_token_ids) - self.token_ids_cpu[req_index, - start_idx:end_idx] = request.output_token_ids - - self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens - num_blocks = len(request.block_ids) - self.block_table_cpu[req_index, :num_blocks] = request.block_ids - - sampling_params = request.sampling_params - self.temperature_cpu[req_index] = sampling_params.temperature - if sampling_params.sampling_type == SamplingType.GREEDY: - self.greedy_reqs.add(req_id) - else: - self.random_reqs.add(req_id) - - self.top_p_cpu[req_index] = sampling_params.top_p - if sampling_params.top_p < 1: - self.top_p_reqs.add(req_id) - self.top_k_cpu[req_index] = sampling_params.top_k - if sampling_params.top_k > 0: - self.top_k_reqs.add(req_id) - - self.generators[req_index] = request.generator - - num_logprobs = sampling_params.logprobs - if num_logprobs is not None and num_logprobs > 0: - self.num_logprobs[req_id] = num_logprobs - if sampling_params.prompt_logprobs: - self.prompt_logprob_reqs.add(req_id) - - def remove_request(self, req_id: str) -> Optional[int]: - req_index = self.req_id_to_index.pop(req_id, None) - if req_index is None: - return None - self.req_ids[req_index] = None - - self.greedy_reqs.discard(req_id) - self.random_reqs.discard(req_id) - self.top_p_reqs.discard(req_id) - self.top_k_reqs.discard(req_id) - self.generators.pop(req_index, None) - self.num_logprobs.pop(req_id, None) - self.prompt_logprob_reqs.discard(req_id) - return req_index - - def clear(self) -> None: - self.req_ids = [None] * self.max_num_reqs - self.req_id_to_index.clear() - self.greedy_reqs.clear() - self.random_reqs.clear() - self.top_p_reqs.clear() - self.top_k_reqs.clear() - self.generators.clear() - self.num_logprobs.clear() - self.prompt_logprob_reqs.clear() - - def condense(self, empty_req_indices: List[int]) -> None: - if self.num_reqs == 0: - # The batched states are empty. - return - - # NOTE(woosuk): This function assumes that the empty_req_indices - # is sorted in descending order. - last_req_index = self.num_reqs + len(empty_req_indices) - 1 - while empty_req_indices: - # Find the largest non-empty index. - while last_req_index in empty_req_indices: - last_req_index -= 1 - - # Find the smallest empty index. - empty_index = empty_req_indices.pop() - if empty_index >= last_req_index: - break - - # Swap the states. - req_id = self.req_ids[last_req_index] - self.req_ids[empty_index] = req_id - self.req_ids[last_req_index] = None - self.req_id_to_index[req_id] = empty_index - - # TODO(woosuk): Optimize the copy of token_ids_cpu and - # block_table_cpu. - self.token_ids_cpu[empty_index] = self.token_ids_cpu[ - last_req_index] - self.num_computed_tokens_cpu[ - empty_index] = self.num_computed_tokens_cpu[last_req_index] - self.block_table_cpu[empty_index] = self.block_table_cpu[ - last_req_index] - self.temperature_cpu[empty_index] = self.temperature_cpu[ - last_req_index] - self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index] - self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index] - generator = self.generators.pop(last_req_index, None) - if generator is not None: - self.generators[empty_index] = generator - - # Decrement last_req_index since it is now empty. 
- last_req_index -= 1 - - def make_sampling_metadata( - self, - skip_copy: bool = False, - ) -> SamplingMetadata: - if not skip_copy: - self.temperature[:self.num_reqs].copy_( - self.temperature_cpu_tensor[:self.num_reqs], non_blocking=True) - self.top_p[:self.num_reqs].copy_( - self.top_p_cpu_tensor[:self.num_reqs], non_blocking=True) - self.top_k[:self.num_reqs].copy_( - self.top_k_cpu_tensor[:self.num_reqs], non_blocking=True) - return SamplingMetadata( - temperature=self.temperature[:self.num_reqs], - all_greedy=self.all_greedy, - all_random=self.all_random, - top_p=self.top_p[:self.num_reqs], - top_k=self.top_k[:self.num_reqs], - no_top_p=self.no_top_p, - no_top_k=self.no_top_k, - generators=self.generators, - max_num_logprobs=self.max_num_logprobs, - ) - - @property - def num_reqs(self) -> int: - return len(self.req_id_to_index) - - @property - def all_greedy(self) -> bool: - return len(self.random_reqs) == 0 - - @property - def all_random(self) -> bool: - return len(self.greedy_reqs) == 0 - - @property - def no_top_p(self) -> bool: - return len(self.top_p_reqs) == 0 - - @property - def no_top_k(self) -> bool: - return len(self.top_k_reqs) == 0 - - @property - def max_num_logprobs(self) -> int: - return max(self.num_logprobs.values()) if self.num_logprobs else 0 - - @property - def no_logprob(self) -> bool: - return len(self.num_logprobs) == 0 - - @property - def no_prompt_logprob(self) -> bool: - return len(self.prompt_logprob_reqs) == 0 From edc4fa31888b4a41060acb7b16250540f051ad59 Mon Sep 17 00:00:00 2001 From: "Kevin H. Luu" Date: Mon, 9 Dec 2024 11:46:58 -0800 Subject: [PATCH 277/397] [ci/build] Recompile CI dependencies list with Python 3.12 (#11013) Signed-off-by: kevin --- requirements-test.txt | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/requirements-test.txt b/requirements-test.txt index 19369254dbe26..38a064bca449a 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,8 +1,8 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.12 # by the following command: # -# pip-compile requirements-test.in +# python3.12 -m piptools compile requirements-test.in -o requirements-test.txt # absl-py==2.1.0 # via rouge-score @@ -27,10 +27,6 @@ anyio==4.6.2.post1 # via httpx argcomplete==3.5.1 # via datamodel-code-generator -async-timeout==4.0.3 - # via - # aiohttp - # redis attrs==24.2.0 # via # aiohttp @@ -111,10 +107,6 @@ email-validator==2.2.0 # via pydantic evaluate==0.4.3 # via lm-eval -exceptiongroup==1.2.2 - # via - # anyio - # pytest fastrlock==0.8.2 # via cupy-cuda12x filelock==3.16.1 @@ -165,8 +157,6 @@ idna==3.10 # httpx # requests # yarl -importlib-resources==6.4.5 - # via matplotlib inflect==5.6.2 # via datamodel-code-generator iniconfig==2.0.0 @@ -518,12 +508,6 @@ timm==1.0.11 # via -r requirements-test.in tokenizers==0.20.3 # via transformers -toml==0.10.2 - # via datamodel-code-generator -tomli==2.0.2 - # via - # black - # pytest torch==2.5.1 # via # -r requirements-test.in @@ -567,12 +551,9 @@ typepy[datetime]==1.3.2 # tabledata typing-extensions==4.12.2 # via - # anyio - # black # huggingface-hub # librosa # mistral-common - # multidict # pydantic # pydantic-core # torch @@ -590,8 +571,6 @@ xxhash==3.5.0 # evaluate yarl==1.17.1 # via aiohttp -zipp==3.20.2 - # via importlib-resources zstandard==0.23.0 # via lm-eval From 3b61cb450d899dc423feb264c297d4d18d701678 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 9 Dec 2024 12:38:46 -0800 
Subject: [PATCH 278/397] [V1] Further reduce CPU overheads in flash-attn (#10989) Signed-off-by: Woosuk Kwon --- csrc/cache_kernels.cu | 14 ++++++++++++-- vllm/v1/attention/backends/flash_attn.py | 21 ++++++++++++++++----- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/csrc/cache_kernels.cu b/csrc/cache_kernels.cu index 1be806bbfa43c..8a95279f9a25a 100644 --- a/csrc/cache_kernels.cu +++ b/csrc/cache_kernels.cu @@ -307,10 +307,20 @@ void reshape_and_cache_flash( torch::Tensor& key_cache, // [num_blocks, block_size, num_heads, head_size] torch::Tensor& value_cache, // [num_blocks, block_size, num_heads, head_size] - torch::Tensor& slot_mapping, // [num_tokens] + torch::Tensor& slot_mapping, // [num_tokens] or [num_actual_tokens] const std::string& kv_cache_dtype, const double k_scale, const double v_scale) { - int num_tokens = key.size(0); + // NOTE(woosuk): In vLLM V1, key.size(0) can be different from + // slot_mapping.size(0) because of padding for CUDA graphs. + // In vLLM V0, key.size(0) is always equal to slot_mapping.size(0) because + // both include padding. + // In vLLM V1, however, key.size(0) can be larger than slot_mapping.size(0) + // since key includes padding for CUDA graphs, while slot_mapping does not. + // In this case, slot_mapping.size(0) represents the actual number of tokens + // before padding. + // For compatibility with both cases, we use slot_mapping.size(0) as the + // number of tokens. + int num_tokens = slot_mapping.size(0); int num_heads = key.size(1); int head_size = key.size(2); int block_size = key_cache.size(1); diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index d37989055c2e5..251a103e60f06 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -138,14 +138,25 @@ def forward( # Profiling run. return output - num_actual_tokens = attn_metadata.num_actual_tokens + # IMPORTANT! + # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in + # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead + # in this method. For example, `view` and `slice` (or `[:n]`) operations + # are surprisingly slow even in the case they do not invoke any GPU ops. + # Minimize the PyTorch ops in this method as much as possible. + # Whenever making a change in this method, please benchmark the + # performance to make sure it does not introduce any overhead. + num_actual_tokens = attn_metadata.num_actual_tokens # Reshape the input keys and values and store them in the cache. - key_cache = kv_cache[0] - value_cache = kv_cache[1] + # NOTE(woosuk): Here, key and value are padded while slot_mapping is + # not padded. However, we don't need to do key[:num_actual_tokens] and + # value[:num_actual_tokens] because the reshape_and_cache_flash op uses + # the slot_mapping's shape to determine the number of actual tokens. 
+ key_cache, value_cache = kv_cache.unbind(0) torch.ops._C_cache_ops.reshape_and_cache_flash( - key[:num_actual_tokens], - value[:num_actual_tokens], + key, + value, key_cache, value_cache, attn_metadata.slot_mapping, From ca871491edb0fba11fe9aa94300bd8d282fa29e1 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 10 Dec 2024 04:54:44 +0800 Subject: [PATCH 279/397] [Misc][LoRA] Abstract PunicaWrapper (#10955) Signed-off-by: Jee Jee Li --- tests/lora/test_layers.py | 49 +- vllm/lora/layers.py | 7 +- vllm/lora/models.py | 8 +- vllm/lora/punica.py | 725 -------------------- vllm/lora/punica_wrapper/__init__.py | 7 + vllm/lora/punica_wrapper/punica_base.py | 480 +++++++++++++ vllm/lora/punica_wrapper/punica_gpu.py | 358 ++++++++++ vllm/lora/punica_wrapper/punica_selector.py | 14 + vllm/lora/punica_wrapper/utils.py | 159 +++++ 9 files changed, 1058 insertions(+), 749 deletions(-) delete mode 100644 vllm/lora/punica.py create mode 100644 vllm/lora/punica_wrapper/__init__.py create mode 100644 vllm/lora/punica_wrapper/punica_base.py create mode 100644 vllm/lora/punica_wrapper/punica_gpu.py create mode 100644 vllm/lora/punica_wrapper/punica_selector.py create mode 100644 vllm/lora/punica_wrapper/utils.py diff --git a/tests/lora/test_layers.py b/tests/lora/test_layers.py index a113e3f7abc1e..fb8c0b2a7ba26 100644 --- a/tests/lora/test_layers.py +++ b/tests/lora/test_layers.py @@ -28,7 +28,7 @@ # yapf: enable from vllm.lora.models import (LongContextLoRAContext, LoRALayerWeights, PackedLoRALayerWeights) -from vllm.lora.punica import PunicaWrapper +from vllm.lora.punica_wrapper import get_punica_wrapper from vllm.model_executor.layers.linear import (ColumnParallelLinear, MergedColumnParallelLinear, QKVParallelLinear, @@ -48,11 +48,12 @@ torch.float32: (5e-3, 5e-3), torch.bfloat16: (3e-2, 2e-2), } -CUDA_DEVICES = [ +# TODO: Modify this based on platform +DEVICES = [ f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2) ] -# We will launch different triton kernels between the prefill and decode +#For GPU, we will launch different triton kernels between the prefill and decode # stages, so we need to verify this. 
prefill stage(True) or decode stage(False) STAGES = [True, False] @@ -192,9 +193,18 @@ def create_random_inputs( return inputs, index_mapping, prompt_mapping +def check_punica_wrapper(punica_wrapper) -> bool: + if current_platform.is_cuda_alike(): + from vllm.lora.punica_wrapper.punica_gpu import PunicaWrapperGPU + + return type(punica_wrapper) is PunicaWrapperGPU + else: + return False + + @torch.inference_mode() @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) -@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("device", DEVICES) @pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 128000]) @pytest.mark.parametrize("stage", STAGES) def test_embeddings(dist_init, num_loras, device, vocab_size, stage) -> None: @@ -205,7 +215,8 @@ def test_embeddings(dist_init, num_loras, device, vocab_size, stage) -> None: torch.set_default_device(device) max_loras = 8 - punica_wrapper = PunicaWrapper(8192, 256, device) + punica_wrapper = get_punica_wrapper(8192, 256, device) + assert check_punica_wrapper(punica_wrapper) lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, lora_dtype=torch.float16) @@ -296,7 +307,7 @@ def create_random_embedding_layer(): # @pytest.mark.skip( # reason="Fails when loras are in any slot other than the first.") @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) -@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("device", DEVICES) @pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 128000]) @pytest.mark.parametrize("stage", STAGES) def test_embeddings_with_new_embeddings(dist_init, num_loras, device, @@ -305,7 +316,8 @@ def test_embeddings_with_new_embeddings(dist_init, num_loras, device, torch.cuda.set_device(device) torch.set_default_device(device) max_loras = 8 - punica_wrapper = PunicaWrapper(8192, 256, device) + punica_wrapper = get_punica_wrapper(8192, 256, device) + assert check_punica_wrapper(punica_wrapper) lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, lora_dtype=torch.float16) @@ -432,7 +444,7 @@ def create_random_embedding_layer(): @torch.inference_mode() @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) -@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("device", DEVICES) @pytest.mark.parametrize("vocab_size", [512, 32000, 64000, 256512]) @pytest.mark.parametrize("stage", STAGES) def test_lm_head_logits_processor(dist_init, num_loras, device, vocab_size, @@ -441,7 +453,8 @@ def test_lm_head_logits_processor(dist_init, num_loras, device, vocab_size, torch.cuda.set_device(device) torch.set_default_device(device) max_loras = 8 - punica_wrapper = PunicaWrapper(8192, 256, device) + punica_wrapper = get_punica_wrapper(8192, 256, device) + assert check_punica_wrapper(punica_wrapper) lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, lora_dtype=torch.float16) @@ -563,7 +576,7 @@ def _pretest(): @torch.inference_mode() @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) -@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("device", DEVICES) @pytest.mark.parametrize("stage", STAGES) @pytest.mark.parametrize("bias_enabled", [True, False]) def test_linear_replicated(dist_init, num_loras, device, stage, @@ -571,7 +584,8 @@ def test_linear_replicated(dist_init, num_loras, device, stage, torch.cuda.set_device(device) torch.set_default_device(device) - punica_wrapper = PunicaWrapper(8192, 256, device) + punica_wrapper = get_punica_wrapper(8192, 256, device) + assert check_punica_wrapper(punica_wrapper) max_loras = 8 lora_config = 
LoRAConfig(max_loras=max_loras, max_lora_rank=8, @@ -675,7 +689,7 @@ def create_random_linear_replicated_layer(): @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) @pytest.mark.parametrize("orientation", ["row", "column"]) @pytest.mark.parametrize("fully_shard", [True, False]) -@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("device", DEVICES) @pytest.mark.parametrize("stage", STAGES) @pytest.mark.parametrize("bias_enabled", [True, False]) def test_linear_parallel(dist_init, num_loras, orientation, fully_shard, @@ -683,7 +697,8 @@ def test_linear_parallel(dist_init, num_loras, orientation, fully_shard, torch.cuda.set_device(device) torch.set_default_device(device) - punica_wrapper = PunicaWrapper(8192, 256, device) + punica_wrapper = get_punica_wrapper(8192, 256, device) + assert check_punica_wrapper(punica_wrapper) max_loras = 8 lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, @@ -797,7 +812,7 @@ def create_random_linear_parallel_layer(): @pytest.mark.parametrize("num_loras", [1, 2, 4, 8]) @pytest.mark.parametrize("repeats", [1, 2, 3]) @pytest.mark.parametrize("fully_shard", [True, False]) -@pytest.mark.parametrize("device", CUDA_DEVICES) +@pytest.mark.parametrize("device", DEVICES) @pytest.mark.parametrize("stage", STAGES) @pytest.mark.parametrize("bias_enabled", [True, False]) def test_column_parallel_packed(dist_init, num_loras, repeats, fully_shard, @@ -805,7 +820,8 @@ def test_column_parallel_packed(dist_init, num_loras, repeats, fully_shard, torch.cuda.set_device(device) torch.set_default_device(device) - punica_wrapper = PunicaWrapper(8192, 256, device) + punica_wrapper = get_punica_wrapper(8192, 256, device) + assert check_punica_wrapper(punica_wrapper) max_loras = 8 lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, @@ -963,7 +979,8 @@ def test_rotary_embedding_long_context(dist_init, num_loras, device, seed = 0 current_platform.seed_everything(seed) torch.set_default_device(device) - punica_wrapper = PunicaWrapper(8192, 256, device) + punica_wrapper = get_punica_wrapper(8192, 256, device) + assert check_punica_wrapper(punica_wrapper) max_loras = 8 lora_config = LoRAConfig(max_loras=max_loras, max_lora_rank=8, diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 3e9c2ceb83eac..38cb846578d5c 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -17,7 +17,6 @@ tensor_model_parallel_all_reduce, tensor_model_parallel_gather) from vllm.distributed.utils import divide -from vllm.lora.punica import PunicaWrapper # yapf: disable from vllm.model_executor.layers.linear import (ColumnParallelLinear, LinearBase, @@ -33,7 +32,7 @@ VocabParallelEmbedding) if TYPE_CHECKING: - pass + from vllm.lora.punica_wrapper import PunicaWrapperBase def _get_lora_device(base_layer: nn.Module) -> torch.device: @@ -115,9 +114,9 @@ def set_lora( def set_mapping( self, - punica_wrapper: PunicaWrapper, + punica_wrapper, ): - self.punica_wrapper: PunicaWrapper = punica_wrapper + self.punica_wrapper: PunicaWrapperBase = punica_wrapper @classmethod def can_replace_layer( diff --git a/vllm/lora/models.py b/vllm/lora/models.py index 9855b57d0c9c9..49cd9f0c236ad 100644 --- a/vllm/lora/models.py +++ b/vllm/lora/models.py @@ -21,7 +21,7 @@ LinearScalingRotaryEmbeddingWithLora, LoRAMapping) from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights -from vllm.lora.punica import PunicaWrapper +from vllm.lora.punica_wrapper import get_punica_wrapper from vllm.lora.utils import (from_layer, from_layer_logits_processor, is_regex_target_modules, 
parse_fine_tuned_lora_name, replace_submodule) @@ -331,9 +331,9 @@ def __init__( self.lora_index_to_id: List[Optional[int]] = [None] * self.lora_slots self.vocab_size = vocab_size self.long_lora_context: Optional[LongContextLoRAContext] = None - self.punica_wrapper = PunicaWrapper(max_num_batched_tokens, - max_batches=self.max_num_seqs, - device=self.device) + self.punica_wrapper = get_punica_wrapper(max_num_batched_tokens, + max_batches=self.max_num_seqs, + device=self.device) # Scaling factor -> offset to the sin_cos_cache to it. # Used for long context lora. self.scaling_factor_to_offset: Dict[float, int] = {} diff --git a/vllm/lora/punica.py b/vllm/lora/punica.py deleted file mode 100644 index 563d1181d6fcb..0000000000000 --- a/vllm/lora/punica.py +++ /dev/null @@ -1,725 +0,0 @@ -""" -Based on: -Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). -Punica: Multi-Tenant LoRA Serving. -https://arxiv.org/abs/2310.18547 -""" - -from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union - -import torch - -from vllm.triton_utils import HAS_TRITON - -if HAS_TRITON: - from vllm.lora.ops.bgmv_expand import bgmv_expand - from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice - from vllm.lora.ops.bgmv_shrink import bgmv_shrink - from vllm.lora.ops.sgmv_expand import sgmv_expand - from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice - from vllm.lora.ops.sgmv_shrink import sgmv_shrink - -if TYPE_CHECKING: - # avoid circuit import - from vllm.lora.layers import LoRAMapping - from vllm.lora.models import LongContextLoRAContext - - -def compute_meta( - token_lora_tensor: torch.Tensor -) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int, bool]: - """ - Get the information required for the sgmv kernel. With the features: - 1. If consecutive requests in the batch use the same LoRA, this function - will combine them into a single request, improving sgmv kernel inference - performance. - 2. At the beginning of each prefill stage inference, recalculations are - needed based on the input, but only once. - """ - - lora_indices_tensor, seq_length_tensor = torch.unique_consecutive( - token_lora_tensor, return_counts=True) - cum_result = torch.cumsum(seq_length_tensor, dim=0) - b_seq_start_tensor = torch.zeros_like(seq_length_tensor) - b_seq_start_tensor[1:].copy_(cum_result[:-1]) - max_length = seq_length_tensor.max().item() - token_nums = seq_length_tensor.sum().item() - batch_size = lora_indices_tensor.size(0) - no_lora = False - # -1 means no lora should be applied. Use `no_lora` to determine whether - # the current step requires LoRA. If LoRA is not needed, the prefill stage - # does not need to launch the triton kernel, which can improve performance - if batch_size == 1 and lora_indices_tensor == -1: - no_lora = True - return (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, - batch_size, max_length, token_nums, no_lora) - - -# TODO see if this can be vectorized -def convert_mapping( - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - device: torch.device, - long_lora_context: Optional["LongContextLoRAContext"] = None, -) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, - Optional[torch.Tensor], List[int]]: - """Converts LoRAMapping to index tensors. - - Args: - mapping: LoRAMapping mapping rows in a batch to LoRA ids. - lora_index_to_id: List mapping LoRA ids to LoRA indices. - max_loras: Maximum number of LoRAs. 
- vocab_size: Model vocab size. - extra_vocab_size: Extra vocab size each LoRA can have. - long_lora_context: Passed if there are long context lora in a batch. - - Returns: - A tuple of tensors: - base_indices: Tensor of shape [batch_size] mapping batch rows to - LoRA indices. - sampler_indices: Tensor of shape [batch_size] mapping requests to - LoRA indices for sampler. For generation, this will be the - same as base_indicies. For prefill, this will map requests - to LoRA indices. - sampler_indices_padded: Tensor of shape [batch_size] mapping - requests to LoRA indices for sampler with padding. - Same as sampler_indicies, but -1 is replaced with - max_loras. - embeddings_indices: Tensor of shape [2, batch_size] mapping - requests to embedding indices. First row is for embeddings - added by the LoRAs, second row is for the LoRA.lora_a - embeddings. - long_lora_indices: Tensor of shape [batch_size] mapping - requests to RoPE offsets and rot dims for long LoRAs. - None if long context lora doesn't exist. - indices_len: List of lengths of the above tensors. It contains - (base_indices, sampler_indices, sampler_indices_padded, - embeddings_indices, long_lora_indices). - """ - index_mapping_indices: List[int] = list(mapping.index_mapping).copy() - embedding_indices = index_mapping_indices.copy() - lora_indices = index_mapping_indices.copy() - long_lora_offsets: Optional[torch.Tensor] = None - if long_lora_context: - long_lora_offsets = torch.zeros(len(index_mapping_indices), - device=device, - dtype=torch.long) - prompt_mapping: List[int] = [ - lora_index_to_id.index(x) if x > 0 else -1 - for x in mapping.prompt_mapping - ] - lora_idx = None - for i in range(len(index_mapping_indices)): - # TODO index can be slow. optimize - lora_idx = (lora_index_to_id.index(index_mapping_indices[i]) - if index_mapping_indices[i] > 0 else -1) - embedding_indices[i] = lora_idx if index_mapping_indices[i] > 0 else 0 - lora_indices[i] = lora_idx - if long_lora_context: - assert long_lora_offsets is not None - lora_offset: int = long_lora_context.offsets_by_lora_id.get( - index_mapping_indices[i], 0) - long_lora_offsets[i] = lora_offset - - indices_list: List[Union[List[int], torch.Tensor]] = [ - index_mapping_indices, - lora_indices, - embedding_indices, - ] - if long_lora_context: - assert long_lora_offsets is not None - indices_list.append(long_lora_offsets) - indices = torch.tensor(indices_list, dtype=torch.long, device=device) - prompt_mapping_tensor = torch.tensor(prompt_mapping, - dtype=torch.long, - device=device) - embeddings_indices = torch.stack([ - indices[2] * extra_vocab_size, - indices[2] * (vocab_size + extra_vocab_size), - ]) - embeddings_indices[embeddings_indices == -1] = max_loras - 1 - base_indices = indices[1] - sampler_indices = prompt_mapping_tensor - sampler_indices_padded = sampler_indices.clone() - sampler_indices_padded[sampler_indices_padded == -1] = max_loras - 1 - sampler_indices_padded = torch.arange( - 0, len(sampler_indices_padded), device=device, dtype=torch.long) + ( - sampler_indices_padded * len(sampler_indices_padded)) - long_lora_indices = None - long_lora_indices_len: Optional[int] = None - if long_lora_context: - long_lora_indices = indices[3] - long_lora_indices_len = long_lora_indices.shape[-1] - # Contain length of indices tensors. Used to index into each tensor. 
- indices_len = [ - base_indices.shape[-1], - sampler_indices.shape[-1], - sampler_indices_padded.shape[-1], - embeddings_indices.shape[-1], - ] - if long_lora_indices_len is not None: - indices_len.append(long_lora_indices_len) - else: - # If long_lora doesn't exist,append None - indices_len.append(None) - - return ( - base_indices, - sampler_indices, - sampler_indices_padded, - embeddings_indices, - long_lora_indices, - indices_len, - ) - - -class PunicaWrapper: - """ - PunicaWrapper is designed to manage and provide metadata for the punica - kernel. The main function is to maintain the state information for - Multi-LoRA, and to provide the interface for the punica kernel. - """ - - def __init__(self, max_num_batched_tokens: int, max_batches: int, - device: Union[torch.device, str]): - self._token_lora_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._sampler_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._sampler_indices_padded = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._embeddings_indices = torch.empty(2, - max_num_batched_tokens, - dtype=torch.long, - device=device) - self._long_lora_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - - # 5 is the number of indicies tensors. - # base_indices, sampler_indices, sampler_indices_padded, - # embeddings_indices,long_lora_indices - self.indices_len: List[Optional[int]] = [None] * 5 - # these attributes are the information required for sgmv kernel - self._seq_start_locs = torch.empty(max_batches, - dtype=torch.long, - device=device) - self._seq_lengths = torch.empty(max_batches, - dtype=torch.long, - device=device) - self._lora_indices_per_batch = torch.empty(max_batches, - dtype=torch.long, - device=device) - self.device: torch.device = device - self.max_length: int = 0 - self.token_nums: int = 0 - self.batch_size: int = -1 - self.is_prefill = False - self.no_lora = False - - def update_metadata( - self, - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - long_lora_context: Optional["LongContextLoRAContext"] = None, - ): - - self._update_base_metadata(mapping, lora_index_to_id, max_loras, - vocab_size, extra_vocab_size, - long_lora_context) - if mapping.is_prefill: - # Update metadata required for prefill-related operators. - self._update_prefill_metada(self.token_lora_indices) - self.is_prefill = True - else: - self.is_prefill = False - - def _update_base_metadata( - self, - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - long_lora_context: Optional["LongContextLoRAContext"] = None, - ): - ( - base_indices, - sampler_indices, - sampler_indices_padded, - embeddings_indices, - long_lora_offsets_tensor, - indices_len, - ) = convert_mapping( - mapping, - lora_index_to_id, - max_loras, - vocab_size, - extra_vocab_size, - self.device, - long_lora_context, - ) - self._token_lora_indices[:base_indices.shape[0]].copy_(base_indices) - self._sampler_indices[:sampler_indices.shape[0]].copy_(sampler_indices) - self._sampler_indices_padded[:sampler_indices_padded.shape[0]].copy_( - sampler_indices_padded) - self._embeddings_indices[:embeddings_indices. 
- shape[0], :embeddings_indices.shape[1]].copy_( - embeddings_indices) - if long_lora_offsets_tensor is not None: - self._long_lora_indices[:long_lora_offsets_tensor.shape[0]].copy_( - long_lora_offsets_tensor) - else: - self._long_lora_indices.zero_() - self.indices_len[:] = indices_len - - def _update_prefill_metada(self, token_lora_tensor: torch.Tensor) -> None: - - (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, - batch_size, max_length, token_nums, - no_lora) = compute_meta(token_lora_tensor) - - self._seq_start_locs[:b_seq_start_tensor.shape[0]].copy_( - b_seq_start_tensor) - self._seq_lengths[:seq_length_tensor.shape[0]].copy_(seq_length_tensor) - self._lora_indices_per_batch[:lora_indices_tensor.shape[0]].copy_( - lora_indices_tensor) - self.batch_size = batch_size - self.max_length = max_length - self.token_nums = token_nums - self.no_lora = no_lora - - @property - def prefill_metadata( - self - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int]: - """ - This property provides a convenient way to access the necessary - metadata for prefill-related kernel computations. - 1. seq_start_locs: Tensor of sequence start positions. - 2. seq_lengths: Tensor of sequence lengths. - 3. lora_indices_per_batch: Tensor of lora indices, and an index of - -1 means no lora should be applied. - 4. batch_size: Batch size after clustering identical lora indices. - 5. max_length: The maximum sequence length in the batch. - 6. token_nums: The token numbers in the batch. - """ - return (self._seq_start_locs[:self.batch_size], - self._seq_lengths[:self.batch_size], - self._lora_indices_per_batch[:self.batch_size], - self.batch_size, self.max_length, self.token_nums) - - @property - def token_lora_indices(self) -> torch.Tensor: - """ - This property provides the lora indices corresponding to each token - in the batch. An index of -1 means no lora should be applied. - """ - token_lora_len = self.indices_len[0] - return self._token_lora_indices[:token_lora_len] - - @property - def sampler_indices(self) -> torch.Tensor: - """ - This property is used to access the lora indices specifically for - LogitsProcessorWithLoRA. - """ - sampler_indices_len = self.indices_len[1] - return self._sampler_indices[:sampler_indices_len] - - @property - def sampler_indices_padded(self) -> torch.Tensor: - """ - This property provides access to padded sampler indices. - """ - indices_padded_len = self.indices_len[2] - return self._sampler_indices_padded[:indices_padded_len] - - @property - def embeddings_indices(self) -> torch.Tensor: - """ - This property provides access to the indices used for lora embeddings, - specifically for VocabParallelEmbeddingWithLoRA. - """ - embeddings_indices_len = self.indices_len[3] - return self._embeddings_indices[:, :embeddings_indices_len] - - @property - def long_lora_indices(self) -> torch.Tensor: - """ - This property provides access to the indices used for long context - lora, specifically for LinearScalingRotaryEmbeddingWithLora. 
- """ - long_lora_len = self.indices_len[4] - return self._long_lora_indices[:long_lora_len] - - def _shrink_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - scale: float, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_shrink( - x, - w_t_all, - y, - *self.prefill_metadata, - scale, - ) - - def _shrink_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - scale: float, - ): - bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale) - - def _expand_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - add_input: bool, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_expand( - x, - w_t_all, - y, - *self.prefill_metadata, - add_input, - ) - - def _expand_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - add_input: bool, - ): - bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_input) - - def _expand_slice_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_expand_slice( - x, - w_t_all, - y, - *self.prefill_metadata, - y_offset, - y_slice_size, - add_input, - ) - - def _expand_slice_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool, - ): - bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset, - y_slice_size, add_input) - - def _apply_expand(self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool = True): - """ - Perform the ` y[:,y_offset:y_offset+y_slice_size]+=x@w_t_all` - computation, which is suitable for the - GEMM of lora'b. - """ - - expand_slice_fun: Callable = (self._expand_slice_prefill - if self.is_prefill else - self._expand_slice_decode) - expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_input) - - def _apply_bias( - self, - indices: torch.Tensor, - output: torch.Tensor, - output_slices: Tuple[int, ...], - lora_bias_stacked: Tuple[Optional[torch.Tensor], ...], - ): - """Applies bias to output - - Input shapes: - lora_bias_stacked: 3 element tuple of (num_loras, output_dim) - indices: (batch_size) - output: (batch_size, q_slice_size + 2*kv_slice_size) - output_slices: n-1 element tuple of (slice_size...), - where n is number of slices - """ - org_output = output - output = output.view(-1, output.shape[-1]) - indices = indices.view(-1) - - offset_left = 0 - for slice_idx, slice in enumerate(output_slices): - bias = lora_bias_stacked[slice_idx] - if bias is not None: - bias = bias.view(-1, bias.shape[-1]) - bias = bias[indices] - bias[indices == -1] = 0 - output[:, offset_left:offset_left + slice] += bias - offset_left += slice - - return output.view_as(org_output) - - def _apply_shrink( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - scale: float, - ): - """ - Perform the ` y+=x@w_t_all` computation, which is suitable for the - GEMM of lora'a. - When `is_prefill is` true, it indicates that it is currently the - prefill stage, and the `_shrink_prefill` function should be called. - Otherwise, it is the decode stage, and the _shrink_decode function - should be called. 
- """ - y_org = y - y = y.view(-1, y.shape[-1]) - shrink_fun: Callable = (self._shrink_prefill - if self.is_prefill else self._shrink_decode) - shrink_fun(y, x, w_t_all, scale) - y = y.view_as(y_org) - - def add_shrink( - self, - y: Union[Tuple[torch.Tensor, ...], torch.Tensor], - x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - scale: float, - ): - """ - Performs GEMM for multiple slices of lora_a. - When `is_prefill is` true, it indicates that it is currently the - prefill stage, and the `_shrink_prefill` function should be called. - Otherwise, it is the decode stage, and the _shrink_decode function - should be called. - - Semantics: - for i in range(len(lora_a_stacked)): - y[i] += (x @ lora_a_stacked[i]) * scale - - Args: - y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors - x (torch.Tensor): Input tensor - lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights - scale (float): Scaling factor for the operation - """ - - x = x.view(-1, x.shape[-1]) - # TODO fuse these kernels - for slice_idx in range(len(lora_a_stacked)): - self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx], - scale) - - def add_expand( - self, - y: torch.Tensor, - x: Union[Tuple[torch.Tensor, ...], torch.Tensor], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - output_slices: Tuple[int, ...], - offset_start: int = 0, - add_input=True, - ) -> None: - """ - Performs GEMM and bias addition for multiple slices of lora_b. - - Semantics: - for i in range(len(lora_b_stacked)): - slice = output_slices[i] - y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + - lora_bias_stacked[i] - offset += slice - - Args: - y (torch.Tensor): Output tensor. - x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors - lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight - lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): - bias's weight - output_slices (Tuple[int, ...]): Every slice's size - add_input (bool): Defaults to True. - """ - y_org = y - y = y.view(-1, y.shape[-1]) - offset_left = offset_start - if lora_bias_stacked is not None: - self._apply_bias(self.token_lora_indices, y, output_slices, - lora_bias_stacked) - for slice_idx in range(len(lora_b_stacked)): - self._apply_expand( - y, - x[slice_idx], - lora_b_stacked[slice_idx], - offset_left, - output_slices[slice_idx], - add_input=add_input, - ) - offset_left += output_slices[slice_idx] - y = y.view_as(y_org) - - def add_lora_embedding( - self, - y: torch.Tensor, - x: torch.Tensor, - lora_b_stacked: torch.Tensor, - add_input: bool = True, - ): - """ - Applies lora specifically for VocabParallelEmbeddingWithLoRA. - - Semantics: - y += x @ lora_b_stacked - - Args: - y (torch.Tensor): Output tensor. - x (torch.Tensor): Input tensor. - lora_b_stacked (torch.Tensor): lora_b's weights. - add_input (bool): Default to True. - - """ - - # Embedding layer only need expand op - expand_fun: Callable = (self._expand_prefill - if self.is_prefill else self._expand_decode) - expand_fun(y, x, lora_b_stacked, add_input) - - def add_lora_linear( - self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, ...], - lora_b_stacked: Tuple[torch.Tensor, ...], - lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], - scale: float, - output_slices: Tuple[int, ...], - *, - buffer: Optional[Tuple[torch.Tensor, ...]] = None) -> None: - """ - Applicable to linear-related lora. 
- - Semantics: - for i in range(len(lora_a_stacked)): - y[i] += ( - x[i].unsqueeze(0) - @ lora_a_stacked[indices[i], layer_idx, :, :] - @ lora_b_stacked[indices[i], layer_idx, :, :] - * scale - ).squeeze(0)+lora_bias_stacked[i] - - Args: - y (torch.Tensor): Output tensor. Will be changed in-place. - x (torch.Tensor): Input tensor - lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weight. - lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight. - lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): lora's bias. - scale (float): Scaling factor. - output_slices (Tuple[int, ...]): Every slice's size. - buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None. - """ - - assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices) - if lora_bias_stacked is not None: - assert len(lora_bias_stacked) == len(output_slices) - y = self._apply_bias(self.token_lora_indices, y, output_slices, - lora_bias_stacked) - - if buffer is None: - r = lora_b_stacked[0].size(-1) - # We set the buffer to be float32 by default ,refer to: - # https://github.com/triton-lang/triton/issues/1387 - buffer = tuple( - torch.zeros( - (x.size(0), r), dtype=torch.float32, device=x.device) - for _ in range(len(output_slices))) - self.add_shrink(buffer, x, lora_a_stacked, scale) - self.add_expand(y, - buffer, - lora_b_stacked, - None, - output_slices, - add_input=True) - - def add_lora_logits(self, - y: torch.Tensor, - x: torch.Tensor, - lora_a_stacked: torch.Tensor, - lora_b_stacked: torch.Tensor, - scale, - *, - buffer: Optional[torch.Tensor] = None) -> None: - """ - Applies lora specifically for LogitsProcessorWithLoRA. - - Semantics: - buffer = (x @ lora_a_stacked) * scale - y += buffer @ lora_b_stacked - - Args: - y (torch.Tensor): Output tensor. - x (torch.Tensor): Input tensor. - lora_a_stacked (torch.Tensor): lora_a's weights. - lora_b_stacked (torch.Tensor):lora_b's weights. - scale (float): Scaling factor. - buffer (Optional[torch.Tensor]):Default to None. - """ - y_org = y - y = y.view(-1, y.shape[-1]) - x = x.view(-1, x.shape[-1]) - r = lora_b_stacked.size(-1) - if buffer is None: - # We set the buffer to be float32 by default ,refer to: - # https://github.com/triton-lang/triton/issues/1387 - buffer = torch.zeros((x.size(0), r), - dtype=torch.float32, - device=x.device) - # LogitsProcessorWithLoRA always using bgmv. - bgmv_shrink(x, lora_a_stacked, buffer, self.sampler_indices, scale) - bgmv_expand(buffer, - lora_b_stacked, - y, - self.sampler_indices, - add_inputs=True) - y = y.view_as(y_org) diff --git a/vllm/lora/punica_wrapper/__init__.py b/vllm/lora/punica_wrapper/__init__.py new file mode 100644 index 0000000000000..48ada3926ea46 --- /dev/null +++ b/vllm/lora/punica_wrapper/__init__.py @@ -0,0 +1,7 @@ +from vllm.lora.punica_wrapper.punica_base import PunicaWrapperBase +from vllm.lora.punica_wrapper.punica_selector import get_punica_wrapper + +__all__ = [ + "PunicaWrapperBase", + "get_punica_wrapper", +] diff --git a/vllm/lora/punica_wrapper/punica_base.py b/vllm/lora/punica_wrapper/punica_base.py new file mode 100644 index 0000000000000..0a5a84bdd8deb --- /dev/null +++ b/vllm/lora/punica_wrapper/punica_base.py @@ -0,0 +1,480 @@ +""" +Based on: +Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). +Punica: Multi-Tenant LoRA Serving. 
+https://arxiv.org/abs/2310.18547 +""" + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +import torch + +from .utils import compute_meta, convert_mapping + +if TYPE_CHECKING: + # avoid circuit import + from vllm.lora.layers import LoRAMapping + from vllm.lora.models import LongContextLoRAContext + + +class PunicaWrapperABC(ABC): + """ + PunicaWrapper ABC. + """ + + @abstractmethod + def update_metadata( + self, + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + long_lora_context: Optional["LongContextLoRAContext"] = None, + **kwargs, + ) -> None: + """ + Update the lora-related metadata + """ + raise NotImplementedError + + @abstractmethod + def add_shrink( + self, + y: Union[Tuple[torch.Tensor, ...], torch.Tensor], + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + scale: float, + **kwargs, + ) -> None: + """ + Performs GEMM for multiple slices of lora_a. + """ + + raise NotImplementedError + + @abstractmethod + def add_expand( + self, + y: torch.Tensor, + x: Union[Tuple[torch.Tensor, ...], torch.Tensor], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + output_slices: Tuple[int, ...], + offset_start: int = 0, + add_input=True, + **kwargs, + ) -> None: + """ + Performs GEMM and bias addition for multiple slices of lora_b. + """ + raise NotImplementedError + + @abstractmethod + def add_lora_embedding( + self, + y: torch.Tensor, + x: torch.Tensor, + lora_b_stacked: torch.Tensor, + add_input: bool = True, + **kwargs, + ) -> None: + """ + Applies lora specifically for VocabParallelEmbeddingWithLoRA, + and this layer only requires the expand operation. + """ + raise NotImplementedError + + @abstractmethod + def add_lora_linear(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + scale: float, + output_slices: Tuple[int, ...], + *, + buffer: Optional[Tuple[torch.Tensor, ...]] = None, + **kwargs) -> None: + """ + Applicable to linear-related lora. + """ + + raise NotImplementedError + + @abstractmethod + def add_lora_logits(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: torch.Tensor, + lora_b_stacked: torch.Tensor, + scale, + *, + buffer: Optional[torch.Tensor] = None, + **kwargs) -> None: + """ + Applies lora specifically for LogitsProcessorWithLoRA. + """ + raise NotImplementedError + + +class PunicaWrapperBase(PunicaWrapperABC): + """ + PunicaWrapperBase is designed to manage and provide metadata for the punica + kernel. The main function is to maintain the state information for + Multi-LoRA, and to provide the interface for the punica. + """ + + def __init__(self, max_num_batched_tokens: int, max_batches: int, + device: Union[torch.device, str], **kwargs): + self._token_lora_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._sampler_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._sampler_indices_padded = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._embeddings_indices = torch.empty(2, + max_num_batched_tokens, + dtype=torch.long, + device=device) + self._long_lora_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + + # 5 is the number of indicies tensors. 
+ # base_indices, sampler_indices, sampler_indices_padded, + # embeddings_indices,long_lora_indices + self.indices_len: List[Optional[int]] = [None] * 5 + # these attributes are the information required for sgmv kernel + self._seq_start_locs = torch.empty(max_batches, + dtype=torch.long, + device=device) + self._seq_lengths = torch.empty(max_batches, + dtype=torch.long, + device=device) + self._lora_indices_per_batch = torch.empty(max_batches, + dtype=torch.long, + device=device) + self.device: torch.device = device + self.max_length: int = 0 + self.token_nums: int = 0 + self.batch_size: int = -1 + self.is_prefill = False + self.no_lora = False + + def _update_base_metadata( + self, + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + long_lora_context: Optional["LongContextLoRAContext"] = None, + ): + ( + base_indices, + sampler_indices, + sampler_indices_padded, + embeddings_indices, + long_lora_offsets_tensor, + indices_len, + ) = convert_mapping( + mapping, + lora_index_to_id, + max_loras, + vocab_size, + extra_vocab_size, + self.device, + long_lora_context, + ) + self._token_lora_indices[:base_indices.shape[0]].copy_(base_indices) + self._sampler_indices[:sampler_indices.shape[0]].copy_(sampler_indices) + self._sampler_indices_padded[:sampler_indices_padded.shape[0]].copy_( + sampler_indices_padded) + self._embeddings_indices[:embeddings_indices. + shape[0], :embeddings_indices.shape[1]].copy_( + embeddings_indices) + if long_lora_offsets_tensor is not None: + self._long_lora_indices[:long_lora_offsets_tensor.shape[0]].copy_( + long_lora_offsets_tensor) + else: + self._long_lora_indices.zero_() + self.indices_len[:] = indices_len + + def _update_prefill_metada(self, token_lora_tensor: torch.Tensor) -> None: + + (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, + batch_size, max_length, token_nums, + no_lora) = compute_meta(token_lora_tensor) + + self._seq_start_locs[:b_seq_start_tensor.shape[0]].copy_( + b_seq_start_tensor) + self._seq_lengths[:seq_length_tensor.shape[0]].copy_(seq_length_tensor) + self._lora_indices_per_batch[:lora_indices_tensor.shape[0]].copy_( + lora_indices_tensor) + self.batch_size = batch_size + self.max_length = max_length + self.token_nums = token_nums + self.no_lora = no_lora + + def _apply_bias( + self, + indices: torch.Tensor, + output: torch.Tensor, + output_slices: Tuple[int, ...], + lora_bias_stacked: Tuple[Optional[torch.Tensor], ...], + ): + """Applies bias to output + + Input shapes: + lora_bias_stacked: 3 element tuple of (num_loras, output_dim) + indices: (batch_size) + output: (batch_size, q_slice_size + 2*kv_slice_size) + output_slices: n-1 element tuple of (slice_size...), + where n is number of slices + """ + org_output = output + output = output.view(-1, output.shape[-1]) + indices = indices.view(-1) + + offset_left = 0 + for slice_idx, slice in enumerate(output_slices): + bias = lora_bias_stacked[slice_idx] + if bias is not None: + bias = bias.view(-1, bias.shape[-1]) + bias = bias[indices] + bias[indices == -1] = 0 + output[:, offset_left:offset_left + slice] += bias + offset_left += slice + + return output.view_as(org_output) + + @property + def prefill_metadata( + self + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int]: + """ + This property provides a convenient way to access the necessary + metadata for prefill-related kernel computations. + 1. seq_start_locs: Tensor of sequence start positions. + 2. 
seq_lengths: Tensor of sequence lengths. + 3. lora_indices_per_batch: Tensor of lora indices, and an index of + -1 means no lora should be applied. + 4. batch_size: Batch size after clustering identical lora indices. + 5. max_length: The maximum sequence length in the batch. + 6. token_nums: The token numbers in the batch. + """ + return (self._seq_start_locs[:self.batch_size], + self._seq_lengths[:self.batch_size], + self._lora_indices_per_batch[:self.batch_size], + self.batch_size, self.max_length, self.token_nums) + + @property + def token_lora_indices(self) -> torch.Tensor: + """ + This property provides the lora indices corresponding to each token + in the batch. An index of -1 means no lora should be applied. + """ + token_lora_len = self.indices_len[0] + return self._token_lora_indices[:token_lora_len] + + @property + def sampler_indices(self) -> torch.Tensor: + """ + This property is used to access the lora indices specifically for + LogitsProcessorWithLoRA. + """ + sampler_indices_len = self.indices_len[1] + return self._sampler_indices[:sampler_indices_len] + + @property + def sampler_indices_padded(self) -> torch.Tensor: + """ + This property provides access to padded sampler indices. + """ + indices_padded_len = self.indices_len[2] + return self._sampler_indices_padded[:indices_padded_len] + + @property + def embeddings_indices(self) -> torch.Tensor: + """ + This property provides access to the indices used for lora embeddings, + specifically for VocabParallelEmbeddingWithLoRA. + """ + embeddings_indices_len = self.indices_len[3] + return self._embeddings_indices[:, :embeddings_indices_len] + + @property + def long_lora_indices(self) -> torch.Tensor: + """ + This property provides access to the indices used for long context + lora, specifically for LinearScalingRotaryEmbeddingWithLora. + """ + long_lora_len = self.indices_len[4] + return self._long_lora_indices[:long_lora_len] + + def update_metadata( + self, + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + long_lora_context: Optional["LongContextLoRAContext"] = None, + **kwargs): + + self._update_base_metadata(mapping, lora_index_to_id, max_loras, + vocab_size, extra_vocab_size, + long_lora_context) + if mapping.is_prefill: + # Update metadata required for prefill-related operators. + self._update_prefill_metada(self.token_lora_indices) + self.is_prefill = True + else: + self.is_prefill = False + + @abstractmethod + def add_shrink(self, y: Union[Tuple[torch.Tensor, ...], torch.Tensor], + x: torch.Tensor, lora_a_stacked: Tuple[torch.Tensor, ...], + scale: float, **kwargs) -> None: + """ + Performs GEMM for multiple slices of lora_a. + + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += (x @ lora_a_stacked[i]) * scale + + Args: + y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights + scale (float): Scaling factor for the operation + + """ + # TODO: implement it based on torch ops + raise NotImplementedError + + @abstractmethod + def add_expand(self, + y: torch.Tensor, + x: Union[Tuple[torch.Tensor, ...], torch.Tensor], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + output_slices: Tuple[int, ...], + offset_start: int = 0, + add_input=True, + **kwargs) -> None: + """ + Performs GEMM and bias addition for multiple slices of lora_b. 
+ + Semantics: + for i in range(len(lora_b_stacked)): + slice = output_slices[i] + y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + + lora_bias_stacked[i] + offset += slice + + Args: + y (torch.Tensor): Output tensor. + x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): + bias's weight + output_slices (Tuple[int, ...]): Every slice's size + add_input (bool): Defaults to True. + + """ + # TODO: implement it based on torch ops + raise NotImplementedError + + @abstractmethod + def add_lora_embedding(self, + y: torch.Tensor, + x: torch.Tensor, + lora_b_stacked: torch.Tensor, + add_input: bool = True, + **kwargs) -> None: + """ + Applies lora specifically for VocabParallelEmbeddingWithLoRA. + and this layer only requires the expand operation. + Semantics: + y += x @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_b_stacked (torch.Tensor): lora_b's weights. + add_input (bool): Default to True. + """ + # TODO: implement it based on torch ops + raise NotImplementedError + + @abstractmethod + def add_lora_linear(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + scale: float, + output_slices: Tuple[int, ...], + *, + buffer: Optional[Tuple[torch.Tensor, ...]] = None, + **kwargs) -> None: + """ + Applicable to linear-related lora. + + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += ( + x[i].unsqueeze(0) + @ lora_a_stacked[indices[i], layer_idx, :, :] + @ lora_b_stacked[indices[i], layer_idx, :, :] + * scale + ).squeeze(0)+lora_bias_stacked[i] + + Args: + y (torch.Tensor): Output tensor. Will be changed in-place. + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weight. + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight. + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): lora's bias. + scale (float): Scaling factor. + output_slices (Tuple[int, ...]): Every slice's size. + buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None. + """ + # TODO: implement it based on torch ops + raise NotImplementedError + + @abstractmethod + def add_lora_logits(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: torch.Tensor, + lora_b_stacked: torch.Tensor, + scale, + *, + buffer: Optional[torch.Tensor] = None, + **kwargs) -> None: + """ + Applies lora specifically for LogitsProcessorWithLoRA. + + Semantics: + buffer = (x @ lora_a_stacked) * scale + y += buffer @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_a_stacked (torch.Tensor): lora_a's weights. + lora_b_stacked (torch.Tensor):lora_b's weights. + scale (float): Scaling factor. + buffer (Optional[torch.Tensor]):Default to None. + """ + # TODO: implement it based on torch ops + raise NotImplementedError diff --git a/vllm/lora/punica_wrapper/punica_gpu.py b/vllm/lora/punica_wrapper/punica_gpu.py new file mode 100644 index 0000000000000..b2af29de129ce --- /dev/null +++ b/vllm/lora/punica_wrapper/punica_gpu.py @@ -0,0 +1,358 @@ +""" +Based on: +Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). +Punica: Multi-Tenant LoRA Serving. 
+https://arxiv.org/abs/2310.18547 +""" + +from typing import Callable, Optional, Tuple, Union, final + +import torch + +from vllm.triton_utils import HAS_TRITON + +if HAS_TRITON: + from vllm.lora.ops.bgmv_expand import bgmv_expand + from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice + from vllm.lora.ops.bgmv_shrink import bgmv_shrink + from vllm.lora.ops.sgmv_expand import sgmv_expand + from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice + from vllm.lora.ops.sgmv_shrink import sgmv_shrink + +from .punica_base import PunicaWrapperBase + + +@final +class PunicaWrapperGPU(PunicaWrapperBase): + """ + PunicaWrapperGPU is designed to manage and provide metadata for the punica + kernel. The main function is to maintain the state information for + Multi-LoRA, and to provide the interface for the punica triton kernel. + """ + + def __init__(self, max_num_batched_tokens: int, max_batches: int, + device: Union[torch.device, str], **kwargs): + PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches, + device) + + def _shrink_prefill( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + scale: float, + ): + #No LoRA request, so return directly + if self.no_lora: + return + sgmv_shrink( + x, + w_t_all, + y, + *self.prefill_metadata, + scale, + ) + + def _shrink_decode( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + scale: float, + ): + bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale) + + def _expand_prefill( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + add_input: bool, + ): + #No LoRA request, so return directly + if self.no_lora: + return + sgmv_expand( + x, + w_t_all, + y, + *self.prefill_metadata, + add_input, + ) + + def _expand_decode( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + add_input: bool, + ): + bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_input) + + def _expand_slice_prefill( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + y_offset: Optional[int], + y_slice_size: Optional[int], + add_input: bool, + ): + #No LoRA request, so return directly + if self.no_lora: + return + sgmv_expand_slice( + x, + w_t_all, + y, + *self.prefill_metadata, + y_offset, + y_slice_size, + add_input, + ) + + def _expand_slice_decode( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + y_offset: Optional[int], + y_slice_size: Optional[int], + add_input: bool, + ): + bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset, + y_slice_size, add_input) + + def _apply_expand( + self, + y: torch.Tensor, + x: torch.Tensor, + w_t_all: torch.Tensor, + y_offset: Optional[int], + y_slice_size: Optional[int], + add_input: bool = True, + ): + """ + Perform the ` y[:,y_offset:y_offset+y_slice_size]+=x@w_t_all` + computation, which is suitable for the + GEMM of lora'b. + """ + + expand_slice_fun: Callable = (self._expand_slice_prefill + if self.is_prefill else + self._expand_slice_decode) + expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_input) + + def _apply_shrink(self, y: torch.Tensor, x: torch.Tensor, + w_t_all: torch.Tensor, scale: float): + """ + Perform the ` y+=x@w_t_all` computation, which is suitable for the + GEMM of lora'a. + When `is_prefill is` true, it indicates that it is currently the + prefill stage, and the `_shrink_prefill` function should be called. + Otherwise, it is the decode stage, and the _shrink_decode function + should be called. 
+ """ + y_org = y + y = y.view(-1, y.shape[-1]) + shrink_fun: Callable = (self._shrink_prefill + if self.is_prefill else self._shrink_decode) + shrink_fun(y, x, w_t_all, scale) + y = y.view_as(y_org) + + def add_shrink(self, y: Union[Tuple[torch.Tensor, ...], torch.Tensor], + x: torch.Tensor, lora_a_stacked: Tuple[torch.Tensor, ...], + scale: float, **kwargs): + """ + Performs GEMM for multiple slices of lora_a. + When `is_prefill is` true, it indicates that it is currently the + prefill stage, and the `_shrink_prefill` function should be called. + Otherwise, it is the decode stage, and the _shrink_decode function + should be called. + + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += (x @ lora_a_stacked[i]) * scale + + Args: + y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights + scale (float): Scaling factor for the operation + """ + + x = x.view(-1, x.shape[-1]) + # TODO fuse these kernels + for slice_idx in range(len(lora_a_stacked)): + self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx], + scale) + + def add_expand(self, + y: torch.Tensor, + x: Union[Tuple[torch.Tensor, ...], torch.Tensor], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + output_slices: Tuple[int, ...], + offset_start: int = 0, + add_input=True, + **kwargs) -> None: + """ + Performs GEMM and bias addition for multiple slices of lora_b. + + Semantics: + for i in range(len(lora_b_stacked)): + slice = output_slices[i] + y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + + lora_bias_stacked[i] + offset += slice + + Args: + y (torch.Tensor): Output tensor. + x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): + bias's weight + output_slices (Tuple[int, ...]): Every slice's size + add_input (bool): Defaults to True. + """ + y_org = y + y = y.view(-1, y.shape[-1]) + offset_left = offset_start + if lora_bias_stacked is not None: + self._apply_bias(self.token_lora_indices, y, output_slices, + lora_bias_stacked) + for slice_idx in range(len(lora_b_stacked)): + self._apply_expand( + y, + x[slice_idx], + lora_b_stacked[slice_idx], + offset_left, + output_slices[slice_idx], + add_input=add_input, + ) + offset_left += output_slices[slice_idx] + y = y.view_as(y_org) + + def add_lora_embedding(self, + y: torch.Tensor, + x: torch.Tensor, + lora_b_stacked: torch.Tensor, + add_input: bool = True, + **kwargs) -> None: + """ + Applies lora specifically for VocabParallelEmbeddingWithLoRA. + + Semantics: + y += x @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_b_stacked (torch.Tensor): lora_b's weights. + add_input (bool): Default to True. + """ + + # Embedding layer only need expand op + expand_fun: Callable = (self._expand_prefill + if self.is_prefill else self._expand_decode) + expand_fun(y, x, lora_b_stacked, add_input) + + def add_lora_linear(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + scale: float, + output_slices: Tuple[int, ...], + *, + buffer: Optional[Tuple[torch.Tensor, ...]] = None, + **kwargs) -> None: + """ + Applicable to linear-related lora. 
+ + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += ( + x[i].unsqueeze(0) + @ lora_a_stacked[indices[i], layer_idx, :, :] + @ lora_b_stacked[indices[i], layer_idx, :, :] + * scale + ).squeeze(0)+lora_bias_stacked[i] + + Args: + y (torch.Tensor): Output tensor. Will be changed in-place. + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weight. + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight. + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): lora's bias. + scale (float): Scaling factor. + output_slices (Tuple[int, ...]): Every slice's size. + buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None. + """ + + assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices) + if lora_bias_stacked is not None: + assert len(lora_bias_stacked) == len(output_slices) + y = self._apply_bias(self.token_lora_indices, y, output_slices, + lora_bias_stacked) + + if buffer is None: + r = lora_b_stacked[0].size(-1) + # We set the buffer to be float32 by default ,refer to: + # https://github.com/triton-lang/triton/issues/1387 + buffer = tuple( + torch.zeros( + (x.size(0), r), dtype=torch.float32, device=x.device) + for _ in range(len(output_slices))) + self.add_shrink(buffer, x, lora_a_stacked, scale, **kwargs) + self.add_expand(y, + buffer, + lora_b_stacked, + None, + output_slices, + add_input=True, + **kwargs) + + def add_lora_logits(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: torch.Tensor, + lora_b_stacked: torch.Tensor, + scale, + *, + buffer: Optional[torch.Tensor] = None, + **kwargs) -> None: + """ + Applies lora specifically for LogitsProcessorWithLoRA. + + Semantics: + buffer = (x @ lora_a_stacked) * scale + y += buffer @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_a_stacked (torch.Tensor): lora_a's weights. + lora_b_stacked (torch.Tensor):lora_b's weights. + scale (float): Scaling factor. + buffer (Optional[torch.Tensor]):Default to None. + """ + y_org = y + y = y.view(-1, y.shape[-1]) + x = x.view(-1, x.shape[-1]) + r = lora_b_stacked.size(-1) + if buffer is None: + # We set the buffer to be float32 by default ,refer to: + # https://github.com/triton-lang/triton/issues/1387 + buffer = torch.zeros((x.size(0), r), + dtype=torch.float32, + device=x.device) + # LogitsProcessorWithLoRA always using bgmv. 
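# For reference, a naive per-token equivalent of the shrink/expand pair below,
# written with plain 2-D LoRA weights per slot; this is only a sketch of the
# documented semantics (the real stacked tensors carry extra slot and layer
# dimensions, and the actual work is done by the bgmv kernels):
import torch

def naive_lora_logits(y, x, lora_a, lora_b, indices, scale):
    # lora_a[s]: (hidden, r), lora_b[s]: (r, vocab); indices: LoRA slot per row
    for i, slot in enumerate(indices.tolist()):
        if slot < 0:
            continue                              # -1 means: no LoRA applied
        buf = (x[i] @ lora_a[slot]) * scale       # shrink to rank r
        y[i] = y[i] + buf @ lora_b[slot]          # expand back to vocab size
    return y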
+ bgmv_shrink(x, lora_a_stacked, buffer, self.sampler_indices, scale) + bgmv_expand(buffer, + lora_b_stacked, + y, + self.sampler_indices, + add_inputs=True) + y = y.view_as(y_org) diff --git a/vllm/lora/punica_wrapper/punica_selector.py b/vllm/lora/punica_wrapper/punica_selector.py new file mode 100644 index 0000000000000..df6c1bdc7dd71 --- /dev/null +++ b/vllm/lora/punica_wrapper/punica_selector.py @@ -0,0 +1,14 @@ +from vllm.platforms import current_platform +from vllm.utils import print_info_once + +from .punica_base import PunicaWrapperBase + + +def get_punica_wrapper(*args, **kwargs) -> PunicaWrapperBase: + if current_platform.is_cuda_alike(): + # Lazy import to avoid ImportError + from vllm.lora.punica_wrapper.punica_gpu import PunicaWrapperGPU + print_info_once("Using PunicaWrapperGPU.") + return PunicaWrapperGPU(*args, **kwargs) + else: + raise NotImplementedError diff --git a/vllm/lora/punica_wrapper/utils.py b/vllm/lora/punica_wrapper/utils.py new file mode 100644 index 0000000000000..7360c8c09e3ac --- /dev/null +++ b/vllm/lora/punica_wrapper/utils.py @@ -0,0 +1,159 @@ +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +import torch + +if TYPE_CHECKING: + # avoid circuit import + from vllm.lora.layers import LoRAMapping + from vllm.lora.models import LongContextLoRAContext + + +def compute_meta( + token_lora_tensor: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int, bool]: + """ + Get the information required for the sgmv kernel. With the features: + 1. If consecutive requests in the batch use the same LoRA, this function + will combine them into a single request, improving sgmv kernel inference + performance. + 2. At the beginning of each prefill stage inference, recalculations are + needed based on the input, but only once. + """ + + lora_indices_tensor, seq_length_tensor = torch.unique_consecutive( + token_lora_tensor, return_counts=True) + cum_result = torch.cumsum(seq_length_tensor, dim=0) + b_seq_start_tensor = torch.zeros_like(seq_length_tensor) + b_seq_start_tensor[1:].copy_(cum_result[:-1]) + max_length = seq_length_tensor.max().item() + token_nums = seq_length_tensor.sum().item() + batch_size = lora_indices_tensor.size(0) + no_lora = False + # -1 means no lora should be applied. Use `no_lora` to determine whether + # the current step requires LoRA. If LoRA is not needed, the prefill stage + # does not need to launch the triton kernel, which can improve performance + if batch_size == 1 and lora_indices_tensor == -1: + no_lora = True + return (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, + batch_size, max_length, token_nums, no_lora) + + +# TODO see if this can be vectorized +def convert_mapping( + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + device: torch.device, + long_lora_context: Optional["LongContextLoRAContext"] = None, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, + Optional[torch.Tensor], List[int]]: + """Converts LoRAMapping to index tensors. + + Args: + mapping: LoRAMapping mapping rows in a batch to LoRA ids. + lora_index_to_id: List mapping LoRA ids to LoRA indices. + max_loras: Maximum number of LoRAs. + vocab_size: Model vocab size. + extra_vocab_size: Extra vocab size each LoRA can have. + long_lora_context: Passed if there are long context lora in a batch. + + Returns: + A tuple of tensors: + base_indices: Tensor of shape [batch_size] mapping batch rows to + LoRA indices. 
+ sampler_indices: Tensor of shape [batch_size] mapping requests to + LoRA indices for sampler. For generation, this will be the + same as base_indicies. For prefill, this will map requests + to LoRA indices. + sampler_indices_padded: Tensor of shape [batch_size] mapping + requests to LoRA indices for sampler with padding. + Same as sampler_indicies, but -1 is replaced with + max_loras. + embeddings_indices: Tensor of shape [2, batch_size] mapping + requests to embedding indices. First row is for embeddings + added by the LoRAs, second row is for the LoRA.lora_a + embeddings. + long_lora_indices: Tensor of shape [batch_size] mapping + requests to RoPE offsets and rot dims for long LoRAs. + None if long context lora doesn't exist. + indices_len: List of lengths of the above tensors. It contains + (base_indices, sampler_indices, sampler_indices_padded, + embeddings_indices, long_lora_indices). + """ + index_mapping_indices: List[int] = list(mapping.index_mapping).copy() + embedding_indices = index_mapping_indices.copy() + lora_indices = index_mapping_indices.copy() + long_lora_offsets: Optional[torch.Tensor] = None + if long_lora_context: + long_lora_offsets = torch.zeros(len(index_mapping_indices), + device=device, + dtype=torch.long) + prompt_mapping: List[int] = [ + lora_index_to_id.index(x) if x > 0 else -1 + for x in mapping.prompt_mapping + ] + lora_idx = None + for i in range(len(index_mapping_indices)): + # TODO index can be slow. optimize + lora_idx = (lora_index_to_id.index(index_mapping_indices[i]) + if index_mapping_indices[i] > 0 else -1) + embedding_indices[i] = lora_idx if index_mapping_indices[i] > 0 else 0 + lora_indices[i] = lora_idx + if long_lora_context: + assert long_lora_offsets is not None + lora_offset: int = long_lora_context.offsets_by_lora_id.get( + index_mapping_indices[i], 0) + long_lora_offsets[i] = lora_offset + + indices_list: List[Union[List[int], torch.Tensor]] = [ + index_mapping_indices, + lora_indices, + embedding_indices, + ] + if long_lora_context: + assert long_lora_offsets is not None + indices_list.append(long_lora_offsets) + indices = torch.tensor(indices_list, dtype=torch.long, device=device) + prompt_mapping_tensor = torch.tensor(prompt_mapping, + dtype=torch.long, + device=device) + embeddings_indices = torch.stack([ + indices[2] * extra_vocab_size, + indices[2] * (vocab_size + extra_vocab_size), + ]) + embeddings_indices[embeddings_indices == -1] = max_loras - 1 + base_indices = indices[1] + sampler_indices = prompt_mapping_tensor + sampler_indices_padded = sampler_indices.clone() + sampler_indices_padded[sampler_indices_padded == -1] = max_loras - 1 + sampler_indices_padded = torch.arange( + 0, len(sampler_indices_padded), device=device, dtype=torch.long) + ( + sampler_indices_padded * len(sampler_indices_padded)) + long_lora_indices = None + long_lora_indices_len: Optional[int] = None + if long_lora_context: + long_lora_indices = indices[3] + long_lora_indices_len = long_lora_indices.shape[-1] + # Contain length of indices tensors. Used to index into each tensor. 
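# compute_meta above turns per-token LoRA ids into the sgmv batch layout by
# merging consecutive runs that share an id; a small worked example of that
# grouping (token ids invented):
import torch

token_lora = torch.tensor([0, 0, 0, 1, 1, -1, -1, -1])
ids, counts = torch.unique_consecutive(token_lora, return_counts=True)
# ids    -> tensor([ 0,  1, -1])   one sgmv batch entry per run
# counts -> tensor([3, 2, 3])      run lengths, i.e. seq_length_tensor
starts = torch.zeros_like(counts)
starts[1:].copy_(torch.cumsum(counts, dim=0)[:-1])
# starts -> tensor([0, 3, 5])      per-entry offsets, i.e. b_seq_start_tensor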
+ indices_len = [ + base_indices.shape[-1], + sampler_indices.shape[-1], + sampler_indices_padded.shape[-1], + embeddings_indices.shape[-1], + ] + if long_lora_indices_len is not None: + indices_len.append(long_lora_indices_len) + else: + # If long_lora doesn't exist,append None + indices_len.append(None) + + return ( + base_indices, + sampler_indices, + sampler_indices_padded, + embeddings_indices, + long_lora_indices, + indices_len, + ) From a811dd660856a5c222a1447fe1d93deccbc162fd Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Tue, 10 Dec 2024 04:55:10 +0800 Subject: [PATCH 280/397] [Model] merged input processor for Phi-3-Vision models (#10977) Signed-off-by: Isotr0py <2037008807@qq.com> Co-authored-by: Cyrus Leung --- tests/entrypoints/openai/test_vision.py | 4 +- .../openai/test_vision_embedding.py | 4 +- .../mm_processor_kwargs/test_phi3v.py | 136 ++------ tests/multimodal/test_processor_kwargs.py | 169 +++++----- vllm/inputs/registry.py | 4 +- vllm/model_executor/models/phi3v.py | 298 +++++------------- vllm/multimodal/processing.py | 29 +- 7 files changed, 235 insertions(+), 409 deletions(-) diff --git a/tests/entrypoints/openai/test_vision.py b/tests/entrypoints/openai/test_vision.py index 157d873a75b4d..a0b6edd566561 100644 --- a/tests/entrypoints/openai/test_vision.py +++ b/tests/entrypoints/openai/test_vision.py @@ -89,7 +89,7 @@ async def test_single_chat_session_image(client: openai.AsyncOpenAI, choice = chat_completion.choices[0] assert choice.finish_reason == "length" assert chat_completion.usage == openai.types.CompletionUsage( - completion_tokens=10, prompt_tokens=772, total_tokens=782) + completion_tokens=10, prompt_tokens=775, total_tokens=785) message = choice.message message = chat_completion.choices[0].message @@ -181,7 +181,7 @@ async def test_single_chat_session_image_base64encoded( choice = chat_completion.choices[0] assert choice.finish_reason == "length" assert chat_completion.usage == openai.types.CompletionUsage( - completion_tokens=10, prompt_tokens=772, total_tokens=782) + completion_tokens=10, prompt_tokens=775, total_tokens=785) message = choice.message message = chat_completion.choices[0].message diff --git a/tests/entrypoints/openai/test_vision_embedding.py b/tests/entrypoints/openai/test_vision_embedding.py index d0c43b47bf0af..425f2a10ec855 100644 --- a/tests/entrypoints/openai/test_vision_embedding.py +++ b/tests/entrypoints/openai/test_vision_embedding.py @@ -95,5 +95,5 @@ async def test_image_embedding(server: RemoteOpenAIServer, model_name: str, assert len(embeddings["data"]) == 1 assert len(embeddings["data"][0]["embedding"]) == 3072 assert embeddings["usage"]["completion_tokens"] == 0 - assert embeddings["usage"]["prompt_tokens"] == 762 - assert embeddings["usage"]["total_tokens"] == 762 + assert embeddings["usage"]["prompt_tokens"] == 765 + assert embeddings["usage"]["total_tokens"] == 765 diff --git a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_phi3v.py b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_phi3v.py index 60a8f63eb5faa..c16192a1e1438 100644 --- a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_phi3v.py +++ b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_phi3v.py @@ -2,12 +2,10 @@ from typing import Optional import pytest -import torch -from transformers import AutoImageProcessor, AutoTokenizer +from transformers import AutoTokenizer -from vllm.inputs import InputContext, token_inputs +from vllm.inputs import InputContext, InputProcessingContext from 
vllm.model_executor.models.phi3v import _IMAGE_TOKEN_ID -from vllm.multimodal import MultiModalRegistry from .....conftest import _ImageAssets from ....utils import build_model_context @@ -17,15 +15,9 @@ # Wrap lazy imports to avoid initializing CUDA during test collection @pytest.fixture() -def input_processor_for_phi3v(): - from vllm.model_executor.models.phi3v import input_processor_for_phi3v - return input_processor_for_phi3v - - -@pytest.fixture() -def dummy_data_for_phi3v(): - from vllm.model_executor.models.phi3v import dummy_data_for_phi3v - return dummy_data_for_phi3v +def processor_for_phi3v(): + from vllm.model_executor.models.phi3v import Phi3VProcessor + return Phi3VProcessor @pytest.fixture() @@ -34,53 +26,6 @@ def get_max_phi3v_image_tokens(): return get_max_phi3v_image_tokens -@pytest.mark.parametrize("model", models) -@pytest.mark.parametrize("num_crops", [4, 16, None]) -def test_input_mapper_override(model: str, image_assets: _ImageAssets, - num_crops: Optional[int]): - """Ensure that the [default] input mapper handles num_crops properly.""" - # We pass the processor kwargs here since for this model, we fall back to - # the default mapper; this will fall back to the HF mapper and forward - # mm_processor_kwargs to it. - mm_processor_kwargs = { - "num_crops": num_crops - } if num_crops is not None else {} - ctx = build_model_context( - model_name=model, - tokenizer_name=model, - trust_remote_code=True, - mm_processor_kwargs=mm_processor_kwargs, - ) - - hf_processor = AutoImageProcessor.from_pretrained(model, - trust_remote_code=True, - **mm_processor_kwargs) - - mm_registry = MultiModalRegistry() - mm_registry.init_mm_limits_per_prompt(ctx.model_config) - - image = image_assets[0].pil_image - hf_result = hf_processor.preprocess( - image, - return_tensors="pt", - ) - - vllm_result = mm_registry.map_input( - ctx.model_config, - {"image": image}, - ) - - assert torch.all(hf_result["image_sizes"] == vllm_result["image_sizes"]) - assert torch.all( - hf_result["num_img_tokens"] == vllm_result["num_img_tokens"]) - - # For pixel values, the second axis should be the num_crops + 1 - # for the rescaled original image. The default value in VLLM falls - # back to the HF config, which is why we compare to the processor num_crops - assert torch.all(hf_result["pixel_values"] == vllm_result["pixel_values"]) - assert vllm_result["pixel_values"].shape[1] == hf_processor.num_crops + 1 - - @pytest.mark.parametrize("model", models) @pytest.mark.parametrize("num_crops,expected_max_tokens", [ (4, 781), @@ -112,48 +57,20 @@ def test_max_tokens_override(get_max_phi3v_image_tokens, model: str, @pytest.mark.parametrize("model", models) -@pytest.mark.parametrize("num_crops,toks_per_img,num_imgs", [ - (4, 781, 1), - (4, 781, 2), - (16, 2653, 1), - (16, 2653, 2), -]) -def test_dummy_data_override(dummy_data_for_phi3v, model: str, num_crops: int, - toks_per_img: int, num_imgs: int): - """Ensure dummy_data_for_phi3v handles num_crops properly.""" - # Same as the previous test - don't initialize mm_processor_kwargs - # in this test and assume that the kwargs will be correctly expanded by - # the partial when calling the dummy data func. 
- ctx = build_model_context( - model_name=model, - tokenizer_name=model, - trust_remote_code=True, - mm_processor_kwargs=None, - ) - - dummy_data = dummy_data_for_phi3v( - ctx=ctx, - seq_len=8192, # Should be bigger than num_imgs * toks_per_img - mm_counts={"image": num_imgs}, - num_crops=num_crops, - ) - sequence_data = dummy_data.seq_data - # Ensure we have the right number of placeholders per num_crops size - img_tok_count = sequence_data.get_token_ids().count(_IMAGE_TOKEN_ID) - assert img_tok_count == toks_per_img * num_imgs - - -@pytest.mark.parametrize("model", models) -@pytest.mark.parametrize("num_crops,expected_toks_per_img,num_imgs", [ - (4, 757, 1), - (4, 757, 2), - (16, 1921, 1), - (16, 1921, 2), -]) -def test_input_processor_override(input_processor_for_phi3v, - image_assets: _ImageAssets, model: str, - num_crops: int, expected_toks_per_img: int, - num_imgs: int): +@pytest.mark.parametrize( + "num_crops,expected_toks_per_img,num_imgs", + [ + (4, 757, 1), + (4, 757, 2), + (16, 1921, 1), + (16, 1921, 2), + # the default num_crops of phi-3.5-vision is 4 + (None, 757, 2), + (None, 757, 2), + ]) +def test_processor_override(processor_for_phi3v, image_assets: _ImageAssets, + model: str, num_crops: Optional[int], + expected_toks_per_img: int, num_imgs: int): """Ensure input_processor_for_phi3v handles num_crops properly.""" # Same as the previous test - don't initialize mm_processor_kwargs # in this test and assume that the kwargs will be correctly expanded by @@ -163,19 +80,20 @@ def test_input_processor_override(input_processor_for_phi3v, tokenizer_name=model, trust_remote_code=True, ) - tokenizer = AutoTokenizer.from_pretrained(model) + tokenizer = AutoTokenizer.from_pretrained(model, trust_remote_code=True) + ctx = InputProcessingContext(ctx.model_config, tokenizer) # Build the image str / prompt based on the number of images we pass img_str = "".join([f"<|image_{idx}|>\n" for idx in range(1, num_imgs + 1)]) prompt = f"<|user|>\n{img_str}<|end|>\n<|assistant|>\n" images = [image_assets[0].pil_image] * num_imgs - inputs = token_inputs(prompt_token_ids=tokenizer.encode(prompt), - prompt=prompt, - multi_modal_data={"image": images}) + mm_data = {"image": images} + mm_processor_kwargs = {} + if num_crops is not None: + mm_processor_kwargs = {"num_crops": num_crops} - processed_inputs = input_processor_for_phi3v(ctx, - inputs, - num_crops=num_crops) + processor = processor_for_phi3v(ctx) + processed_inputs = processor.apply(prompt, mm_data, mm_processor_kwargs) # Ensure we have the right number of placeholders per num_crops size img_tok_count = processed_inputs["prompt_token_ids"].count(_IMAGE_TOKEN_ID) diff --git a/tests/multimodal/test_processor_kwargs.py b/tests/multimodal/test_processor_kwargs.py index e6c8793989e13..d141cdf1f083b 100644 --- a/tests/multimodal/test_processor_kwargs.py +++ b/tests/multimodal/test_processor_kwargs.py @@ -15,13 +15,13 @@ # Used for fast tests where the model doesn't matter DUMMY_MODEL_ID = "facebook/opt-125m" # Used for tests that need a multimodal model -MULTIMODAL_MODEL_ID = "microsoft/Phi-3.5-vision-instruct" +MULTIMODAL_MODEL_ID = "OpenGVLab/InternVL2-2B" # For mm_processor_kwargs - we test overrides by defining mocks for each place # it is used, and ensuring that we can pass processor kwargs an override value # to receive the intended result for things like sequence length etc. 
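# From the user side, the init-time half of what these tests exercise is just
# passing mm_processor_kwargs when building the engine; per-request overrides
# are handed to the input processor on the prompt inputs, as the helpers below
# do. A rough sketch of the init-time form (kwarg names are model-specific;
# max_dynamic_patch is the InternVL knob used throughout this file):
from vllm import LLM

llm = LLM(
    model="OpenGVLab/InternVL2-2B",
    trust_remote_code=True,
    mm_processor_kwargs={"max_dynamic_patch": 4},  # overrides the model default
)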
-DEFAULT_NUM_CROPS = 4 -NUM_CROPS_OVERRIDE = 16 +DEFAULT_MAX_DYNAMIC_PATCH = 6 +MAX_DYNAMIC_PATCH_OVERRIDE = 4 # Mocks for all of the places that we use the mm_processor_kwargs @@ -33,10 +33,11 @@ def use_processor_mock(): def custom_processor(ctx: InputContext, inputs: DecoderOnlyInputs, *, - num_crops=DEFAULT_NUM_CROPS): + max_dynamic_patch=DEFAULT_MAX_DYNAMIC_PATCH): # For testing purposes, we don't worry about the prompt - return token_inputs(prompt_token_ids=[], - mm_processor_kwargs={"num_crops": num_crops}) + return token_inputs( + prompt_token_ids=[], + mm_processor_kwargs={"max_dynamic_patch": max_dynamic_patch}) with patch("vllm.inputs.registry.InputRegistry._get_model_input_processor", return_value=custom_processor): @@ -52,9 +53,9 @@ def custom_dummy_data_factory(self, seq_len: int, mm_counts: Mapping[str, int], *, - num_crops=DEFAULT_NUM_CROPS): + max_dynamic_patch=DEFAULT_MAX_DYNAMIC_PATCH): seq_data = SequenceData( - array(VLLM_TOKEN_ID_ARRAY_TYPE, [0] * num_crops)) + array(VLLM_TOKEN_ID_ARRAY_TYPE, [0] * max_dynamic_patch)) return DummyData(seq_data, None) with patch( @@ -65,15 +66,15 @@ def custom_dummy_data_factory(self, # Lazy import to avoid CUDA reinitialization error def mm_model_cls(): - from vllm.model_executor.models.phi3v import Phi3VForCausalLM + from vllm.model_executor.models.internvl import InternVLChatModel - return Phi3VForCausalLM + return InternVLChatModel # lambda whose signature matches max token calcs extra & mapper + extra kwargs -get_num_crops = lambda ctx, *, num_crops=DEFAULT_NUM_CROPS: num_crops -custom_mapper = lambda ctx, data, *, num_crops=DEFAULT_NUM_CROPS: { - "pixel_values": torch.zeros(size=(1, num_crops + 1, 3, 336, 336)) +get_max_dynamic_patch = lambda ctx, *, max_dynamic_patch=DEFAULT_MAX_DYNAMIC_PATCH: max_dynamic_patch # noqa: E501 +custom_mapper = lambda ctx, data, *, max_dynamic_patch=DEFAULT_MAX_DYNAMIC_PATCH: { # noqa: E501 + "pixel_values": torch.zeros(size=(1, max_dynamic_patch + 1, 3, 448, 448)) } @@ -88,27 +89,28 @@ def test_default_processor_is_a_noop(): assert proc_inputs is proc_outputs -def _get_num_crops_info(init_num_crops: int, inference_num_crops: int): - """Get the init / inference kwargs and expected num_crops for this test.""" - # If we have a value for num_crops, pass the override value and make +def _get_max_dynamic_patch_info(init_max_dynamic_patch: int, + inference_max_dynamic_patch: int): + """Get the init / inference kwargs and expected max_dynamic_patch.""" + # If we have a value for max_dynamic_patch, pass the override value and make # sure we get that value as a return-value from out mock processor, # otherwise fall back to the default value - init_kwargs = None if init_num_crops is None else { - "num_crops": init_num_crops + init_kwargs = None if init_max_dynamic_patch is None else { + "max_dynamic_patch": init_max_dynamic_patch } - inference_kwargs = None if inference_num_crops is None else { - "num_crops": inference_num_crops + inference_kwargs = None if inference_max_dynamic_patch is None else { + "max_dynamic_patch": inference_max_dynamic_patch } - if inference_num_crops is not None: - expected_seq_count = inference_num_crops - elif init_num_crops is not None: - expected_seq_count = init_num_crops + if inference_max_dynamic_patch is not None: + expected_seq_count = inference_max_dynamic_patch + elif init_max_dynamic_patch is not None: + expected_seq_count = init_max_dynamic_patch else: - expected_seq_count = DEFAULT_NUM_CROPS + expected_seq_count = DEFAULT_MAX_DYNAMIC_PATCH return init_kwargs, 
inference_kwargs, expected_seq_count -def _get_processed_num_crops( +def _get_processed_max_dynamic_patch( processor: Callable[[ProcessorInputs], ProcessorInputs], inference_kwargs: Optional[Dict[str, int]], ) -> int: @@ -120,27 +122,30 @@ def _get_processed_num_crops( assert "type" in processed_inputs assert processed_inputs["type"] == "token" assert "mm_processor_kwargs" in processed_inputs - return processed_inputs["mm_processor_kwargs"]["num_crops"] + return processed_inputs["mm_processor_kwargs"]["max_dynamic_patch"] -@pytest.mark.parametrize("init_num_crops,inference_num_crops", [ - (None, None), - (NUM_CROPS_OVERRIDE, None), - (DEFAULT_NUM_CROPS, NUM_CROPS_OVERRIDE), -]) -def test_input_processor_kwargs(use_processor_mock, init_num_crops, - inference_num_crops): +@pytest.mark.parametrize( + "init_max_dynamic_patch,inference_max_dynamic_patch", [ + (None, None), + (MAX_DYNAMIC_PATCH_OVERRIDE, None), + (DEFAULT_MAX_DYNAMIC_PATCH, MAX_DYNAMIC_PATCH_OVERRIDE), + ]) +def test_input_processor_kwargs(use_processor_mock, init_max_dynamic_patch, + inference_max_dynamic_patch): """Ensure input processors can use processor kwargs.""" dummy_registry = InputRegistry() - init_kwargs, inference_kwargs, expected_seq_count = _get_num_crops_info( - init_num_crops, inference_num_crops) + (init_kwargs, inference_kwargs, + expected_seq_count) = _get_max_dynamic_patch_info( + init_max_dynamic_patch, inference_max_dynamic_patch) ctx = build_model_context(DUMMY_MODEL_ID, mm_processor_kwargs=init_kwargs) processor = dummy_registry.create_input_processor(ctx.model_config) - num_crops_val = _get_processed_num_crops(processor, inference_kwargs) + max_dynamic_patch_val = _get_processed_max_dynamic_patch( + processor, inference_kwargs) - assert num_crops_val == expected_seq_count + assert max_dynamic_patch_val == expected_seq_count @pytest.mark.parametrize( @@ -165,18 +170,21 @@ def test_processor_with_sad_kwarg_overrides(use_processor_mock, processor = dummy_registry.create_input_processor(ctx.model_config) # Should filter out the inference time kwargs - num_crops_val = _get_processed_num_crops(processor, mm_processor_kwargs) - assert num_crops_val == DEFAULT_NUM_CROPS + max_dynamic_patch_val = _get_processed_max_dynamic_patch( + processor, mm_processor_kwargs) + assert max_dynamic_patch_val == DEFAULT_MAX_DYNAMIC_PATCH ### Test overrides for the dummy data -@pytest.mark.parametrize("num_crops", [None, NUM_CROPS_OVERRIDE]) -def test_dummy_data_kwarg_overrides(use_dummy_data_mock, num_crops): +@pytest.mark.parametrize("max_dynamic_patch", + [None, MAX_DYNAMIC_PATCH_OVERRIDE]) +def test_dummy_data_kwarg_overrides(use_dummy_data_mock, max_dynamic_patch): """Ensure dummy data factories can use processor kwargs.""" - mm_processor_kwargs = None if num_crops is None else { - "num_crops": num_crops + mm_processor_kwargs = None if max_dynamic_patch is None else { + "max_dynamic_patch": max_dynamic_patch } - expected_seq_count = DEFAULT_NUM_CROPS if num_crops is None else num_crops + expected_seq_count = (DEFAULT_MAX_DYNAMIC_PATCH + if max_dynamic_patch is None else max_dynamic_patch) dummy_registry = InputRegistry() ctx = build_model_context(DUMMY_MODEL_ID, mm_processor_kwargs=mm_processor_kwargs) @@ -217,17 +225,20 @@ def test_dummy_data_with_sad_kwarg_overrides(use_dummy_data_mock, # len is solely dependent on the value of the mm_processor_kwargs. 
dummy_data = dummy_registry.dummy_data_for_profiling( ctx.model_config, seq_len=-1, mm_registry=mm_registry) - assert len(dummy_data.seq_data.prompt_token_ids) == DEFAULT_NUM_CROPS + assert len( + dummy_data.seq_data.prompt_token_ids) == DEFAULT_MAX_DYNAMIC_PATCH ### Test overrides for the max token count per multimodal instance -@pytest.mark.parametrize("num_crops", [None, NUM_CROPS_OVERRIDE]) -def test_max_tokens_kwarg_overrides(num_crops): +@pytest.mark.parametrize("max_dynamic_patch", + [None, MAX_DYNAMIC_PATCH_OVERRIDE]) +def test_max_tokens_kwarg_overrides(max_dynamic_patch): """Ensure max token calcs can use processor kwargs.""" - mm_processor_kwargs = None if num_crops is None else { - "num_crops": num_crops + mm_processor_kwargs = None if max_dynamic_patch is None else { + "max_dynamic_patch": max_dynamic_patch } - expected_seq_count = DEFAULT_NUM_CROPS if num_crops is None else num_crops + expected_seq_count = (DEFAULT_MAX_DYNAMIC_PATCH + if max_dynamic_patch is None else max_dynamic_patch) ctx = build_model_context(MULTIMODAL_MODEL_ID, task="generate", @@ -239,11 +250,11 @@ def test_max_tokens_kwarg_overrides(num_crops): mm_registry.init_mm_limits_per_prompt(ctx.model_config) # Patch the image registry for phi3v with our lambda that is compatible # with overrides, then ensure that calling the method correctly echos - # our num_crops value back from the mm_processor_kwargs. + # our max_dynamic_patch value back from the mm_processor_kwargs. with patch.object( mm_registry._get_plugin("image"), "_max_mm_tokens", - {mm_model_cls(): get_num_crops}, + {mm_model_cls(): get_max_dynamic_patch}, ): max_multimodal_tokens = mm_registry.get_max_multimodal_tokens( ctx.model_config) @@ -279,26 +290,29 @@ def test_max_tokens_with_sad_kwarg_overrides(mm_processor_kwargs): with patch.object( mm_registry._get_plugin("image"), "_max_mm_tokens", - {mm_model_cls(): get_num_crops}, + {mm_model_cls(): get_max_dynamic_patch}, ): max_multimodal_tokens = mm_registry.get_max_multimodal_tokens( ctx.model_config) - assert max_multimodal_tokens == DEFAULT_NUM_CROPS + assert max_multimodal_tokens == DEFAULT_MAX_DYNAMIC_PATCH ### Test overrides for the mapper -@pytest.mark.parametrize("num_crops", [DEFAULT_NUM_CROPS, NUM_CROPS_OVERRIDE]) -def test_default_mapper_with_processor_kwargs(image_assets, num_crops): +@pytest.mark.parametrize( + "max_dynamic_patch", + [DEFAULT_MAX_DYNAMIC_PATCH, MAX_DYNAMIC_PATCH_OVERRIDE]) +def test_default_mapper_with_processor_kwargs(image_assets, max_dynamic_patch): """Ensure that the mapper processor kwargs can fall back to HF models.""" # NOTE - we don't validate bad inputs for the default mapper, because it's # through the automodel interface in transformers, so we can't easily # inspect what kwargs are or are not allowed. 
- ctx = build_model_context(MULTIMODAL_MODEL_ID, - task="generate", - trust_remote_code=True, - mm_processor_kwargs={"num_crops": num_crops}, - limit_mm_per_prompt={"image": 1}) + ctx = build_model_context( + MULTIMODAL_MODEL_ID, + task="generate", + trust_remote_code=True, + mm_processor_kwargs={"max_dynamic_patch": max_dynamic_patch}, + limit_mm_per_prompt={"image": 1}) mm_registry = MultiModalRegistry() mm_registry.init_mm_limits_per_prompt(ctx.model_config) @@ -307,20 +321,22 @@ def test_default_mapper_with_processor_kwargs(image_assets, num_crops): mm_inputs = {"image": image} mapped_inputs = mm_registry.map_input(ctx.model_config, mm_inputs) - # Phi3v pixel vals should have shape: [batch, num_crops+1, 3, 336, 336] - assert mapped_inputs["pixel_values"].shape[1] == num_crops + 1 + # pixel vals should have shape: [batch, max_dynamic_patch+1, ...] + assert mapped_inputs["pixel_values"].shape[1] == max_dynamic_patch + 1 -@pytest.mark.parametrize("init_num_crops,inference_num_crops", [ - (None, None), - (NUM_CROPS_OVERRIDE, None), - (DEFAULT_NUM_CROPS, NUM_CROPS_OVERRIDE), -]) -def test_custom_mapper_kwarg_overrides(image_assets, init_num_crops, - inference_num_crops): +@pytest.mark.parametrize( + "init_max_dynamic_patch,inference_max_dynamic_patch", [ + (None, None), + (MAX_DYNAMIC_PATCH_OVERRIDE, None), + (DEFAULT_MAX_DYNAMIC_PATCH, MAX_DYNAMIC_PATCH_OVERRIDE), + ]) +def test_custom_mapper_kwarg_overrides(image_assets, init_max_dynamic_patch, + inference_max_dynamic_patch): """Ensure custom mappers can use processor kwargs.""" - init_kwargs, inference_kwargs, expected_seq_count = _get_num_crops_info( - init_num_crops, inference_num_crops) + (init_kwargs, inference_kwargs, + expected_seq_count) = _get_max_dynamic_patch_info( + init_max_dynamic_patch, inference_max_dynamic_patch) ctx = build_model_context(MULTIMODAL_MODEL_ID, task="generate", @@ -335,7 +351,7 @@ def test_custom_mapper_kwarg_overrides(image_assets, init_num_crops, # Patch the image registry for phi3v with our lambda that is compatible # with overrides, then ensure that calling the method correctly echos - # our num_crops value back from the mm_processor_kwargs. + # our max_dynamic_patch value back from the mm_processor_kwargs. mm_registry._get_plugin("image").register_input_mapper(custom_mapper)( mm_model_cls()) mapped_inputs = mm_registry.map_input(ctx.model_config, mm_inputs, @@ -373,11 +389,12 @@ def test_custom_mapper_with_sad_kwarg_overrides(image_assets, # Patch the image registry for phi3v with our lambda that is compatible # with overrides, then ensure that calling the method correctly echos - # our num_crops value back from the mm_processor_kwargs. + # our max_dynamic_patch value back from the mm_processor_kwargs. 
mm_registry._get_plugin("image").register_input_mapper(custom_mapper)( mm_model_cls()) # Should filter out the inference time kwargs mapped_inputs = mm_registry.map_input( ctx.model_config, mm_inputs, mm_processor_kwargs=mm_processor_kwargs) - assert mapped_inputs["pixel_values"].shape[1] == DEFAULT_NUM_CROPS + 1 + assert mapped_inputs["pixel_values"].shape[1] == ( + DEFAULT_MAX_DYNAMIC_PATCH + 1) diff --git a/vllm/inputs/registry.py b/vllm/inputs/registry.py index 646554c72481a..0dfed3b7e61bf 100644 --- a/vllm/inputs/registry.py +++ b/vllm/inputs/registry.py @@ -69,12 +69,12 @@ class InputProcessingContext(InputContext): tokenizer: AnyTokenizer """The tokenizer used to tokenize the inputs.""" - def get_hf_processor(self) -> ProcessorMixin: + def get_hf_processor(self, **kwargs) -> ProcessorMixin: return cached_get_processor( self.model_config.tokenizer, tokenizer=self.tokenizer, # Override the tokenizer with ours trust_remote_code=self.model_config.trust_remote_code, - ) + **kwargs) N = TypeVar("N", bound=Type[nn.Module]) diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index eef23029a2aca..3c7854ce388ab 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -12,22 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import itertools -import re -from functools import cached_property, lru_cache -from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, - Tuple, TypedDict, Union) +from functools import cached_property +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, + TypedDict, Union) -import numpy as np import torch import torch.nn as nn -from PIL import Image -from transformers import CLIPVisionConfig, PretrainedConfig +from transformers import (BatchFeature, CLIPVisionConfig, PretrainedConfig, + ProcessorMixin) from vllm.attention import AttentionMetadata -from vllm.config import ModelConfig, VllmConfig -from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, - InputContext, token_inputs) +from vllm.config import VllmConfig +from vllm.inputs import InputContext from vllm.logger import init_logger from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler @@ -36,12 +32,18 @@ from vllm.model_executor.models.clip import CLIPVisionModel from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY -from vllm.multimodal.inputs import NestedTensors, PlaceholderRange -from vllm.multimodal.utils import cached_get_tokenizer, repeat_and_pad_token +from vllm.multimodal.image import cached_get_image_processor +from vllm.multimodal.inputs import MultiModalKwargs, NestedTensors +from vllm.multimodal.processing import (BaseMultiModalProcessor, + InputProcessingContext, + ModalityProcessingMetadata, + MultiModalDataDict, + MultiModalProcessingMetadata, + PromptReplacement) from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of -from .clip import dummy_image_for_clip, dummy_seq_data_for_clip +from .clip import dummy_image_for_clip from .interfaces import SupportsMultiModal, SupportsPP from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, init_vllm_registered_model, maybe_prefix, @@ -303,231 +305,99 @@ def add_image_newline(self, image_features_hd): return 
image_features_hd_newline -# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L57 -def _calc_padded_size(*, width: int, height: int, padding_unit: int = 336): - target_height = int(np.ceil(height / padding_unit) * padding_unit) - top_padding = int((target_height - height) / 2) - bottom_padding = target_height - height - top_padding - padded_width = width - padded_height = height + top_padding + bottom_padding - return padded_width, padded_height - - -# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L90 -def _calc_hd_transform_size(*, width: int, height: int, hd_num: int): - transposed = False - if width < height: - width, height = height, width - transposed = True - - ratio = width / height - scale = 1 - while scale * np.ceil(scale / ratio) <= hd_num: - scale += 1 - scale -= 1 - - new_width = int(scale * 336) - new_height = int(new_width / ratio) - - padded_width, padded_height = _calc_padded_size(width=new_width, - height=new_height) - - if transposed: - padded_width, padded_height = padded_height, padded_width - - return padded_width, padded_height - - -# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L181 -def get_phi3v_image_feature_size( - hf_config: Dict[str, Any], - *, - input_height: int, - input_width: int, - num_crops: int, -) -> int: - if num_crops is None: - num_crops = hf_config.get("num_crops", 16) - new_width, new_height = _calc_hd_transform_size(width=input_width, - height=input_height, - hd_num=num_crops) - - return (new_height // 336 * new_width // 336 + 1) * 144 + 1 \ - + (new_height // 336 + 1) * 12 - - def get_max_phi3v_image_tokens(ctx: InputContext, *, num_crops: Optional[int] = None): + mm_processor_kwargs = {} + if num_crops is not None: + mm_processor_kwargs["num_crops"] = num_crops - return get_phi3v_image_feature_size( - ctx.get_hf_image_processor_config(), - input_height=MAX_IMAGE_FEATURE_SIZE_HEIGHT, - input_width=MAX_IMAGE_FEATURE_SIZE_WIDTH, - num_crops=num_crops, + model_config = ctx.model_config + image_processor = cached_get_image_processor( + model_config.model, + trust_remote_code=model_config.trust_remote_code, + **mm_processor_kwargs, + ) + + num_tokens = image_processor.calc_num_image_tokens_from_image_size( + width=MAX_IMAGE_FEATURE_SIZE_WIDTH, + height=MAX_IMAGE_FEATURE_SIZE_HEIGHT, ) + return num_tokens -def dummy_data_for_phi3v(ctx: InputContext, - seq_len: int, - mm_counts: Mapping[str, int], - *, - num_crops: Optional[int] = None): +def dummy_mm_kwargs_for_phi3v(ctx: InputProcessingContext, + mm_counts: Mapping[str, int]): num_images = mm_counts["image"] - image_feature_size = get_max_phi3v_image_tokens(ctx, num_crops=num_crops) - - seq_data, ranges = dummy_seq_data_for_clip( - CLIP_VIT_LARGE_PATCH14_336_CONFIG, - seq_len, - num_images, - image_token_id=_IMAGE_TOKEN_ID, - image_feature_size_override=image_feature_size, - ) - mm_data = dummy_image_for_clip( + data = dummy_image_for_clip( CLIP_VIT_LARGE_PATCH14_336_CONFIG, num_images, image_width_override=MAX_IMAGE_FEATURE_SIZE_WIDTH, image_height_override=MAX_IMAGE_FEATURE_SIZE_HEIGHT, ) - return DummyData(seq_data, mm_data, ranges) - + hf_processor = ctx.get_hf_processor() + image_processor = hf_processor.image_processor # type: ignore + hf_inputs = image_processor.preprocess(data['image'], return_tensors="pt") -@lru_cache -def _get_image_placeholder_token_id_candidates( - model_config: ModelConfig, - idx: int, -) -> 
List[List[int]]: - assert idx > 0 + return MultiModalKwargs(**hf_inputs) - tokenizer = cached_get_tokenizer(model_config.tokenizer) - # This is used when the image token is at the start of the string - start_candidate = tokenizer.encode(f"<|image_{idx}|>", - add_special_tokens=False) +def create_metadata_for_phi3v( + ctx: InputProcessingContext) -> MultiModalProcessingMetadata: + return { + "image": + ModalityProcessingMetadata(prompt_repls=[ + PromptReplacement(target=[_IMAGE_TOKEN_ID], + repl_unit=[_IMAGE_TOKEN_ID], + repl_count=get_max_phi3v_image_tokens(ctx)), + ]), + } - # This is used when the image token is in the middle of the string - # We need to get the token for "<", not "▁<" - # https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/raw/main/tokenizer.json - a_token_id, = tokenizer.encode("a", add_special_tokens=False) - a_token_id_, *middle_candidate = tokenizer.encode(f"a<|image_{idx}|>", - add_special_tokens=False) - assert a_token_id == a_token_id_ - return [start_candidate, middle_candidate] +class Phi3VProcessor(BaseMultiModalProcessor): + def __init__(self, ctx: InputProcessingContext) -> None: + super().__init__( + ctx=ctx, + metadata=create_metadata_for_phi3v(ctx), + ) -def input_processor_for_phi3v(ctx: InputContext, - inputs: DecoderOnlyInputs, - *, - num_crops: Optional[int] = None): - multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is None or "image" not in multi_modal_data: - return inputs - - model_config = ctx.model_config - hf_config = ctx.get_hf_image_processor_config() - - image_data = multi_modal_data["image"] - if isinstance(image_data, Image.Image): - w, h = image_data.size - image_feature_size = [ - get_phi3v_image_feature_size(hf_config, - input_width=w, - input_height=h, - num_crops=num_crops) - ] - image_data = [image_data] - elif is_list_of(image_data, Image.Image): - image_feature_size = [] - for image in image_data: - w, h = image.size - image_feature_size.append( - get_phi3v_image_feature_size(hf_config, - input_width=w, - input_height=h, - num_crops=num_crops)) - elif isinstance(image_data, torch.Tensor): - image_feature_size = [image_data.shape[0]] - image_data = [image_data] - elif is_list_of(image_data, torch.Tensor): - image_feature_size = [item.shape[0] for item in image_data] - else: - raise TypeError(f"Invalid image type: {type(image_data)}") - - prompt = inputs.get("prompt") - if prompt is None: - # for async server request, we assume prompt and its token_ids is always - # in correct format. And num_image_tags == len(image_data) always True. 
- image_idx = range(1, len(image_data) + 1) - new_prompt = None - else: - image_idx = sorted(map(int, re.findall(r"<\|image_(\d+)\|>+", prompt))) - if prompt.count("<|image|>") > 0: - logger.warning("Please follow the prompt format that is " - "documented on HuggingFace which does not involve " - "repeating <|image|> tokens.") - elif (num_image_tags := len(image_idx)) > 1: - assert num_image_tags == len( - image_data), "The count of image_placeholder not match image's" - new_prompt = prompt - - prompt_token_ids = inputs["prompt_token_ids"].copy() - - # masked placeholder with image token id - for idx in image_idx: - candidates = _get_image_placeholder_token_id_candidates(model_config, - idx=idx) - - for candidate in candidates: - for i in range(len(prompt_token_ids) - len(candidate) + 1): - if prompt_token_ids[i:i + len(candidate)] == candidate: - prompt_token_ids[i:i + - len(candidate)] = ([_IMAGE_TOKEN_ID] * - len(candidate)) - break - - # merge consecutive tag ids - merged_token_ids: List[int] = [] - for is_placeholder, token_ids in itertools.groupby( - prompt_token_ids, lambda x: x == _IMAGE_TOKEN_ID): - if is_placeholder: - merged_token_ids.append(_IMAGE_TOKEN_ID) - else: - merged_token_ids.extend(list(token_ids)) - - # TODO: Move this to utils or integrate with clip. - new_token_ids: List[int] = [] - placeholder_ranges: List[PlaceholderRange] = [] - placeholder_idx = 0 - while merged_token_ids: - token_id = merged_token_ids.pop(0) - if token_id == _IMAGE_TOKEN_ID: - replacement_ids = repeat_and_pad_token( - _IMAGE_TOKEN_ID, - repeat_count=image_feature_size[placeholder_idx], - ) - placeholder_ranges.append({ - "offset": len(new_token_ids), - "length": len(replacement_ids) - }) - new_token_ids.extend(replacement_ids) - placeholder_idx += 1 - else: - new_token_ids.append(token_id) - - # NOTE: Create a defensive copy of the original inputs - return token_inputs(prompt_token_ids=new_token_ids, - prompt=new_prompt, - multi_modal_data=multi_modal_data, - multi_modal_placeholders={"image": placeholder_ranges}) + def _get_hf_processor( + self, + *, + num_crops: Optional[int] = None, + ) -> ProcessorMixin: + if num_crops is not None: + return self.ctx.get_hf_processor(num_crops=num_crops) + return self.ctx.get_hf_processor() + + def _apply_hf_processor( + self, + prompt: str, + mm_data: MultiModalDataDict, + mm_processor_kwargs: Mapping[str, object], + ) -> BatchFeature: + processed_outputs = super()._apply_hf_processor( + prompt, mm_data, mm_processor_kwargs) + # Phi3v processor has inserted -1, -2 etc as placeholder in prompt_ids, + # which will cause OverflowError when decoding the prompt_ids. 
+ # Therefore, we need to do an early replacement here + token_ids = processed_outputs['input_ids'] + token_ids[token_ids < 0] = _IMAGE_TOKEN_ID + processed_outputs['input_ids'] = token_ids + return processed_outputs + + def _get_dummy_mm_kwargs( + self, + mm_counts: Mapping[str, int], + ) -> MultiModalKwargs: + return dummy_mm_kwargs_for_phi3v(self.ctx, mm_counts) -@MULTIMODAL_REGISTRY.register_image_input_mapper() @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_phi3v_image_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_phi3v) -@INPUT_REGISTRY.register_input_processor(input_processor_for_phi3v) +@MULTIMODAL_REGISTRY.register_processor(Phi3VProcessor) class Phi3VForCausalLM(nn.Module, SupportsMultiModal, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index c3a95d60e6fe6..922c83b6fd8a9 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -3,7 +3,8 @@ from collections.abc import Callable, ItemsView, Iterable, Mapping, Sequence from dataclasses import dataclass from functools import lru_cache -from typing import Any, Generic, NamedTuple, Optional, Protocol, TypeVar, Union +from typing import (Any, Dict, Generic, NamedTuple, Optional, Protocol, + TypeVar, Union, cast) import torch from transformers import BatchFeature, ProcessorMixin @@ -11,7 +12,8 @@ from vllm.inputs import DummyData, InputProcessingContext from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import flatten_2d_lists, full_groupby, is_list_of +from vllm.utils import (flatten_2d_lists, full_groupby, is_list_of, + resolve_mm_processor_kwargs) from .inputs import (AudioItem, ImageItem, MultiModalDataDict, MultiModalInputsV2, MultiModalKwargs, PlaceholderRange, @@ -543,8 +545,14 @@ def __init__( self.ctx = ctx self.metadata = metadata + self.init_mm_processor_kwargs = (ctx.model_config.mm_processor_kwargs + or {}) - def _get_hf_processor(self) -> ProcessorMixin: + def _get_hf_processor( + self, + **mm_processor_kwargs: Mapping[str, object], + ) -> ProcessorMixin: + # by default, we won't pass any kwargs to the processor initialization return self.ctx.get_hf_processor() def _get_tokenizer(self) -> AnyTokenizer: @@ -581,7 +589,13 @@ def _apply_hf_processor( mm_data: MultiModalDataDict, mm_processor_kwargs: Mapping[str, object], ) -> BatchFeature: - hf_processor = self._get_hf_processor() + # some mm_processor_kwargs may be used in processor initialization + # instead of processor call + processor_init_kwargs = { + **self.init_mm_processor_kwargs, + **mm_processor_kwargs, + } + hf_processor = self._get_hf_processor(**processor_init_kwargs) processor_data = dict[str, Any]() passthrough_data = dict[str, Any]() @@ -601,6 +615,13 @@ def _apply_hf_processor( else: processor_data[k] = v + # filter mm_processor_kwargs used in processor call + mm_processor_kwargs = resolve_mm_processor_kwargs( + self.init_mm_processor_kwargs, + cast(Dict[str, Any], mm_processor_kwargs), + hf_processor, + ) + try: hf_inputs = hf_processor( text=prompt, # type: ignore From cbcbdb1ceb9c219d13b2386e101992c399410551 Mon Sep 17 00:00:00 2001 From: Konrad Zawora Date: Mon, 9 Dec 2024 22:21:06 +0100 Subject: [PATCH 281/397] [Bugfix][Hardware][Gaudi] Bump vllm_hpu_extension version (#11028) Signed-off-by: Konrad Zawora --- requirements-hpu.txt | 2 +- vllm/attention/backends/hpu_attn.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git 
a/requirements-hpu.txt b/requirements-hpu.txt index 4674efb812cfd..17d40d0ee131a 100644 --- a/requirements-hpu.txt +++ b/requirements-hpu.txt @@ -8,4 +8,4 @@ pandas tabulate setuptools>=61 setuptools-scm>=8 -vllm-hpu-extension @ git+https://github.com/HabanaAI/vllm-hpu-extension.git@fd7f2e6 +vllm-hpu-extension @ git+https://github.com/HabanaAI/vllm-hpu-extension.git@e096d6f diff --git a/vllm/attention/backends/hpu_attn.py b/vllm/attention/backends/hpu_attn.py index 2c62e565c04c7..f90d15d4207e7 100644 --- a/vllm/attention/backends/hpu_attn.py +++ b/vllm/attention/backends/hpu_attn.py @@ -111,8 +111,16 @@ def __init__( self.matmul_qk = Matmul() self.softmax = Softmax() self.matmul_av = Matmul() + self.batch2block_matmul = Matmul() + self.block2batch_matmul = Matmul() + # NOTE(kzawora): Contiguous PA is off until model runner supports it self.k_cache = VLLMKVCache() + self.k_cache.use_contiguous_pa = False self.v_cache = VLLMKVCache() + self.v_cache.use_contiguous_pa = False + # NOTE(kzawora): Pipelined PA is off until model runner supports it + ops.pa_impl = ops.pa + self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads self.sliding_window = sliding_window self.alibi_slopes = alibi_slopes @@ -228,9 +236,12 @@ def forward( block_mapping=attn_metadata.block_mapping, block_bias=attn_metadata.attn_bias, block_scales=attn_metadata.block_scales, + block_groups=None, scale=self.scale, matmul_qk_op=self.matmul_qk, matmul_av_op=self.matmul_av, + batch2block_matmul_op=self.batch2block_matmul, + block2batch_matmul_op=self.block2batch_matmul, keys_fetch_func=self.k_cache.fetch_from_cache, values_fetch_func=self.v_cache.fetch_from_cache) # Reshape the output tensor. From 1a2f8fb828f0444705db319786b2e901159f184e Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 9 Dec 2024 13:47:24 -0800 Subject: [PATCH 282/397] [v1] fix use compile sizes (#11000) Signed-off-by: youkaichao --- vllm/config.py | 1 + vllm/v1/worker/gpu_model_runner.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/vllm/config.py b/vllm/config.py index 29f0839dcabba..5fb9563fcf3a3 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2522,6 +2522,7 @@ def __post_init__(self): self.compilation_config.custom_ops = ["none"] self.compilation_config.use_cudagraph = True self.compilation_config.use_inductor = True + self.compilation_config.cudagraph_num_of_warmups = 1 self.compilation_config.pass_config.enable_fusion = False self.compilation_config.pass_config.enable_reshape = False self.compilation_config.level = CompilationLevel.PIECEWISE diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 7f95be06188e3..c601aca13feaf 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -582,6 +582,9 @@ def capture_model(self) -> None: # can reuse the memory pool allocated for the large shapes. with graph_capture(): for num_tokens in reversed(self.cudagraph_batch_sizes): + for _ in range(self.vllm_config.compilation_config. 
+ cudagraph_num_of_warmups): + self._dummy_run(self.model, num_tokens, self.kv_caches) self._dummy_run(self.model, num_tokens, self.kv_caches) end_time = time.perf_counter() From 9c6459e4cb020ec1ad9ea08cac9309b83d432fc8 Mon Sep 17 00:00:00 2001 From: xendo Date: Mon, 9 Dec 2024 22:53:24 +0100 Subject: [PATCH 283/397] [Neuron] Upgrade neuron to 2.20.2 (#11016) Signed-off-by: Jerzy Zagorski Co-authored-by: Jerzy Zagorski --- Dockerfile.neuron | 3 ++- vllm/utils.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile.neuron b/Dockerfile.neuron index 76dbd4c04d3f3..77162bc82de62 100644 --- a/Dockerfile.neuron +++ b/Dockerfile.neuron @@ -1,5 +1,6 @@ # default base image -ARG BASE_IMAGE="public.ecr.aws/neuron/pytorch-inference-neuronx:2.1.2-neuronx-py310-sdk2.20.0-ubuntu20.04" +# https://gallery.ecr.aws/neuron/pytorch-inference-neuronx +ARG BASE_IMAGE="public.ecr.aws/neuron/pytorch-inference-neuronx:2.1.2-neuronx-py310-sdk2.20.2-ubuntu20.04" FROM $BASE_IMAGE diff --git a/vllm/utils.py b/vllm/utils.py index 1f19d9eacd16d..2bb1fb2af40f4 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1628,7 +1628,7 @@ def direct_register_custom_op( library object. If you want to bind the operator to a different library, make sure the library object is alive when the operator is used. """ - if is_in_doc_build(): + if is_in_doc_build() or not supports_custom_op(): return import torch.library if hasattr(torch.library, "infer_schema"): From b63ba848323efd88207b12d7582501d525503b8a Mon Sep 17 00:00:00 2001 From: Gregory Shtrasberg <156009573+gshtras@users.noreply.github.com> Date: Mon, 9 Dec 2024 17:00:29 -0500 Subject: [PATCH 284/397] [ROCm][bugfix] scpecilative decoding worker class (#11035) Signed-off-by: Gregory Shtrasberg --- vllm/platforms/rocm.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 66674e3ebe91f..0133f26a0b1bc 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -93,6 +93,8 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: elif vllm_config.speculative_config: parallel_config.worker_cls = \ "vllm.spec_decode.spec_decode_worker.create_spec_worker" + parallel_config.sd_worker_cls = \ + "vllm.worker.worker.Worker" else: parallel_config.worker_cls = "vllm.worker.worker.Worker" From 5ed5d5f128d26a48c1b1db16c319fcb96c93799d Mon Sep 17 00:00:00 2001 From: Richard Liu <39319471+richardsliu@users.noreply.github.com> Date: Mon, 9 Dec 2024 15:07:48 -0800 Subject: [PATCH 285/397] Build tpu image in release pipeline (#10936) Signed-off-by: Richard Liu Co-authored-by: Kevin H. Luu --- .buildkite/release-pipeline.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/.buildkite/release-pipeline.yaml b/.buildkite/release-pipeline.yaml index 93e118fb3eab8..2de6fceb0c3fe 100644 --- a/.buildkite/release-pipeline.yaml +++ b/.buildkite/release-pipeline.yaml @@ -39,3 +39,19 @@ steps: - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7" - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.1.0 --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT --target vllm-openai --progress plain ." 
- "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT" + + - label: "Build and publish TPU release image" + depends_on: ~ + if: build.env("NIGHTLY") == "1" + agents: + queue: tpu_queue_postmerge + commands: + - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --tag vllm/vllm-tpu:nightly --tag vllm/vllm-tpu:$BUILDKITE_COMMIT --progress plain -f Dockerfile.tpu ." + - "docker push vllm/vllm-tpu:nightly" + - "docker push vllm/vllm-tpu:$BUILDKITE_COMMIT" + plugins: + - docker-login#v3.0.0: + username: vllm + password-env: DOCKERHUB_TOKEN + env: + DOCKER_BUILDKIT: "1" From 6faec545057e6152e92e8ab619fc018e20864943 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 9 Dec 2024 15:08:19 -0800 Subject: [PATCH 286/397] [V1] Do not store `None` in self.generators (#11038) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu_input_batch.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/vllm/v1/worker/gpu_input_batch.py b/vllm/v1/worker/gpu_input_batch.py index 457784bb0287c..25d95ac6e26af 100644 --- a/vllm/v1/worker/gpu_input_batch.py +++ b/vllm/v1/worker/gpu_input_batch.py @@ -102,6 +102,8 @@ def __init__( self.top_k_reqs: Set[str] = set() # req_index -> generator + # NOTE(woosuk): The indices of the requests that do not have their own + # generator should not be included in the dictionary. self.generators: Dict[int, torch.Generator] = {} self.num_logprobs: Dict[str, int] = {} @@ -147,7 +149,10 @@ def add_request( if sampling_params.top_k > 0: self.top_k_reqs.add(req_id) - self.generators[req_index] = request.generator + # NOTE(woosuk): self.generators should not include the requests that + # do not have their own generator. + if request.generator is not None: + self.generators[req_index] = request.generator num_logprobs = sampling_params.logprobs if num_logprobs is not None and num_logprobs > 0: From 6d525288c1a40ee70f9cff2fe08657f23bae88dc Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 9 Dec 2024 20:15:34 -0500 Subject: [PATCH 287/397] [Docs] Add dedicated tool calling page to docs (#10554) Signed-off-by: mgoin Co-authored-by: Tyler Michael Smith --- docs/source/index.rst | 1 + .../serving/openai_compatible_server.md | 217 ------------- docs/source/usage/tool_calling.md | 287 ++++++++++++++++++ 3 files changed, 288 insertions(+), 217 deletions(-) create mode 100644 docs/source/usage/tool_calling.md diff --git a/docs/source/index.rst b/docs/source/index.rst index 86b1eed2d26ba..c45c941b00e20 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -102,6 +102,7 @@ Documentation usage/lora usage/multimodal_inputs + usage/tool_calling usage/structured_outputs usage/spec_decode usage/compatibility_matrix diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md index d75e90807ca1d..f75653106cf66 100644 --- a/docs/source/serving/openai_compatible_server.md +++ b/docs/source/serving/openai_compatible_server.md @@ -361,220 +361,3 @@ $ vllm serve SOME_MODEL --config config.yaml **NOTE** In case an argument is supplied simultaneously using command line and the config file, the value from the commandline will take precedence. The order of priorities is `command line > config file values > defaults`. - ---- - -## Tool calling in the chat completion API -vLLM currently supports named function calling, as well as the `auto` and `none` options for the `tool_choice` field in the chat completion API. 
The `tool_choice` option `required` is **not yet supported** but on the roadmap. - -It is the callers responsibility to prompt the model with the tool information, vLLM will not automatically manipulate the prompt. -Please see below for recommended configuration and chat templates to use when function calling is to be used with the different models. - - -### Named Function Calling -vLLM supports named function calling in the chat completion API by default. It does so using Outlines, so this is -enabled by default, and will work with any supported model. You are guaranteed a validly-parsable function call - not a -high-quality one. - -vLLM will use guided decoding to ensure the response matches the tool parameter object defined by the JSON schema in the `tools` parameter. - -To use a named function, you need to define the functions in the `tools` parameter of the chat completion request, and -specify the `name` of one of the tools in the `tool_choice` parameter of the chat completion request. - - -### Automatic Function Calling -To enable this feature, you should set the following flags: -* `--enable-auto-tool-choice` -- **mandatory** Auto tool choice. tells vLLM that you want to enable the model to generate its own tool calls when it -deems appropriate. -* `--tool-call-parser` -- select the tool parser to use (listed below). Additional tool parsers -will continue to be added in the future, and also can register your own tool parsers in the `--tool-parser-plugin`. -* `--tool-parser-plugin` -- **optional** tool parser plugin used to register user defined tool parsers into vllm, the registered tool parser name can be specified in `--tool-call-parser`. -* `--chat-template` -- **optional** for auto tool choice. the path to the chat template which handles `tool`-role messages and `assistant`-role messages -that contain previously generated tool calls. Hermes, Mistral and Llama models have tool-compatible chat templates in their -`tokenizer_config.json` files, but you can specify a custom template. This argument can be set to `tool_use` if your model has a tool use-specific chat -template configured in the `tokenizer_config.json`. In this case, it will be used per the `transformers` specification. More on this [here](https://huggingface.co/docs/transformers/en/chat_templating#why-do-some-models-have-multiple-templates) -from HuggingFace; and you can find an example of this in a `tokenizer_config.json` [here](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B/blob/main/tokenizer_config.json) - -If your favorite tool-calling model is not supported, please feel free to contribute a parser & tool use chat template! - - -#### Hermes Models (`hermes`) - -All Nous Research Hermes-series models newer than Hermes 2 Pro should be supported. -* `NousResearch/Hermes-2-Pro-*` -* `NousResearch/Hermes-2-Theta-*` -* `NousResearch/Hermes-3-*` - - -_Note that the Hermes 2 **Theta** models are known to have degraded tool call quality & capabilities due to the merge -step in their creation_. - -Flags: `--tool-call-parser hermes` - - -#### Mistral Models (`mistral`) - -Supported models: -* `mistralai/Mistral-7B-Instruct-v0.3` (confirmed) -* Additional mistral function-calling models are compatible as well. - -Known issues: -1. Mistral 7B struggles to generate parallel tool calls correctly. -2. Mistral's `tokenizer_config.json` chat template requires tool call IDs that are exactly 9 digits, which is -much shorter than what vLLM generates. 
Since an exception is thrown when this condition -is not met, the following additional chat templates are provided: - -* `examples/tool_chat_template_mistral.jinja` - this is the "official" Mistral chat template, but tweaked so that -it works with vLLM's tool call IDs (provided `tool_call_id` fields are truncated to the last 9 digits) -* `examples/tool_chat_template_mistral_parallel.jinja` - this is a "better" version that adds a tool-use system prompt -when tools are provided, that results in much better reliability when working with parallel tool calling. - - -Recommended flags: `--tool-call-parser mistral --chat-template examples/tool_chat_template_mistral_parallel.jinja` - - -#### Llama Models (`llama3_json`) - -Supported models: -* `meta-llama/Meta-Llama-3.1-8B-Instruct` -* `meta-llama/Meta-Llama-3.1-70B-Instruct` -* `meta-llama/Meta-Llama-3.1-405B-Instruct` -* `meta-llama/Meta-Llama-3.1-405B-Instruct-FP8` - -The tool calling that is supported is the [JSON based tool calling](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#json-based-tool-calling). For [pythonic tool calling](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/text_prompt_format.md#zero-shot-function-calling) in Llama-3.2 models, see the `pythonic` tool parser below. -Other tool calling formats like the built in python tool calling or custom tool calling are not supported. - -Known issues: -1. Parallel tool calls are not supported. -2. The model can generate parameters with a wrong format, such as generating - an array serialized as string instead of an array. - -The `tool_chat_template_llama3_json.jinja` file contains the "official" Llama chat template, but tweaked so that -it works better with vLLM. - -Recommended flags: `--tool-call-parser llama3_json --chat-template examples/tool_chat_template_llama3_json.jinja` - -#### IBM Granite - -Supported models: -* `ibm-granite/granite-3.0-8b-instruct` - -Recommended flags: `--tool-call-parser granite --chat-template examples/tool_chat_template_granite.jinja` - -`examples/tool_chat_template_granite.jinja`: this is a modified chat template from the original on Huggingface. Parallel function calls are supported. - -* `ibm-granite/granite-20b-functioncalling` - -Recommended flags: `--tool-call-parser granite-20b-fc --chat-template examples/tool_chat_template_granite_20b_fc.jinja` - -`examples/tool_chat_template_granite_20b_fc.jinja`: this is a modified chat template from the original on Huggingface, which is not vLLM compatible. It blends function description elements from the Hermes template and follows the same system prompt as "Response Generation" mode from [the paper](https://arxiv.org/abs/2407.00121). Parallel function calls are supported. - - -#### InternLM Models (`internlm`) - -Supported models: -* `internlm/internlm2_5-7b-chat` (confirmed) -* Additional internlm2.5 function-calling models are compatible as well - -Known issues: -* Although this implementation also supports InternLM2, the tool call results are not stable when testing with the `internlm/internlm2-chat-7b` model. - -Recommended flags: `--tool-call-parser internlm --chat-template examples/tool_chat_template_internlm2_tool.jinja` - - -#### Jamba Models (`jamba`) -AI21's Jamba-1.5 models are supported. -* `ai21labs/AI21-Jamba-1.5-Mini` -* `ai21labs/AI21-Jamba-1.5-Large` - - -Flags: `--tool-call-parser jamba` - - -#### Models with Pythonic Tool Calls (`pythonic`) - -A growing number of models output a python list to represent tool calls instead of using JSON. 
This has the advantage of inherently supporting parallel tool calls and removing ambiguity around the JSON schema required for tool calls. The `pythonic` tool parser can support such models. - -As a concrete example, these models may look up the weather in San Francisco and Seattle by generating: -```python -[get_weather(city='San Francisco', metric='celsius'), get_weather(city='Seattle', metric='celsius')] -``` - -Limitations: -* The model must not generate both text and tool calls in the same generation. This may not be hard to change for a specific model, but the community currently lacks consensus on which tokens to emit when starting and ending tool calls. (In particular, the Llama 3.2 models emit no such tokens.) -* Llama's smaller models struggle to use tools effectively. - -Example supported models: -* `meta-llama/Llama-3.2-1B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) -* `meta-llama/Llama-3.2-3B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) -* `Team-ACE/ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) -* `fixie-ai/ultravox-v0_4-ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) - -Flags: `--tool-call-parser pythonic --chat-template {see_above}` - ---- -**WARNING** -Llama's smaller models frequently fail to emit tool calls in the correct format. Your mileage may vary. - ---- - - -### How to write a tool parser plugin - -A tool parser plugin is a Python file containing one or more ToolParser implementations. You can write a ToolParser similar to the `Hermes2ProToolParser` in vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py. - -Here is a summary of a plugin file: - -```python - -# import the required packages - -# define a tool parser and register it to vllm -# the name list in register_module can be used -# in --tool-call-parser. you can define as many -# tool parsers as you want here. -@ToolParserManager.register_module(["example"]) -class ExampleToolParser(ToolParser): - def __init__(self, tokenizer: AnyTokenizer): - super().__init__(tokenizer) - - # adjust request. e.g.: set skip special tokens - # to False for tool call output. - def adjust_request( - self, request: ChatCompletionRequest) -> ChatCompletionRequest: - return request - - # implement the tool call parse for stream call - def extract_tool_calls_streaming( - self, - previous_text: str, - current_text: str, - delta_text: str, - previous_token_ids: Sequence[int], - current_token_ids: Sequence[int], - delta_token_ids: Sequence[int], - request: ChatCompletionRequest, - ) -> Union[DeltaMessage, None]: - return delta - - # implement the tool parse for non-stream call - def extract_tool_calls( - self, - model_output: str, - request: ChatCompletionRequest, - ) -> ExtractedToolCallInformation: - return ExtractedToolCallInformation(tools_called=False, - tool_calls=[], - content=text) - - -``` - -Then you can use this plugin in the command line like this. -``` - --enable-auto-tool-choice \ - --tool-parser-plugin - --tool-call-parser example \ - --chat-template \ -``` - diff --git a/docs/source/usage/tool_calling.md b/docs/source/usage/tool_calling.md new file mode 100644 index 0000000000000..f8be023307b0c --- /dev/null +++ b/docs/source/usage/tool_calling.md @@ -0,0 +1,287 @@ +# Tool Calling + +vLLM currently supports named function calling, as well as the `auto` and `none` options for the `tool_choice` field in the chat completion API. The `tool_choice` option `required` is **not yet supported** but on the roadmap. 
+ +## Quickstart + +Start the server with tool calling enabled. This example uses Meta's Llama 3.1 8B model, so we need to use the llama3 tool calling chat template from the vLLM examples directory: + +```bash +vllm serve meta-llama/Llama-3.1-8B-Instruct \ + --enable-auto-tool-choice \ + --tool-call-parser llama3_json \ + --chat-template examples/tool_chat_template_llama3_json.jinja +``` + +Next, make a request to the model that should result in it using the available tools: + +```python +from openai import OpenAI +import json + +client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy") + +def get_weather(location: str, unit: str): + return f"Getting the weather for {location} in {unit}..." +tool_functions = {"get_weather": get_weather} + +tools = [{ + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"}, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} + }, + "required": ["location", "unit"] + } + } +}] + +response = client.chat.completions.create( + model=client.models.list().data[0].id, + messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}], + tools=tools, + tool_choice="auto" +) + +tool_call = response.choices[0].message.tool_calls[0].function +print(f"Function called: {tool_call.name}") +print(f"Arguments: {tool_call.arguments}") +print(f"Result: {get_weather(**json.loads(tool_call.arguments))}") +``` + +Example output: +``` +Function called: get_weather +Arguments: {"location": "San Francisco, CA", "unit": "fahrenheit"} +Result: Getting the weather for San Francisco, CA in fahrenheit... +``` + +This example demonstrates: +- Setting up the server with tool calling enabled +- Defining an actual function to handle tool calls +- Making a request with `tool_choice="auto"` +- Handling the structured response and executing the corresponding function + +You can also specify a particular function using named function calling by setting `tool_choice={"type": "function", "function": {"name": "get_weather"}}`. Note that this will use the guided decoding backend - so the first time this is used, there will be several seconds of latency (or more) as the FSM is compiled for the first time before it is cached for subsequent requests. + +Remember that it's the callers responsibility to: +1. Define appropriate tools in the request +2. Include relevant context in the chat messages +3. Handle the tool calls in your application logic + +For more advanced usage, including parallel tool calls and different model-specific parsers, see the sections below. + +## Named Function Calling +vLLM supports named function calling in the chat completion API by default. It does so using Outlines through guided decoding, so this is +enabled by default, and will work with any supported model. You are guaranteed a validly-parsable function call - not a +high-quality one. + +vLLM will use guided decoding to ensure the response matches the tool parameter object defined by the JSON schema in the `tools` parameter. +For best results, we recommend ensuring that the expected output format / schema is specified in the prompt to ensure that the model's intended generation is aligned with the schema that it's being forced to generate by the guided decoding backend. 
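+
+As a rough sketch (reusing the `client` and `tools` objects defined in the Quickstart above), a request that pins generation to one specific tool might look like:
+
+```python
+response = client.chat.completions.create(
+    model=client.models.list().data[0].id,
+    messages=[{"role": "user",
+               "content": "What's the weather like in San Francisco?"}],
+    tools=tools,
+    # Pin the response to the named tool instead of letting the model decide
+    tool_choice={"type": "function", "function": {"name": "get_weather"}},
+)
+
+print(response.choices[0].message.tool_calls[0].function.arguments)
+```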
+ +To use a named function, you need to define the functions in the `tools` parameter of the chat completion request, and +specify the `name` of one of the tools in the `tool_choice` parameter of the chat completion request. + + +## Automatic Function Calling + +To enable this feature, you should set the following flags: +* `--enable-auto-tool-choice` -- **mandatory** Auto tool choice. tells vLLM that you want to enable the model to generate its own tool calls when it +deems appropriate. +* `--tool-call-parser` -- select the tool parser to use (listed below). Additional tool parsers +will continue to be added in the future, and also can register your own tool parsers in the `--tool-parser-plugin`. +* `--tool-parser-plugin` -- **optional** tool parser plugin used to register user defined tool parsers into vllm, the registered tool parser name can be specified in `--tool-call-parser`. +* `--chat-template` -- **optional** for auto tool choice. the path to the chat template which handles `tool`-role messages and `assistant`-role messages +that contain previously generated tool calls. Hermes, Mistral and Llama models have tool-compatible chat templates in their +`tokenizer_config.json` files, but you can specify a custom template. This argument can be set to `tool_use` if your model has a tool use-specific chat +template configured in the `tokenizer_config.json`. In this case, it will be used per the `transformers` specification. More on this [here](https://huggingface.co/docs/transformers/en/chat_templating#why-do-some-models-have-multiple-templates) +from HuggingFace; and you can find an example of this in a `tokenizer_config.json` [here](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B/blob/main/tokenizer_config.json) + +If your favorite tool-calling model is not supported, please feel free to contribute a parser & tool use chat template! + + +### Hermes Models (`hermes`) + +All Nous Research Hermes-series models newer than Hermes 2 Pro should be supported. +* `NousResearch/Hermes-2-Pro-*` +* `NousResearch/Hermes-2-Theta-*` +* `NousResearch/Hermes-3-*` + + +_Note that the Hermes 2 **Theta** models are known to have degraded tool call quality & capabilities due to the merge +step in their creation_. + +Flags: `--tool-call-parser hermes` + + +### Mistral Models (`mistral`) + +Supported models: +* `mistralai/Mistral-7B-Instruct-v0.3` (confirmed) +* Additional mistral function-calling models are compatible as well. + +Known issues: +1. Mistral 7B struggles to generate parallel tool calls correctly. +2. Mistral's `tokenizer_config.json` chat template requires tool call IDs that are exactly 9 digits, which is +much shorter than what vLLM generates. Since an exception is thrown when this condition +is not met, the following additional chat templates are provided: + +* `examples/tool_chat_template_mistral.jinja` - this is the "official" Mistral chat template, but tweaked so that +it works with vLLM's tool call IDs (provided `tool_call_id` fields are truncated to the last 9 digits) +* `examples/tool_chat_template_mistral_parallel.jinja` - this is a "better" version that adds a tool-use system prompt +when tools are provided, that results in much better reliability when working with parallel tool calling. 
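+
+Putting this together, a minimal sketch of a server launch for one of these models (the model name is illustrative) could be:
+
+```bash
+vllm serve mistralai/Mistral-7B-Instruct-v0.3 \
+    --enable-auto-tool-choice \
+    --tool-call-parser mistral \
+    --chat-template examples/tool_chat_template_mistral_parallel.jinja
+```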
+ + +Recommended flags: `--tool-call-parser mistral --chat-template examples/tool_chat_template_mistral_parallel.jinja` + + +### Llama Models (`llama3_json`) + +Supported models: +* `meta-llama/Meta-Llama-3.1-8B-Instruct` +* `meta-llama/Meta-Llama-3.1-70B-Instruct` +* `meta-llama/Meta-Llama-3.1-405B-Instruct` +* `meta-llama/Meta-Llama-3.1-405B-Instruct-FP8` + +The tool calling that is supported is the [JSON based tool calling](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#json-based-tool-calling). For [pythonic tool calling](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/text_prompt_format.md#zero-shot-function-calling) in Llama-3.2 models, see the `pythonic` tool parser below. +Other tool calling formats like the built in python tool calling or custom tool calling are not supported. + +Known issues: +1. Parallel tool calls are not supported. +2. The model can generate parameters with a wrong format, such as generating + an array serialized as string instead of an array. + +The `tool_chat_template_llama3_json.jinja` file contains the "official" Llama chat template, but tweaked so that +it works better with vLLM. + +Recommended flags: `--tool-call-parser llama3_json --chat-template examples/tool_chat_template_llama3_json.jinja` + +#### IBM Granite + +Supported models: +* `ibm-granite/granite-3.0-8b-instruct` + +Recommended flags: `--tool-call-parser granite --chat-template examples/tool_chat_template_granite.jinja` + +`examples/tool_chat_template_granite.jinja`: this is a modified chat template from the original on Huggingface. Parallel function calls are supported. + +* `ibm-granite/granite-20b-functioncalling` + +Recommended flags: `--tool-call-parser granite-20b-fc --chat-template examples/tool_chat_template_granite_20b_fc.jinja` + +`examples/tool_chat_template_granite_20b_fc.jinja`: this is a modified chat template from the original on Huggingface, which is not vLLM compatible. It blends function description elements from the Hermes template and follows the same system prompt as "Response Generation" mode from [the paper](https://arxiv.org/abs/2407.00121). Parallel function calls are supported. + + +### InternLM Models (`internlm`) + +Supported models: +* `internlm/internlm2_5-7b-chat` (confirmed) +* Additional internlm2.5 function-calling models are compatible as well + +Known issues: +* Although this implementation also supports InternLM2, the tool call results are not stable when testing with the `internlm/internlm2-chat-7b` model. + +Recommended flags: `--tool-call-parser internlm --chat-template examples/tool_chat_template_internlm2_tool.jinja` + + +### Jamba Models (`jamba`) +AI21's Jamba-1.5 models are supported. +* `ai21labs/AI21-Jamba-1.5-Mini` +* `ai21labs/AI21-Jamba-1.5-Large` + + +Flags: `--tool-call-parser jamba` + + +### Models with Pythonic Tool Calls (`pythonic`) + +A growing number of models output a python list to represent tool calls instead of using JSON. This has the advantage of inherently supporting parallel tool calls and removing ambiguity around the JSON schema required for tool calls. The `pythonic` tool parser can support such models. + +As a concrete example, these models may look up the weather in San Francisco and Seattle by generating: +```python +[get_weather(city='San Francisco', metric='celsius'), get_weather(city='Seattle', metric='celsius')] +``` + +Limitations: +* The model must not generate both text and tool calls in the same generation. 
This may not be hard to change for a specific model, but the community currently lacks consensus on which tokens to emit when starting and ending tool calls. (In particular, the Llama 3.2 models emit no such tokens.) +* Llama's smaller models struggle to use tools effectively. + +Example supported models: +* `meta-llama/Llama-3.2-1B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) +* `meta-llama/Llama-3.2-3B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) +* `Team-ACE/ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) +* `fixie-ai/ultravox-v0_4-ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) + +Flags: `--tool-call-parser pythonic --chat-template {see_above}` + +--- +**WARNING** +Llama's smaller models frequently fail to emit tool calls in the correct format. Your mileage may vary. + +--- + + +## How to write a tool parser plugin + +A tool parser plugin is a Python file containing one or more ToolParser implementations. You can write a ToolParser similar to the `Hermes2ProToolParser` in vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py. + +Here is a summary of a plugin file: + +```python + +# import the required packages + +# define a tool parser and register it to vllm +# the name list in register_module can be used +# in --tool-call-parser. you can define as many +# tool parsers as you want here. +@ToolParserManager.register_module(["example"]) +class ExampleToolParser(ToolParser): + def __init__(self, tokenizer: AnyTokenizer): + super().__init__(tokenizer) + + # adjust request. e.g.: set skip special tokens + # to False for tool call output. + def adjust_request( + self, request: ChatCompletionRequest) -> ChatCompletionRequest: + return request + + # implement the tool call parse for stream call + def extract_tool_calls_streaming( + self, + previous_text: str, + current_text: str, + delta_text: str, + previous_token_ids: Sequence[int], + current_token_ids: Sequence[int], + delta_token_ids: Sequence[int], + request: ChatCompletionRequest, + ) -> Union[DeltaMessage, None]: + return delta + + # implement the tool parse for non-stream call + def extract_tool_calls( + self, + model_output: str, + request: ChatCompletionRequest, + ) -> ExtractedToolCallInformation: + return ExtractedToolCallInformation(tools_called=False, + tool_calls=[], + content=text) + + +``` + +Then you can use this plugin in the command line like this. 
+``` + --enable-auto-tool-choice \ + --tool-parser-plugin + --tool-call-parser example \ + --chat-template \ +``` + From d1f6d1c8af892c7269f113711783374eebb52511 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Tue, 10 Dec 2024 10:23:07 +0800 Subject: [PATCH 288/397] [Model] Add has_weight to RMSNorm and re-enable weights loading tracker for Mamba (#10739) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/layers/layernorm.py | 11 ++++++-- .../layers/mamba/mamba_mixer.py | 26 +++++++++++++------ vllm/model_executor/models/mamba.py | 9 +++++-- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/vllm/model_executor/layers/layernorm.py b/vllm/model_executor/layers/layernorm.py index 345919c5d1636..43ea4eb5a4d1a 100644 --- a/vllm/model_executor/layers/layernorm.py +++ b/vllm/model_executor/layers/layernorm.py @@ -20,6 +20,7 @@ def __init__( hidden_size: int, eps: float = 1e-6, var_hidden_size: Optional[int] = None, + has_weight: bool = True, ) -> None: super().__init__() @@ -27,7 +28,11 @@ def __init__( self.variance_epsilon = eps self.variance_size_override = (None if var_hidden_size == hidden_size else var_hidden_size) - self.weight = nn.Parameter(torch.ones(hidden_size)) + self.has_weight = has_weight + + self.weight = torch.ones(hidden_size) + if self.has_weight: + self.weight = nn.Parameter(self.weight) def forward_native( self, @@ -59,7 +64,9 @@ def forward_native( variance = x_var.pow(2).mean(dim=-1, keepdim=True) x = x * torch.rsqrt(variance + self.variance_epsilon) - x = x.to(orig_dtype) * self.weight + x = x.to(orig_dtype) + if self.has_weight: + x = x * self.weight if residual is None: return x else: diff --git a/vllm/model_executor/layers/mamba/mamba_mixer.py b/vllm/model_executor/layers/mamba/mamba_mixer.py index 8ef0a6cdf2c52..10bec75f49fdf 100644 --- a/vllm/model_executor/layers/mamba/mamba_mixer.py +++ b/vllm/model_executor/layers/mamba/mamba_mixer.py @@ -40,6 +40,7 @@ def __init__(self, use_conv_bias: bool, use_bias: bool, use_rms_norm: bool, + rms_norm_has_weight: bool = True, rms_norm_eps: float = 1e-5, activation="silu"): super().__init__() @@ -105,14 +106,23 @@ def A_weight_loader(param: Parameter, loaded_weight: torch.Tensor): input_is_parallel=True, ) - self.dt_layernorm = RMSNorm(time_step_rank, - eps=rms_norm_eps) if use_rms_norm else None - - self.b_layernorm = RMSNorm(ssm_state_size, - eps=rms_norm_eps) if use_rms_norm else None - - self.c_layernorm = RMSNorm(ssm_state_size, - eps=rms_norm_eps) if use_rms_norm else None + self.dt_layernorm = RMSNorm( + time_step_rank, + eps=rms_norm_eps, + has_weight=rms_norm_has_weight, + ) if use_rms_norm else None + + self.b_layernorm = RMSNorm( + ssm_state_size, + eps=rms_norm_eps, + has_weight=rms_norm_has_weight, + ) if use_rms_norm else None + + self.c_layernorm = RMSNorm( + ssm_state_size, + eps=rms_norm_eps, + has_weight=rms_norm_has_weight, + ) if use_rms_norm else None def forward_native(self, hidden_states: torch.Tensor, attn_metadata: AttentionMetadata, diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index b32032e411b0a..8bdcd2c5aad1f 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -1,5 +1,5 @@ """PyTorch MAMBA model.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -47,6 +47,7 @@ def __init__(self, use_conv_bias=config.use_conv_bias, use_bias=config.use_bias, use_rms_norm=self.is_falcon_mamba, + 
rms_norm_has_weight=not self.is_falcon_mamba, rms_norm_eps=mixer_rms_eps, activation=config.hidden_act) @@ -241,8 +242,10 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "A_log" in name: name = name.replace("A_log", "A") @@ -254,3 +257,5 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params From 391d7b2763df0b90a975c7232f38c4de4be2ff85 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 10 Dec 2024 13:45:47 +0800 Subject: [PATCH 289/397] [Bugfix] Fix usage of `deprecated` decorator (#11025) Signed-off-by: DarkLight1337 --- vllm/engine/llm_engine.py | 8 +-- vllm/engine/multiprocessing/__init__.py | 8 +-- vllm/engine/multiprocessing/client.py | 16 +++--- vllm/entrypoints/llm.py | 72 ++++++++++++------------- 4 files changed, 52 insertions(+), 52 deletions(-) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 560f84a008291..8fc69d96d321e 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -677,12 +677,10 @@ def stop_remote_worker_execution_loop(self) -> None: self.model_executor.stop_remote_worker_execution_loop() @overload - @deprecated("'inputs' will be renamed to 'prompt") def add_request( self, request_id: str, - *, - inputs: PromptType, + prompt: PromptType, params: Union[SamplingParams, PoolingParams], arrival_time: Optional[float] = None, lora_request: Optional[LoRARequest] = None, @@ -693,10 +691,12 @@ def add_request( ... @overload + @deprecated("'inputs' will be renamed to 'prompt") def add_request( self, request_id: str, - prompt: PromptType, + *, + inputs: PromptType, params: Union[SamplingParams, PoolingParams], arrival_time: Optional[float] = None, lora_request: Optional[LoRARequest] = None, diff --git a/vllm/engine/multiprocessing/__init__.py b/vllm/engine/multiprocessing/__init__.py index 7020012e8bb86..420f540d0b5f4 100644 --- a/vllm/engine/multiprocessing/__init__.py +++ b/vllm/engine/multiprocessing/__init__.py @@ -35,11 +35,9 @@ class RPCProcessRequest: priority: int = 0 @overload - @deprecated("'inputs' will be renamed to 'prompt") def __init__( self, - *, - inputs: PromptType, + prompt: PromptType, params: Union[SamplingParams, PoolingParams], request_id: str, lora_request: Optional[LoRARequest] = None, @@ -50,9 +48,11 @@ def __init__( ... 
@overload + @deprecated("'inputs' will be renamed to 'prompt") def __init__( self, - prompt: PromptType, + *, + inputs: PromptType, params: Union[SamplingParams, PoolingParams], request_id: str, lora_request: Optional[LoRARequest] = None, diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index 7e4f81b2cf8e2..32bd83305bb8f 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -415,11 +415,9 @@ def dead_error(self) -> BaseException: return ENGINE_DEAD_ERROR(self._errored_with) @overload - @deprecated("'inputs' will be renamed to 'prompt") def generate( self, - *, - inputs: PromptType, + prompt: PromptType, sampling_params: SamplingParams, request_id: str, lora_request: Optional[LoRARequest] = None, @@ -430,9 +428,11 @@ def generate( ... @overload + @deprecated("'inputs' will be renamed to 'prompt") def generate( self, - prompt: PromptType, + *, + inputs: PromptType, sampling_params: SamplingParams, request_id: str, lora_request: Optional[LoRARequest] = None, @@ -487,11 +487,9 @@ def generate( prompt_adapter_request, priority) @overload - @deprecated("'inputs' will be renamed to 'prompt") def encode( self, - *, - inputs: PromptType, + prompt: PromptType, pooling_params: PoolingParams, request_id: str, lora_request: Optional[LoRARequest] = None, @@ -501,9 +499,11 @@ def encode( ... @overload + @deprecated("'inputs' will be renamed to 'prompt") def encode( self, - prompt: PromptType, + *, + inputs: PromptType, pooling_params: PoolingParams, request_id: str, lora_request: Optional[LoRARequest] = None, diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 8de30ccd18a11..2a02187223a33 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -252,8 +252,21 @@ def set_tokenizer(self, tokenizer: AnyTokenizer) -> None: else: tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer) + @overload + def generate( + self, + prompts: Union[PromptType, Sequence[PromptType]], + /, + *, + sampling_params: Optional[Union[SamplingParams, + Sequence[SamplingParams]]] = None, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + ) -> List[RequestOutput]: + ... + @overload # LEGACY: single (prompt + optional token ids) - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: str, @@ -266,7 +279,7 @@ def generate( ... @overload # LEGACY: multi (prompt + optional token ids) - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: List[str], @@ -279,7 +292,7 @@ def generate( ... @overload # LEGACY: single (token ids + optional prompt) - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: Optional[str] = None, @@ -293,7 +306,7 @@ def generate( ... @overload # LEGACY: multi (token ids + optional prompt) - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: Optional[List[str]] = None, @@ -307,7 +320,7 @@ def generate( ... 
@overload # LEGACY: single or multi token ids [pos-only] - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: None, @@ -318,19 +331,6 @@ def generate( ) -> List[RequestOutput]: ... - @overload - def generate( - self, - prompts: Union[PromptType, Sequence[PromptType]], - /, - *, - sampling_params: Optional[Union[SamplingParams, - Sequence[SamplingParams]]] = None, - use_tqdm: bool = True, - lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[RequestOutput]: - ... - @deprecate_kwargs( "prompt_token_ids", is_deprecated=lambda: LLM.DEPRECATE_LEGACY, @@ -672,8 +672,21 @@ def chat( lora_request=lora_request, ) + @overload + def encode( + self, + prompts: Union[PromptType, Sequence[PromptType]], + /, + *, + pooling_params: Optional[Union[PoolingParams, + Sequence[PoolingParams]]] = None, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + ) -> List[PoolingRequestOutput]: + ... + @overload # LEGACY: single (prompt + optional token ids) - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: str, @@ -686,7 +699,7 @@ def encode( ... @overload # LEGACY: multi (prompt + optional token ids) - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: List[str], @@ -699,7 +712,7 @@ def encode( ... @overload # LEGACY: single (token ids + optional prompt) - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: Optional[str] = None, @@ -713,7 +726,7 @@ def encode( ... @overload # LEGACY: multi (token ids + optional prompt) - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: Optional[List[str]] = None, @@ -727,7 +740,7 @@ def encode( ... @overload # LEGACY: single or multi token ids [pos-only] - @deprecated("'prompt_token_ids' will become part of 'prompts") + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: None, @@ -738,19 +751,6 @@ def encode( ) -> List[PoolingRequestOutput]: ... - @overload - def encode( - self, - prompts: Union[PromptType, Sequence[PromptType]], - /, - *, - pooling_params: Optional[Union[PoolingParams, - Sequence[PoolingParams]]] = None, - use_tqdm: bool = True, - lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[PoolingRequestOutput]: - ... 
- @deprecate_kwargs( "prompt_token_ids", is_deprecated=lambda: LLM.DEPRECATE_LEGACY, From 980ad394a83a6f12c576a035922db3c2e743beff Mon Sep 17 00:00:00 2001 From: Joe Runde Date: Mon, 9 Dec 2024 22:46:29 -0700 Subject: [PATCH 290/397] [Frontend] Use request id from header (#10968) Signed-off-by: Joe Runde --- docs/requirements-docs.txt | 1 + vllm/entrypoints/openai/api_server.py | 4 ++-- vllm/entrypoints/openai/serving_chat.py | 3 ++- vllm/entrypoints/openai/serving_completion.py | 4 ++-- vllm/entrypoints/openai/serving_embedding.py | 4 ++-- vllm/entrypoints/openai/serving_engine.py | 11 ++++++++++- vllm/entrypoints/openai/serving_score.py | 4 ++-- vllm/entrypoints/openai/serving_tokenization.py | 9 ++++++--- 8 files changed, 27 insertions(+), 13 deletions(-) diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt index 5c80645b405ae..ca2da4cd66d2d 100644 --- a/docs/requirements-docs.txt +++ b/docs/requirements-docs.txt @@ -16,5 +16,6 @@ mistral_common >= 1.5.0 aiohttp starlette openai # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args +fastapi # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args partial-json-parser # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args requests diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index c7bc30040279c..0f93eb54111ad 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -305,7 +305,7 @@ async def health(raw_request: Request) -> Response: async def tokenize(request: TokenizeRequest, raw_request: Request): handler = tokenization(raw_request) - generator = await handler.create_tokenize(request) + generator = await handler.create_tokenize(request, raw_request) if isinstance(generator, ErrorResponse): return JSONResponse(content=generator.model_dump(), status_code=generator.code) @@ -319,7 +319,7 @@ async def tokenize(request: TokenizeRequest, raw_request: Request): async def detokenize(request: DetokenizeRequest, raw_request: Request): handler = tokenization(raw_request) - generator = await handler.create_detokenize(request) + generator = await handler.create_detokenize(request, raw_request) if isinstance(generator, ErrorResponse): return JSONResponse(content=generator.model_dump(), status_code=generator.code) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 54ca0463bcab1..0af7613a473a4 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -176,7 +176,8 @@ async def create_chat_completion( logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(str(e)) - request_id = f"chatcmpl-{request.request_id}" + request_id = "chatcmpl-" \ + f"{self._base_request_id(raw_request, request.request_id)}" request_metadata = RequestResponseMetadata(request_id=request_id) if raw_request: diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index fc1c4908d6650..c54d5f07cf58c 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -30,7 +30,7 @@ from vllm.sampling_params import BeamSearchParams, SamplingParams from vllm.sequence import Logprob from vllm.transformers_utils.tokenizer import AnyTokenizer -from vllm.utils import merge_async_iterators, random_uuid +from vllm.utils import 
merge_async_iterators logger = init_logger(__name__) @@ -86,7 +86,7 @@ async def create_completion( "suffix is not currently supported") model_name = self.base_model_paths[0].name - request_id = f"cmpl-{random_uuid()}" + request_id = f"cmpl-{self._base_request_id(raw_request)}" created_time = int(time.time()) request_metadata = RequestResponseMetadata(request_id=request_id) diff --git a/vllm/entrypoints/openai/serving_embedding.py b/vllm/entrypoints/openai/serving_embedding.py index 2cbb252610e39..3f7b75e893cad 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -19,7 +19,7 @@ from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing from vllm.logger import init_logger from vllm.outputs import PoolingOutput, PoolingRequestOutput -from vllm.utils import merge_async_iterators, random_uuid +from vllm.utils import merge_async_iterators logger = init_logger(__name__) @@ -110,7 +110,7 @@ async def create_embedding( "dimensions is currently not supported") model_name = request.model - request_id = f"embd-{random_uuid()}" + request_id = f"embd-{self._base_request_id(raw_request)}" created_time = int(time.monotonic()) truncate_prompt_tokens = None diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 8232c6116c1bd..63f27b955461e 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -6,6 +6,7 @@ from typing import (Any, Callable, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple, TypedDict, Union) +from fastapi import Request from pydantic import Field from starlette.datastructures import Headers from typing_extensions import Annotated @@ -47,7 +48,7 @@ from vllm.tracing import (contains_trace_headers, extract_trace_headers, log_tracing_disabled_warning) from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import AtomicCounter, is_list_of, make_async +from vllm.utils import AtomicCounter, is_list_of, make_async, random_uuid logger = init_logger(__name__) @@ -565,6 +566,14 @@ async def _get_trace_headers( return None + @staticmethod + def _base_request_id(raw_request: Request, + default: Optional[str] = None) -> Optional[str]: + """Pulls the request id to use from a header, if provided""" + default = default or random_uuid() + return raw_request.headers.get( + "X-Request-Id", default) if raw_request is not None else default + @staticmethod def _get_decoded_token(logprob: Logprob, token_id: int, diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py index a1f14449ba9c3..fed06fa452955 100644 --- a/vllm/entrypoints/openai/serving_score.py +++ b/vllm/entrypoints/openai/serving_score.py @@ -15,7 +15,7 @@ from vllm.logger import init_logger from vllm.outputs import PoolingRequestOutput from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer -from vllm.utils import make_async, merge_async_iterators, random_uuid +from vllm.utils import make_async, merge_async_iterators logger = init_logger(__name__) @@ -102,7 +102,7 @@ async def create_score( return error_check_ret model_name = request.model - request_id = f"score-{random_uuid()}" + request_id = f"score-{self._base_request_id(raw_request)}" created_time = int(time.monotonic()) truncate_prompt_tokens = request.truncate_prompt_tokens diff --git a/vllm/entrypoints/openai/serving_tokenization.py b/vllm/entrypoints/openai/serving_tokenization.py index 
9c3dc2c98b2dd..2e849333680d4 100644 --- a/vllm/entrypoints/openai/serving_tokenization.py +++ b/vllm/entrypoints/openai/serving_tokenization.py @@ -1,5 +1,7 @@ from typing import Final, List, Optional, Union +from fastapi import Request + from vllm.config import ModelConfig from vllm.engine.protocol import EngineClient from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption @@ -17,7 +19,6 @@ LoRAModulePath, OpenAIServing) from vllm.logger import init_logger -from vllm.utils import random_uuid logger = init_logger(__name__) @@ -48,12 +49,13 @@ def __init__( async def create_tokenize( self, request: TokenizeRequest, + raw_request: Request, ) -> Union[TokenizeResponse, ErrorResponse]: error_check_ret = await self._check_model(request) if error_check_ret is not None: return error_check_ret - request_id = f"tokn-{random_uuid()}" + request_id = f"tokn-{self._base_request_id(raw_request)}" try: ( @@ -112,12 +114,13 @@ async def create_tokenize( async def create_detokenize( self, request: DetokenizeRequest, + raw_request: Request, ) -> Union[DetokenizeResponse, ErrorResponse]: error_check_ret = await self._check_model(request) if error_check_ret is not None: return error_check_ret - request_id = f"tokn-{random_uuid()}" + request_id = f"tokn-{self._base_request_id(raw_request)}" ( lora_request, From bc192a2b099558ec94864974b2a91b84c271a84d Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 10 Dec 2024 07:09:32 +0100 Subject: [PATCH 291/397] [Pixtral] Improve loading (#11040) --- vllm/model_executor/models/pixtral.py | 56 ++++++++++++--------------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index c6786c363ab4a..94a4ab882c1a9 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -1,6 +1,5 @@ from dataclasses import dataclass, fields from functools import cached_property -from itertools import tee from typing import Iterable, List, Mapping, Optional, Set, Tuple, Union import numpy @@ -359,38 +358,33 @@ def is_vision_encoder_weights(weight: Tuple[str, torch.Tensor]): def is_vision_lang_adapter_weights(weight: Tuple[str, torch.Tensor]): return weight[0].startswith("vision_language_adapter") - def is_vision_weights(weight: Tuple[str, torch.Tensor]): - return is_vision_encoder_weights( - weight) or is_vision_lang_adapter_weights(weight) - - llm_weights, vision_encoder_weights, vision_lang_adapter_weights = tee( - weights, 3) - - # llm - llm_weights = filter(lambda x: not is_vision_weights(x), llm_weights) - self.language_model.load_weights(llm_weights) - - # vision encoder - vision_encoder_weights = filter(is_vision_encoder_weights, - vision_encoder_weights) + # Get references to parameters for direct loading vision_encoder_dict = dict(self.vision_encoder.named_parameters()) - for name, loaded_weight in vision_encoder_weights: - # cut 'vision_encoder.' - name = '.'.join(name.split(".")[1:]) - param = vision_encoder_dict[name] - - default_weight_loader(param, loaded_weight) - - # adapter - vision_lang_adapter_weights = filter(is_vision_lang_adapter_weights, - vision_lang_adapter_weights) - vision_lang_adpter_dict = dict( + vision_lang_adapter_dict = dict( self.vision_language_adapter.named_parameters()) - for name, loaded_weight in vision_lang_adapter_weights: - # cut 'vision_language_adapter.' 
- name = '.'.join(name.split(".")[1:]) - param = vision_lang_adpter_dict[name] - default_weight_loader(param, loaded_weight) + + def llm_weights_generator(): + # Single pass over weights + for name, w in weights: + if is_vision_encoder_weights((name, w)): + # Load vision encoder weights directly + trimmed_name = '.'.join(name.split(".")[1:]) + param = vision_encoder_dict[trimmed_name] + with torch.no_grad(): + default_weight_loader(param, w) + elif is_vision_lang_adapter_weights((name, w)): + # Load vision-language adapter weights directly + trimmed_name = '.'.join(name.split(".")[1:]) + param = vision_lang_adapter_dict[trimmed_name] + with torch.no_grad(): + default_weight_loader(param, w) + else: + # LLM weights: yield them to be loaded + # by language_model.load_weights + yield (name, w) + + # Now we call the language model load with the generator + self.language_model.load_weights(llm_weights_generator()) # Vision encoder From 28b3a1c7e596c08efac0fcfa59a629d16197be30 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Tue, 10 Dec 2024 01:28:14 -0500 Subject: [PATCH 292/397] [V1] Multiprocessing Tensor Parallel Support for v1 (#9856) Signed-off-by: Tyler Michael Smith --- .../test_basic_correctness.py | 16 + tests/conftest.py | 11 +- .../device_communicators/shm_broadcast.py | 76 ++-- vllm/executor/multiproc_gpu_executor.py | 47 +-- vllm/executor/multiproc_worker_utils.py | 42 ++ .../model_executor/layers/logits_processor.py | 5 +- vllm/platforms/cuda.py | 28 +- vllm/utils.py | 26 ++ vllm/v1/core/scheduler.py | 4 +- vllm/v1/engine/async_llm.py | 18 +- vllm/v1/engine/core.py | 74 ++-- vllm/v1/engine/core_client.py | 13 +- vllm/v1/engine/llm_engine.py | 19 +- vllm/v1/executor/abstract.py | 48 +++ vllm/v1/executor/multiproc_executor.py | 375 ++++++++++++++++++ .../{gpu_executor.py => uniproc_executor.py} | 12 +- vllm/v1/outputs.py | 6 +- vllm/v1/sample/sampler.py | 3 +- vllm/v1/utils.py | 33 +- vllm/v1/worker/gpu_model_runner.py | 12 +- vllm/v1/worker/gpu_worker.py | 11 +- 21 files changed, 733 insertions(+), 146 deletions(-) create mode 100644 vllm/v1/executor/abstract.py create mode 100644 vllm/v1/executor/multiproc_executor.py rename vllm/v1/executor/{gpu_executor.py => uniproc_executor.py} (90%) diff --git a/tests/basic_correctness/test_basic_correctness.py b/tests/basic_correctness/test_basic_correctness.py index fcba253d159f3..11d05cefb7313 100644 --- a/tests/basic_correctness/test_basic_correctness.py +++ b/tests/basic_correctness/test_basic_correctness.py @@ -26,6 +26,14 @@ TARGET_TEST_SUITE = os.environ.get("TARGET_TEST_SUITE", "L4") +@pytest.fixture(autouse=True) +def v1(run_with_both_engines): + # Simple autouse wrapper to run both engines for each test + # This can be promoted up to conftest.py to run for every + # test in a package + pass + + def test_vllm_gc_ed(): """Verify vllm instance is GC'ed when it is deleted""" llm = LLM("facebook/opt-125m") @@ -36,6 +44,7 @@ def test_vllm_gc_ed(): assert weak_llm() is None +@pytest.mark.skip_v1 @pytest.mark.parametrize("model", MODELS) @pytest.mark.parametrize("backend", ["FLASH_ATTN", "XFORMERS", "FLASHINFER"]) @pytest.mark.parametrize("dtype", ["half"]) @@ -118,6 +127,11 @@ def test_models_distributed( if attention_backend: os.environ["VLLM_ATTENTION_BACKEND"] = attention_backend + # Import VLLM_USE_V1 dynamically to handle patching + from vllm.envs import VLLM_USE_V1 + if VLLM_USE_V1 and distributed_executor_backend != "mp": + pytest.skip(f"Skip {distributed_executor_backend} for V1") + dtype = "half" max_tokens = 5 @@ -143,6 
+157,7 @@ def test_models_distributed( ) +@pytest.mark.skip_v1 def test_model_with_failure(vllm_runner) -> None: try: with patch("vllm.model_executor.models.opt.OPTForCausalLM.forward", @@ -169,6 +184,7 @@ def test_model_with_failure(vllm_runner) -> None: os.remove(filename) +@pytest.mark.skip_v1 def test_failure_with_async_out_proc(vllm_runner) -> None: filename = None diff --git a/tests/conftest.py b/tests/conftest.py index d6be8f5b00af8..7606e0f11dfeb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,7 +5,6 @@ from enum import Enum from typing import (Any, Callable, Dict, List, Optional, Tuple, Type, TypedDict, TypeVar, Union) -from unittest.mock import patch import numpy as np import pytest @@ -110,7 +109,7 @@ def prompts(self, prompts: _VideoAssetPrompts) -> List[str]: @pytest.fixture(params=[True, False]) -def run_with_both_engines(request): +def run_with_both_engines(request, monkeypatch): # Automatically runs tests twice, once with V1 and once without use_v1 = request.param # Tests decorated with `@skip_v1` are only run without v1 @@ -119,11 +118,11 @@ def run_with_both_engines(request): if use_v1: if skip_v1: pytest.skip("Skipping test on vllm V1") - with patch('vllm.envs.VLLM_USE_V1', True): - yield + monkeypatch.setenv('VLLM_USE_V1', '1') else: - with patch('vllm.envs.VLLM_USE_V1', False): - yield + monkeypatch.setenv('VLLM_USE_V1', '0') + + yield @pytest.fixture(autouse=True) diff --git a/vllm/distributed/device_communicators/shm_broadcast.py b/vllm/distributed/device_communicators/shm_broadcast.py index 2ff1a1ead99c1..9a2d8918d96e5 100644 --- a/vllm/distributed/device_communicators/shm_broadcast.py +++ b/vllm/distributed/device_communicators/shm_broadcast.py @@ -1,10 +1,11 @@ import os import pickle +import sys import time from contextlib import contextmanager from dataclasses import dataclass, field from multiprocessing import shared_memory -from typing import List, Optional +from typing import List, Optional, Tuple from unittest.mock import patch import torch @@ -21,6 +22,20 @@ logger = init_logger(__name__) +# We prefer to use os.sched_yield as it results in tighter polling loops, +# measured to be around 3e-7 seconds. 
However on earlier versions of Python +# os.sched_yield() does not release the GIL, so we fall back to time.sleep(0) +USE_SCHED_YIELD = ((sys.version_info[:3] >= (3, 11, 1)) + or (sys.version_info[:2] == (3, 10) + and sys.version_info[2] >= 8)) + + +def sched_yield(): + if USE_SCHED_YIELD: + os.sched_yield() + else: + time.sleep(0) + class ShmRingBuffer: @@ -114,11 +129,14 @@ def __init__(self, # and we should suppress the error pass + def handle(self): + return (self.n_reader, self.max_chunk_bytes, self.max_chunks, + self.shared_memory.name) + def __reduce__(self): return ( self.__class__, - (self.n_reader, self.max_chunk_bytes, self.max_chunks, - self.shared_memory.name), + self.handle(), ) def __del__(self): @@ -147,7 +165,7 @@ class Handle: connect_ip: str local_reader_ranks: List[int] = field(default_factory=list) - buffer: Optional[ShmRingBuffer] = None + buffer_handle: Optional[Tuple[int, int, int, str]] = None local_subscribe_port: Optional[int] = None remote_subscribe_port: Optional[int] = None @@ -228,7 +246,7 @@ def __init__( self.handle = Handle( connect_ip=connect_ip, local_reader_ranks=local_reader_ranks, - buffer=self.buffer, + buffer_handle=self.buffer.handle(), local_subscribe_port=local_subscribe_port, remote_subscribe_port=remote_subscribe_port, ) @@ -247,8 +265,8 @@ def create_from_handle(handle: Handle, rank) -> "MessageQueue": context = Context() if rank in handle.local_reader_ranks: - assert handle.buffer is not None - self.buffer = handle.buffer + assert handle.buffer_handle is not None + self.buffer = ShmRingBuffer(*handle.buffer_handle) self.current_idx = 0 self.local_reader_rank = handle.local_reader_ranks.index(rank) self._is_local_reader = True @@ -314,7 +332,7 @@ def wait_until_ready(self): assert recv == b"READY" @contextmanager - def acquire_write(self): + def acquire_write(self, timeout: Optional[float] = None): assert self._is_writer, "Only writers can acquire write" start_time = time.monotonic() n_warning = 1 @@ -329,16 +347,20 @@ def acquire_write(self): # we need to wait until it is read by all readers # Release the processor to other threads - os.sched_yield() + sched_yield() - # if we wait for a long time, we should warn the user + # if we wait for a long time, log a message if (time.monotonic() - start_time > VLLM_RINGBUFFER_WARNING_INTERVAL * n_warning): - logger.warning( - "No available block found in %s second. ", - VLLM_RINGBUFFER_WARNING_INTERVAL) + logger.debug("No available block found in %s second. ", + VLLM_RINGBUFFER_WARNING_INTERVAL) n_warning += 1 + # if we time out, raise an exception + if (timeout is not None + and time.monotonic() - start_time > timeout): + raise TimeoutError + continue # found a block that is either # (1) not written @@ -365,7 +387,7 @@ def acquire_write(self): break @contextmanager - def acquire_read(self): + def acquire_read(self, timeout: Optional[float] = None): assert self._is_local_reader, "Only readers can acquire read" start_time = time.monotonic() n_warning = 1 @@ -383,16 +405,20 @@ def acquire_read(self): # we need to wait until it is written # Release the processor to other threads - os.sched_yield() + sched_yield() - # if we wait for a long time, we should warn the user + # if we wait for a long time, log a message if (time.monotonic() - start_time > VLLM_RINGBUFFER_WARNING_INTERVAL * n_warning): - logger.warning( - "No available block found in %s second. ", - VLLM_RINGBUFFER_WARNING_INTERVAL) + logger.debug("No available block found in %s second. 
", + VLLM_RINGBUFFER_WARNING_INTERVAL) n_warning += 1 + # if we time out, raise an exception + if (timeout is not None + and time.monotonic() - start_time > timeout): + raise TimeoutError + continue # found a block that is not read by this reader # let caller read from the buffer @@ -406,24 +432,26 @@ def acquire_read(self): 1) % self.buffer.max_chunks break - def enqueue(self, obj): + def enqueue(self, obj, timeout: Optional[float] = None): + """ Write to message queue with optional timeout (in seconds) """ assert self._is_writer, "Only writers can enqueue" serialized_obj = pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL) if self.n_local_reader > 0: if len(serialized_obj) >= self.buffer.max_chunk_bytes: - with self.acquire_write() as buf: + with self.acquire_write(timeout) as buf: buf[0] = 1 # overflow self.local_socket.send(serialized_obj) else: - with self.acquire_write() as buf: + with self.acquire_write(timeout) as buf: buf[0] = 0 # not overflow buf[1:len(serialized_obj) + 1] = serialized_obj if self.n_remote_reader > 0: self.remote_socket.send(serialized_obj) - def dequeue(self): + def dequeue(self, timeout: Optional[float] = None): + """ Read from message queue with optional timeout (in seconds) """ if self._is_local_reader: - with self.acquire_read() as buf: + with self.acquire_read(timeout) as buf: overflow = buf[0] == 1 if not overflow: # no need to know the size of serialized object diff --git a/vllm/executor/multiproc_gpu_executor.py b/vllm/executor/multiproc_gpu_executor.py index c450209f0eb91..fc58163cade64 100644 --- a/vllm/executor/multiproc_gpu_executor.py +++ b/vllm/executor/multiproc_gpu_executor.py @@ -3,25 +3,19 @@ from functools import partial from typing import Any, List, Optional -import torch - from vllm.executor.distributed_gpu_executor import ( # yapf: disable DistributedGPUExecutor, DistributedGPUExecutorAsync) from vllm.executor.gpu_executor import create_worker -from vllm.executor.multiproc_worker_utils import (ProcessWorkerWrapper, - ResultHandler, WorkerMonitor) +from vllm.executor.multiproc_worker_utils import ( + ProcessWorkerWrapper, ResultHandler, WorkerMonitor, + set_multiprocessing_worker_envs) from vllm.logger import init_logger from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest -from vllm.triton_utils.importing import HAS_TRITON from vllm.utils import (_run_task_with_lock, cuda_device_count_stateless, - cuda_is_initialized, get_distributed_init_method, - get_open_port, make_async, + get_distributed_init_method, get_open_port, make_async, update_environment_variables) -if HAS_TRITON: - from vllm.triton_utils import maybe_set_triton_cache_manager - logger = init_logger(__name__) @@ -37,30 +31,8 @@ def _init_executor(self) -> None: world_size = self.parallel_config.world_size tensor_parallel_size = self.parallel_config.tensor_parallel_size - # Disable torch async compiling which won't work with daemonic processes - os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" - - # Configure thread parallelism if OMP_NUM_THREADS isn't set - # - # Helps to avoid CPU contention. The default of spawning a thread per - # core combined with multiprocessing for each GPU can have a negative - # impact on performance. The contention is amplified when running in a - # container where CPU limits can cause throttling. 
- default_omp_num_threads = 1 - if "OMP_NUM_THREADS" not in os.environ and ( - current_parallelism := - torch.get_num_threads()) > default_omp_num_threads: - logger.warning( - "Reducing Torch parallelism from %d threads to %d to avoid " - "unnecessary CPU contention. Set OMP_NUM_THREADS in the " - "external environment to tune this value as needed.", - current_parallelism, default_omp_num_threads) - os.environ["OMP_NUM_THREADS"] = str(default_omp_num_threads) - torch.set_num_threads(default_omp_num_threads) - - # workaround for https://github.com/vllm-project/vllm/issues/6103 - if HAS_TRITON and world_size > 1: - maybe_set_triton_cache_manager() + # Set multiprocessing envs that are common to V0 and V1 + set_multiprocessing_worker_envs(self.parallel_config) # Multiprocessing-based executor does not support multi-node setting. # Since it only works for single node, we can use the loopback address @@ -122,13 +94,6 @@ def _check_executor_parameters(self): "CUDA_VISIBLE_DEVICES": (",".join(map(str, range(world_size)))) }) - if (cuda_is_initialized() - and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") != "spawn"): - logger.warning("CUDA was previously initialized. We must use " - "the `spawn` multiprocessing start method. Setting " - "VLLM_WORKER_MULTIPROC_METHOD to 'spawn'.") - os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" - cuda_device_count = cuda_device_count_stateless() # Use confusing message for more common TP-only case. assert tensor_parallel_size <= cuda_device_count, ( diff --git a/vllm/executor/multiproc_worker_utils.py b/vllm/executor/multiproc_worker_utils.py index 884267d23dfc8..fe475db6d3f57 100644 --- a/vllm/executor/multiproc_worker_utils.py +++ b/vllm/executor/multiproc_worker_utils.py @@ -11,8 +11,15 @@ from typing import (Any, Callable, Dict, Generic, List, Optional, TextIO, TypeVar, Union) +import torch + import vllm.envs as envs from vllm.logger import init_logger +from vllm.triton_utils.importing import HAS_TRITON +from vllm.utils import cuda_is_initialized + +if HAS_TRITON: + from vllm.triton_utils import maybe_set_triton_cache_manager logger = init_logger(__name__) @@ -270,3 +277,38 @@ def write_with_prefix(s: str): def get_mp_context(): mp_method = envs.VLLM_WORKER_MULTIPROC_METHOD return multiprocessing.get_context(mp_method) + + +def set_multiprocessing_worker_envs(parallel_config): + """ Set up environment variables that should be used when there are workers + in a multiprocessing environment. This should be called by the parent + process before worker processes are created""" + + if (cuda_is_initialized() + and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") != "spawn"): + logger.warning("CUDA was previously initialized. We must use " + "the `spawn` multiprocessing start method. Setting " + "VLLM_WORKER_MULTIPROC_METHOD to 'spawn'.") + os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" + + # Configure thread parallelism if OMP_NUM_THREADS isn't set + # + # Helps to avoid CPU contention. The default of spawning a thread per + # core combined with multiprocessing for each GPU can have a negative + # impact on performance. The contention is amplified when running in a + # container where CPU limits can cause throttling. + default_omp_num_threads = 1 + if "OMP_NUM_THREADS" not in os.environ and ( + current_parallelism := + torch.get_num_threads()) > default_omp_num_threads: + logger.warning( + "Reducing Torch parallelism from %d threads to %d to avoid " + "unnecessary CPU contention. 
Set OMP_NUM_THREADS in the " + "external environment to tune this value as needed.", + current_parallelism, default_omp_num_threads) + os.environ["OMP_NUM_THREADS"] = str(default_omp_num_threads) + torch.set_num_threads(default_omp_num_threads) + + # workaround for https://github.com/vllm-project/vllm/issues/6103 + if HAS_TRITON and parallel_config.world_size > 1: + maybe_set_triton_cache_manager() diff --git a/vllm/model_executor/layers/logits_processor.py b/vllm/model_executor/layers/logits_processor.py index fb76b1b17925e..2bc7e458494f7 100644 --- a/vllm/model_executor/layers/logits_processor.py +++ b/vllm/model_executor/layers/logits_processor.py @@ -5,6 +5,7 @@ import torch import torch.nn as nn +import vllm.envs as envs from vllm.distributed import (tensor_model_parallel_all_gather, tensor_model_parallel_gather) from vllm.model_executor.layers.vocab_parallel_embedding import ( @@ -42,7 +43,9 @@ def __init__(self, # Soft cap the logits. Used in Gemma 2. self.soft_cap = soft_cap # Whether to use gather or all-gather to gather the logits. - self.use_gather = not current_platform.is_tpu() + + self.use_gather = not current_platform.is_tpu( + ) and not envs.VLLM_USE_V1 def forward( self, diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index edaf377b501df..10f83fd304281 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -12,6 +12,7 @@ # import custom ops, trigger op registration import vllm._C # noqa +import vllm.envs as envs from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum @@ -110,17 +111,28 @@ def log_warnings(cls): def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": if scheduler_config.is_multi_step: - parallel_config.worker_cls = \ - "vllm.worker.multi_step_worker.MultiStepWorker" + if envs.VLLM_USE_V1: + raise NotImplementedError + else: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" elif vllm_config.speculative_config: - parallel_config.worker_cls = \ - "vllm.spec_decode.spec_decode_worker.create_spec_worker" - parallel_config.sd_worker_cls = \ - "vllm.worker.worker.Worker" + if envs.VLLM_USE_V1: + raise NotImplementedError + else: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + parallel_config.sd_worker_cls = \ + "vllm.worker.worker.Worker" else: - parallel_config.worker_cls = "vllm.worker.worker.Worker" + if envs.VLLM_USE_V1: + parallel_config.worker_cls = \ + "vllm.v1.worker.gpu_worker.Worker" + else: + parallel_config.worker_cls = "vllm.worker.worker.Worker" # NVML utils @@ -249,4 +261,4 @@ def is_full_nvlink(cls, physical_device_ids: List[int]) -> bool: if not isinstance(pynvml, _MockModule): CudaPlatform.log_warnings() except ModuleNotFoundError: - CudaPlatform.log_warnings() \ No newline at end of file + CudaPlatform.log_warnings() diff --git a/vllm/utils.py b/vllm/utils.py index 2bb1fb2af40f4..7cdb2cb320b05 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -10,6 +10,7 @@ import inspect import ipaddress import os +import signal import socket import subprocess import sys @@ -1652,3 +1653,28 @@ def resolve_obj_by_qualname(qualname: str) -> Any: module_name, obj_name = qualname.rsplit(".", 1) module = importlib.import_module(module_name) return getattr(module, obj_name) + + +def kill_process_tree(pid: int): + """ + Kills all descendant processes of the given pid by 
sending SIGKILL. + + Args: + pid (int): Process ID of the parent process + """ + try: + parent = psutil.Process(pid) + except psutil.NoSuchProcess: + return + + # Get all children recursively + children = parent.children(recursive=True) + + # Send SIGKILL to all children first + for child in children: + with contextlib.suppress(ProcessLookupError): + os.kill(child.pid, signal.SIGKILL) + + # Finally kill the parent + with contextlib.suppress(ProcessLookupError): + os.kill(pid, signal.SIGKILL) diff --git a/vllm/v1/core/scheduler.py b/vllm/v1/core/scheduler.py index 1203d35fc985f..a3e85c20cc664 100644 --- a/vllm/v1/core/scheduler.py +++ b/vllm/v1/core/scheduler.py @@ -5,6 +5,8 @@ from vllm.config import CacheConfig, LoRAConfig, SchedulerConfig from vllm.logger import init_logger +from vllm.multimodal import MultiModalKwargs +from vllm.multimodal.base import PlaceholderRange from vllm.sampling_params import SamplingParams from vllm.v1.core.encoder_cache_manager import EncoderCacheManager from vllm.v1.core.kv_cache_manager import KVCacheManager @@ -383,7 +385,7 @@ def update_from_output( model_runner_output: "ModelRunnerOutput", ) -> List[EngineCoreOutput]: # NOTE(woosuk): This method doesn't consider speculative decoding. - sampled_token_ids = model_runner_output.sampled_token_ids_cpu.tolist() + sampled_token_ids = model_runner_output.sampled_token_ids num_scheduled_tokens = scheduler_output.num_scheduled_tokens new_running: List[Request] = [] engine_core_outputs: List[EngineCoreOutput] = [] diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 0bcccda2bf329..26fd650aee4b7 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -20,7 +20,7 @@ from vllm.v1.engine.core_client import EngineCoreClient from vllm.v1.engine.detokenizer import Detokenizer from vllm.v1.engine.processor import Processor -from vllm.v1.executor.gpu_executor import GPUExecutor +from vllm.v1.executor.abstract import Executor logger = init_logger(__name__) @@ -30,7 +30,7 @@ class AsyncLLM(EngineClient): def __init__( self, vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], log_stats: bool, usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, stat_loggers: Optional[Dict[str, StatLoggerBase]] = None, @@ -119,14 +119,24 @@ def from_engine_args( def shutdown(self): """Shutdown, cleaning up the background proc and IPC.""" - self.engine_core.shutdown() + if engine_core := getattr(self, "engine_core", None): + engine_core.shutdown() if handler := getattr(self, "output_handler", None): handler.cancel() @classmethod def _get_executor_cls(cls, vllm_config: VllmConfig): - return GPUExecutor + distributed_executor_backend = ( + vllm_config.parallel_config.distributed_executor_backend) + if distributed_executor_backend == "mp": + from vllm.v1.executor.multiproc_executor import MultiprocExecutor + executor_class = MultiprocExecutor + else: + assert (distributed_executor_backend is None) + from vllm.v1.executor.uniproc_executor import UniprocExecutor + executor_class = UniprocExecutor + return executor_class async def add_request( self, diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 751eb3b40a68d..fdb241e6753fb 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -1,12 +1,12 @@ import multiprocessing import pickle import queue +import signal import threading import time -from contextlib import contextmanager from multiprocessing.process import BaseProcess from multiprocessing.sharedctypes import Synchronized 
-from typing import Any, Iterator, List, Tuple, Type, Union +from typing import List, Tuple, Type, Union import zmq import zmq.asyncio @@ -20,9 +20,10 @@ EngineCoreProfile, EngineCoreRequest, EngineCoreRequestType) from vllm.v1.engine.mm_input_mapper import MMInputMapper -from vllm.v1.executor.gpu_executor import GPUExecutor +from vllm.v1.executor.abstract import Executor from vllm.v1.request import Request, RequestStatus from vllm.v1.serial_utils import PickleEncoder +from vllm.v1.utils import make_zmq_socket from vllm.version import __version__ as VLLM_VERSION logger = init_logger(__name__) @@ -38,7 +39,7 @@ class EngineCore: def __init__( self, vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], usage_context: UsageContext, ): assert vllm_config.model_config.task != "embedding" @@ -80,7 +81,7 @@ def _initialize_kv_caches(self, num_gpu_blocks = num_gpu_blocks_override num_cpu_blocks = 0 - self.model_executor.initialize_cache(num_gpu_blocks) + self.model_executor.initialize(num_gpu_blocks) elapsed = time.time() - start logger.info(("init engine (profile, create kv cache, " "warmup model) took %.2f seconds"), elapsed) @@ -112,8 +113,11 @@ def step(self) -> List[EngineCoreOutput]: scheduler_output, output) return engine_core_outputs + def shutdown(self): + self.model_executor.shutdown() + def profile(self, is_start=True): - self.model_executor.worker.profile(is_start) + self.model_executor.profile(is_start) class EngineCoreProc(EngineCore): @@ -124,7 +128,7 @@ class EngineCoreProc(EngineCore): def __init__( self, vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], usage_context: UsageContext, input_path: str, output_path: str, @@ -151,32 +155,9 @@ def __init__( daemon=True).start() # Send Readiness signal to EngineClient. - with self.make_socket(ready_path, zmq.constants.PUSH) as ready_socket: + with make_zmq_socket(ready_path, zmq.constants.PUSH) as ready_socket: ready_socket.send_string(EngineCoreProc.READY_STR) - @contextmanager - def make_socket(self, path: str, type: Any) -> Iterator[zmq.Socket]: - """Context manager for use """ - - ctx = zmq.Context() - try: - socket = ctx.socket(type) - - if type == zmq.constants.PULL: - socket.connect(path) - elif type == zmq.constants.PUSH: - socket.bind(path) - else: - raise ValueError(f"Unknown Socket Type: {type}") - - yield socket - - except KeyboardInterrupt: - logger.debug("EngineCore had Keyboard Interrupt.") - - finally: - ctx.destroy(linger=0) - @staticmethod def wait_for_startup( proc: BaseProcess, @@ -209,7 +190,7 @@ def wait_for_startup( @staticmethod def make_engine_core_process( vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], usage_context: UsageContext, input_path: str, output_path: str, @@ -244,17 +225,38 @@ def make_engine_core_process( def run_engine_core(*args, **kwargs): """Launch EngineCore busy loop in background process.""" + # Signal handler used for graceful termination. 
+ # SystemExit exception is only raised once to allow this and worker + # processes to terminate without error + shutdown_requested = False + + def signal_handler(signum, frame): + nonlocal shutdown_requested + if not shutdown_requested: + shutdown_requested = True + raise SystemExit() + + # Either SIGTERM or SIGINT will terminate the engine_core + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + + engine_core = None try: engine_core = EngineCoreProc(*args, **kwargs) engine_core.run_busy_loop() - except KeyboardInterrupt: + except SystemExit: logger.debug("EngineCore interrupted.") except BaseException as e: logger.exception(e) raise e + finally: + if engine_core is not None: + engine_core.shutdown() + engine_core = None + def run_busy_loop(self): """Core busy loop of the EngineCore.""" @@ -272,6 +274,8 @@ def run_busy_loop(self): logger.debug("EngineCore busy loop waiting.") if self.should_shutdown: return + except BaseException: + raise # 2) Handle any new client requests (Abort or Add). while not self.input_queue.empty(): @@ -321,7 +325,7 @@ def process_input_socket(self, input_path: str): decoder_add_req = PickleEncoder() decoder_abort_req = PickleEncoder() - with self.make_socket(input_path, zmq.constants.PULL) as socket: + with make_zmq_socket(input_path, zmq.constants.PULL) as socket: while True: # (RequestType, RequestData) type_frame, data_frame = socket.recv_multipart(copy=False) @@ -349,7 +353,7 @@ def process_output_socket(self, output_path: str): # Reuse send buffer. buffer = bytearray() - with self.make_socket(output_path, zmq.constants.PUSH) as socket: + with make_zmq_socket(output_path, zmq.constants.PUSH) as socket: while True: engine_core_outputs = self.output_queue.get() outputs = EngineCoreOutputs(outputs=engine_core_outputs) diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index 835963f7ee86c..ee89cece73141 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -1,5 +1,4 @@ import multiprocessing -import time from typing import List, Union import msgspec @@ -7,7 +6,7 @@ import zmq.asyncio from vllm.logger import init_logger -from vllm.utils import get_open_zmq_ipc_path +from vllm.utils import get_open_zmq_ipc_path, kill_process_tree from vllm.v1.engine import (EngineCoreOutput, EngineCoreOutputs, EngineCoreProfile, EngineCoreRequest, EngineCoreRequestType) @@ -99,6 +98,12 @@ def add_request(self, request: EngineCoreRequest) -> None: def abort_requests(self, request_ids: List[str]) -> None: self.engine_core.abort_requests(request_ids) + def shutdown(self): + self.engine_core.shutdown() + + def __del__(self): + self.shutdown() + async def profile(self, is_start=True) -> None: self.engine_core.profile(is_start) @@ -163,10 +168,10 @@ def shutdown(self): # Shutdown the process if needed. 
if hasattr(self, "proc") and self.proc.is_alive(): self.proc.terminate() + self.proc.join(5) - time.sleep(5) if self.proc.is_alive(): - self.proc.kill() + kill_process_tree(self.proc.pid) def __del__(self): self.shutdown() diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index 994e68669108e..1b3a9f12d009e 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -20,7 +20,7 @@ from vllm.v1.engine.core_client import EngineCoreClient from vllm.v1.engine.detokenizer import Detokenizer from vllm.v1.engine.processor import Processor -from vllm.v1.executor.gpu_executor import GPUExecutor +from vllm.v1.executor.abstract import Executor logger = init_logger(__name__) @@ -33,7 +33,7 @@ class LLMEngine: def __init__( self, vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], log_stats: bool, usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, stat_loggers: Optional[Dict[str, StatLoggerBase]] = None, @@ -104,10 +104,17 @@ def from_engine_args( @classmethod def _get_executor_cls(cls, vllm_config: VllmConfig): - return GPUExecutor - - def stop_remote_worker_execution_loop(self) -> None: - raise NotImplementedError("TP not implemented yet.") + distributed_executor_backend = ( + vllm_config.parallel_config.distributed_executor_backend) + if distributed_executor_backend == "mp": + from vllm.v1.executor.multiproc_executor import MultiprocExecutor + executor_class = MultiprocExecutor + else: + assert (distributed_executor_backend is None) + from vllm.v1.executor.uniproc_executor import UniprocExecutor + executor_class = UniprocExecutor + + return executor_class def get_num_unfinished_requests(self) -> int: return self.detokenizer.get_num_unfinished_requests() diff --git a/vllm/v1/executor/abstract.py b/vllm/v1/executor/abstract.py new file mode 100644 index 0000000000000..9cd267581ad18 --- /dev/null +++ b/vllm/v1/executor/abstract.py @@ -0,0 +1,48 @@ +from abc import ABC, abstractmethod +from typing import Dict, Optional, Tuple + +from vllm.config import VllmConfig +from vllm.v1.outputs import ModelRunnerOutput + + +class Executor(ABC): + """Abstract class for executors.""" + + @abstractmethod + def __init__(self, vllm_config: VllmConfig) -> None: + raise NotImplementedError + + @abstractmethod + def initialize(self, num_gpu_blocks: int) -> None: + raise NotImplementedError + + @abstractmethod + def determine_num_available_blocks(self) -> Tuple[int, int]: + raise NotImplementedError + + @abstractmethod + def execute_model( + self, + scheduler_output, + ) -> ModelRunnerOutput: + raise NotImplementedError + + @abstractmethod + def profile(self, is_start=True): + raise NotImplementedError + + @abstractmethod + def shutdown(self): + pass + + @abstractmethod + def check_health(self) -> None: + raise NotImplementedError + + @abstractmethod + def collective_rpc(self, + method: str, + timeout: Optional[float] = None, + args: Tuple = (), + kwargs: Optional[Dict] = None) -> []: + raise NotImplementedError diff --git a/vllm/v1/executor/multiproc_executor.py b/vllm/v1/executor/multiproc_executor.py new file mode 100644 index 0000000000000..f8f3d583618cf --- /dev/null +++ b/vllm/v1/executor/multiproc_executor.py @@ -0,0 +1,375 @@ +import atexit +import os +import pickle +import signal +import sys +import time +from dataclasses import dataclass +from enum import Enum, auto +from multiprocessing.process import BaseProcess +from typing import Dict, List, Optional, Tuple + +import zmq + +from vllm.config import VllmConfig +from 
vllm.distributed import (destroy_distributed_environment, + destroy_model_parallel) +from vllm.distributed.device_communicators.shm_broadcast import (Handle, + MessageQueue) +from vllm.executor.multiproc_worker_utils import ( + _add_prefix, get_mp_context, set_multiprocessing_worker_envs) +from vllm.logger import init_logger +from vllm.utils import (get_distributed_init_method, get_open_port, + get_open_zmq_ipc_path) +from vllm.v1.outputs import ModelRunnerOutput +from vllm.v1.utils import make_zmq_socket +from vllm.worker.worker_base import WorkerWrapperBase + +logger = init_logger(__name__) + +POLLING_TIMEOUT_MS = 5000 +POLLING_TIMEOUT_S = POLLING_TIMEOUT_MS // 1000 + + +class MultiprocExecutor: + + def __init__(self, vllm_config: VllmConfig) -> None: + # Call self.shutdown at exit to clean up + # and ensure workers will be terminated. + atexit.register(self.shutdown) + + self.vllm_config = vllm_config + self.parallel_config = vllm_config.parallel_config + + self.world_size = self.parallel_config.world_size + tensor_parallel_size = self.parallel_config.tensor_parallel_size + assert self.world_size == tensor_parallel_size, ( + f"world_size ({self.world_size}) must be equal to the " + f"tensor_parallel_size ({tensor_parallel_size}). " + f"Pipeline parallelism is not yet implemented in v1") + + # Set multiprocessing envs that are common to V0 and V1 + set_multiprocessing_worker_envs(self.parallel_config) + + # Multiprocessing-based executor does not support multi-node setting. + # Since it only works for single node, we can use the loopback address + # 127.0.0.1 for communication. + distributed_init_method = get_distributed_init_method( + "127.0.0.1", get_open_port()) + + # Initialize worker and set up message queues for SchedulerOutputs + # and ModelRunnerOutputs + self.rpc_broadcast_mq = MessageQueue(self.world_size, self.world_size) + scheduler_output_handle = self.rpc_broadcast_mq.export_handle() + + # Create workers + self.workers: List[WorkerProcHandle] = [] + for rank in range(self.world_size): + worker = WorkerProc.make_worker_process(vllm_config, rank, rank, + distributed_init_method, + scheduler_output_handle) + self.workers.append(worker) + + # Ensure message queues are ready. Will deadlock if re-ordered + # Must be kept consistent with the WorkerProc + self.rpc_broadcast_mq.wait_until_ready() + for w in self.workers: + w.worker_response_mq.wait_until_ready() + + def initialize(self, num_gpu_blocks: int) -> None: + """ + Initialize the KV caches and begin the model execution loop of the + underlying workers. + """ + self.collective_rpc("initialize_cache", args=(num_gpu_blocks, )) + self.collective_rpc("compile_or_warm_up_model") + + def determine_num_available_blocks(self) -> Tuple[int, int]: + """ + Determine the number of available KV blocks by invoking the + underlying worker. + """ + num_blocks = self.collective_rpc("determine_num_available_blocks") + + # Since we use a shared centralized controller, we take the minimum + # number of blocks across all workers to make sure all the memory + # operators can be applied to all workers. + num_gpu_blocks = min(b[0] for b in num_blocks) + num_cpu_blocks = min(b[1] for b in num_blocks) + + return num_gpu_blocks, num_cpu_blocks + + def collective_rpc(self, + method: str, + timeout: Optional[float] = None, + args: Tuple = (), + kwargs: Optional[Dict] = None) -> []: + """ + Execute an RPC call on workers. + + Args: + method: Name of the worker method to execute + timeout: Maximum time in seconds to wait for execution. 
Rases a + TimeoutError on timeout. None means wait indefinitely. + args: Positional arguments to pass to the worker method + kwargs: Keyword arguments to pass to the worker method + + Returns: + List of results from each worker + """ + start_time = time.monotonic() + kwargs = kwargs or {} + + try: + self.rpc_broadcast_mq.enqueue((method, args, kwargs)) + + responses = [None] * self.world_size + for w in self.workers: + dequeue_timeout = timeout - (time.monotonic() - start_time() + ) if timeout is not None else None + status, result = w.worker_response_mq.dequeue( + timeout=dequeue_timeout) + + if status != WorkerProc.ResponseStatus.SUCCESS: + if isinstance(result, Exception): + raise result + else: + raise RuntimeError("Worker failed") + + responses[w.rank] = result + + return responses + except TimeoutError as e: + raise TimeoutError(f"RPC call to {method} timed out.") from e + except Exception as e: + # Re-raise any other exceptions + raise e + + def execute_model( + self, + scheduler_output, + ) -> ModelRunnerOutput: + model_output = self.collective_rpc("execute_model", + args=(scheduler_output, ))[0] + return model_output + + def profile(self, is_start=True): + self.collective_rpc("profile", args=(is_start, )) + return + + def _ensure_worker_termination(self): + """Ensure that all worker processes are terminated. Assumes workers have + received termination requests. Waits for processing, then sends + termination and kill signals if needed.""" + + def wait_for_termination(procs, timeout): + start_time = time.time() + while time.time() - start_time < timeout: + if all(not proc.is_alive() for proc in procs): + return True + time.sleep(0.1) + return False + + # Send SIGTERM if still running + active_procs = [w.proc for w in self.workers if w.proc.is_alive()] + self.workers = None + for p in active_procs: + p.terminate() + if wait_for_termination(active_procs, 4): + return + + # Send SIGKILL if still running + active_procs = [p for p in active_procs if p.is_alive()] + for p in active_procs: + p.kill() + + def shutdown(self): + """Properly shut down the executor and its workers""" + if (hasattr(self, 'workers') and self.workers is not None): + for w in self.workers: #TODO: not sure if needed + w.worker_response_mq = None + self._ensure_worker_termination() + + self.rpc_broadcast_mq = None + + def check_health(self) -> None: + self.collective_rpc("check_health", timeout=10) + return + + +@dataclass +class WorkerProcHandle: + proc: BaseProcess + rank: int + ready_path: str + worker_response_mq: MessageQueue # The worker process writes to this MQ + + +class WorkerProc: + """Wrapper that runs one Worker in a separate process.""" + + READY_STR = "READY" + + def __init__( + self, + vllm_config: VllmConfig, + local_rank: int, + rank: int, + distributed_init_method: str, + input_shm_handle: Handle, + ready_path: str, + ): + self.rank = rank + wrapper = WorkerWrapperBase(vllm_config=vllm_config) + wrapper.init_worker(vllm_config, local_rank, rank, + distributed_init_method) + self.worker = wrapper.worker + + pid = os.getpid() + _add_prefix(sys.stdout, f"VllmWorker rank={rank}", pid) + _add_prefix(sys.stderr, f"VllmWorker rank={rank}", pid) + + # Initialize MessageQueue for receiving SchedulerOutput + self.rpc_broadcast_mq = MessageQueue.create_from_handle( + input_shm_handle, self.worker.rank) + + # Initializes a message queue for sending the model output + self.worker_response_mq = MessageQueue(1, 1) + worker_response_mq_handle = self.worker_response_mq.export_handle() + + # Send Readiness signal to 
EngineCore process. + with make_zmq_socket(ready_path, zmq.constants.PUSH) as ready_socket: + payload = pickle.dumps(worker_response_mq_handle, + protocol=pickle.HIGHEST_PROTOCOL) + ready_socket.send_string(WorkerProc.READY_STR) + ready_socket.send(payload) + + self.worker.initialize() + self.worker.load_model() + + @staticmethod + def make_worker_process( + vllm_config: VllmConfig, + local_rank: int, + rank: int, + distributed_init_method: str, + input_shm_handle, # Receive SchedulerOutput + ) -> WorkerProcHandle: + context = get_mp_context() + + # ZMQ path for worker to send ready message and shm_broadcast handle + # back to core process. + ready_path = get_open_zmq_ipc_path() + + process_kwargs = { + "vllm_config": vllm_config, + "local_rank": local_rank, + "rank": rank, + "distributed_init_method": distributed_init_method, + "input_shm_handle": input_shm_handle, + "ready_path": ready_path, + } + # Run EngineCore busy loop in background process. + proc = context.Process(target=WorkerProc.worker_main, + kwargs=process_kwargs, + daemon=True) + proc.start() + + # Wait for startup + worker_response_mq_handle = WorkerProc.wait_for_startup( + proc, ready_path) + + worker_response_mq = MessageQueue.create_from_handle( + worker_response_mq_handle, 0) + + return WorkerProcHandle(proc, rank, ready_path, worker_response_mq) + + def shutdown(self): + self.rpc_broadcast_mq = None + self.worker_response_mq = None + destroy_model_parallel() + destroy_distributed_environment() + + @staticmethod + def worker_main(*args, **kwargs): + """ Worker initialization and execution loops. + This runs a background process """ + + # Signal handler used for graceful termination. + # SystemExit exception is only raised once to allow this and worker + # processes to terminate without error + shutdown_requested = False + + def signal_handler(signum, frame): + nonlocal shutdown_requested + if not shutdown_requested: + shutdown_requested = True + raise SystemExit() + + # Either SIGTERM or SIGINT will terminate the worker + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + + worker = None + try: + worker = WorkerProc(*args, **kwargs) + + # Ensure message queues are ready. Will deadlock if re-ordered. + # Must be kept consistent with the Executor + worker.rpc_broadcast_mq.wait_until_ready() + worker.worker_response_mq.wait_until_ready() + + worker.worker_busy_loop() + + except SystemExit: + logger.debug("Worker interrupted.") + + except BaseException as e: + logger.exception(e) + raise + + finally: + # Clean up once worker exits busy loop + if worker is not None: + worker.shutdown() + worker = None + + @staticmethod + def wait_for_startup( + proc: BaseProcess, + ready_path: str, + ) -> Optional[Handle]: + """Wait until the Worker is ready.""" + with make_zmq_socket(ready_path, zmq.constants.PULL) as socket: + + # Wait for Worker to send READY. 
+ while socket.poll(timeout=POLLING_TIMEOUT_MS) == 0: + logger.debug("Waiting for WorkerProc to startup.") + + if not proc.is_alive(): + raise RuntimeError("WorkerProc failed to start.") + + message = socket.recv_string() + assert message == WorkerProc.READY_STR + handle_frame = socket.recv(copy=False) + handle = pickle.loads(handle_frame.buffer) + return handle + + class ResponseStatus(Enum): + SUCCESS = auto() + FAILURE = auto() + + def worker_busy_loop(self): + """Main busy loop for Multiprocessing Workers""" + while True: + method, args, kwargs = self.rpc_broadcast_mq.dequeue() + + try: + output = getattr(self.worker, method)(*args, **kwargs) + except BaseException as e: + self.worker_response_mq.enqueue( + (WorkerProc.ResponseStatus.FAILURE, e)) + continue + + self.worker_response_mq.enqueue( + (WorkerProc.ResponseStatus.SUCCESS, output)) diff --git a/vllm/v1/executor/gpu_executor.py b/vllm/v1/executor/uniproc_executor.py similarity index 90% rename from vllm/v1/executor/gpu_executor.py rename to vllm/v1/executor/uniproc_executor.py index f71fa16b16e27..9b1d9a40950c6 100644 --- a/vllm/v1/executor/gpu_executor.py +++ b/vllm/v1/executor/uniproc_executor.py @@ -10,7 +10,7 @@ logger = init_logger(__name__) -class GPUExecutor: +class UniprocExecutor: def __init__(self, vllm_config: VllmConfig) -> None: self.vllm_config = vllm_config @@ -54,7 +54,7 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: """ return self.worker.determine_num_available_blocks() - def initialize_cache(self, num_gpu_blocks: int) -> None: + def initialize(self, num_gpu_blocks: int) -> None: """Initialize the KV cache by invoking the underlying worker. """ # NOTE: This is logged in the executor because there can be >1 worker @@ -71,7 +71,13 @@ def execute_model( output = self.worker.execute_model(scheduler_output) return output + def profile(self, is_start: bool = True): + self.worker.profile(is_start) + + def shutdown(self): + self.worker = None + def check_health(self) -> None: - # GPUExecutor will always be healthy as long as + # UniprocExecutor will always be healthy as long as # it's running. return diff --git a/vllm/v1/outputs.py b/vllm/v1/outputs.py index 8574987728844..acc3a944e21b9 100644 --- a/vllm/v1/outputs.py +++ b/vllm/v1/outputs.py @@ -8,7 +8,7 @@ class SamplerOutput: # [num_reqs] - sampled_token_ids: torch.Tensor + sampled_token_ids: List[int] # [num_reqs, max_num_logprobs + 1] logprob_token_ids: Optional[torch.Tensor] @@ -20,6 +20,8 @@ class SamplerOutput: prompt_logprobs: Optional[torch.Tensor] +# ModelRunnerOutput is serialized and sent to the scheduler process. +# This is expensive for torch.Tensor so prefer to use List instead. @dataclass class ModelRunnerOutput: @@ -29,7 +31,7 @@ class ModelRunnerOutput: req_id_to_index: Dict[str, int] # [num_reqs] - sampled_token_ids_cpu: torch.Tensor + sampled_token_ids: List[int] # [num_reqs, max_num_logprobs + 1] logprob_token_ids_cpu: Optional[torch.Tensor] diff --git a/vllm/v1/sample/sampler.py b/vllm/v1/sample/sampler.py index 927f274541c4d..d1a755be01ff7 100644 --- a/vllm/v1/sample/sampler.py +++ b/vllm/v1/sample/sampler.py @@ -37,8 +37,9 @@ def forward( topk_logprobs = None topk_indices = None + # NOTE: CPU-GPU synchronization happens here. 
sampler_output = SamplerOutput( - sampled_token_ids=sampled, + sampled_token_ids=sampled.tolist(), logprob_token_ids=topk_indices, logprobs=topk_logprobs, prompt_logprob_token_ids=None, diff --git a/vllm/v1/utils.py b/vllm/v1/utils.py index 4b26749712e32..6e7a7d4fe12cd 100644 --- a/vllm/v1/utils.py +++ b/vllm/v1/utils.py @@ -1,4 +1,11 @@ -from typing import Generic, List, TypeVar, overload +from contextlib import contextmanager +from typing import Any, Generic, Iterator, List, TypeVar, overload + +import zmq + +from vllm.logger import init_logger + +logger = init_logger(__name__) T = TypeVar("T") @@ -62,3 +69,27 @@ def __contains__(self, item): def __len__(self): return len(self._x) + + +@contextmanager +def make_zmq_socket(path: str, type: Any) -> Iterator[zmq.Socket]: + """Context manager for a ZMQ socket""" + + ctx = zmq.Context() + try: + socket = ctx.socket(type) + + if type == zmq.constants.PULL: + socket.connect(path) + elif type == zmq.constants.PUSH: + socket.bind(path) + else: + raise ValueError(f"Unknown Socket Type: {type}") + + yield socket + + except KeyboardInterrupt: + logger.debug("Worker had Keyboard Interrupt.") + + finally: + ctx.destroy(linger=0) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index c601aca13feaf..0a5adfb28c9bd 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -34,6 +34,7 @@ class GPUModelRunner: def __init__( self, vllm_config: VllmConfig, + device: torch.device, input_registry: InputRegistry = INPUT_REGISTRY, ): self.vllm_config = vllm_config @@ -43,7 +44,6 @@ def __init__( self.load_config = vllm_config.load_config self.parallel_config = vllm_config.parallel_config self.scheduler_config = vllm_config.scheduler_config - self.device_config = vllm_config.device_config self.speculative_config = vllm_config.speculative_config self.prompt_adapter_config = vllm_config.prompt_adapter_config self.observability_config = vllm_config.observability_config @@ -52,7 +52,7 @@ def __init__( cache_config = self.cache_config scheduler_config = self.scheduler_config parallel_config = self.parallel_config - self.device = self.device_config.device + self.device = device self.pin_memory = is_pin_memory_available() self.dtype = self.model_config.dtype if cache_config.cache_dtype == "auto": @@ -477,9 +477,7 @@ def execute_model( sampling_metadata=sampling_metadata, ) - # NOTE: CPU-GPU synchronization happens here. - sampled_token_ids = sampler_output.sampled_token_ids.cpu() - sampled_token_ids_list = sampled_token_ids.tolist() + sampled_token_ids = sampler_output.sampled_token_ids # TODO(woosuk): The following loop can be slow since it iterates over # the requests one by one. Optimize. num_reqs = self.input_batch.num_reqs @@ -490,7 +488,7 @@ def execute_model( assert seq_len <= req_state.num_tokens if seq_len == req_state.num_tokens: # Append the sampled token to the output token ids. 
- token_id = sampled_token_ids_list[i] + token_id = sampled_token_ids[i] self.input_batch.token_ids_cpu[i, seq_len] = token_id req_state.output_token_ids.append(token_id) else: @@ -512,7 +510,7 @@ def execute_model( model_runner_output = ModelRunnerOutput( req_ids=self.input_batch.req_ids[:num_reqs], req_id_to_index=self.input_batch.req_id_to_index, - sampled_token_ids_cpu=sampled_token_ids, + sampled_token_ids=sampled_token_ids, logprob_token_ids_cpu=logprob_token_ids, logprobs_cpu=logprobs, ) diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index d33b55a8a9f9a..d32848c3775ae 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -15,6 +15,7 @@ from vllm.model_executor import set_random_seed from vllm.platforms import current_platform from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size +from vllm.v1.core.scheduler import SchedulerOutput from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.worker.gpu_model_runner import GPUModelRunner @@ -56,7 +57,6 @@ def __init__( from vllm.utils import init_cached_hf_modules init_cached_hf_modules() - self.model_runner = GPUModelRunner(vllm_config) # Torch profiler. Enabled and configured through env vars: # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace if envs.VLLM_TORCH_PROFILER_DIR: @@ -103,6 +103,9 @@ def initialize(self): # Set random seed. set_random_seed(self.model_config.seed) + # Construct the model runner + self.model_runner = GPUModelRunner(self.vllm_config, self.device) + def load_model(self) -> None: self.model_runner.load_model() @@ -198,7 +201,7 @@ def execute_model( scheduler_output: "SchedulerOutput", ) -> ModelRunnerOutput: output = self.model_runner.execute_model(scheduler_output) - # TODO(woosuk): Send the output to the engine process. + return output if self.rank == 0 else None return output def profile(self, is_start=True): @@ -209,6 +212,10 @@ def profile(self, is_start=True): else: self.profiler.stop() + def check_health(self) -> None: + # worker will always be healthy as long as it's running. 
+ return + def init_worker_distributed_environment( parallel_config: ParallelConfig, From ebf778061db4e67c6903f8d6e8ad97c3db0174d8 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 9 Dec 2024 22:35:36 -0800 Subject: [PATCH 293/397] monitor metrics of tokens per step using cudagraph batchsizes (#11031) Signed-off-by: youkaichao --- tests/metrics/test_metrics.py | 2 +- vllm/engine/llm_engine.py | 6 ++++-- vllm/engine/metrics.py | 25 ++++++++++++++++--------- vllm/engine/metrics_types.py | 3 ++- 4 files changed, 23 insertions(+), 13 deletions(-) diff --git a/tests/metrics/test_metrics.py b/tests/metrics/test_metrics.py index 4a824c7acef21..b3c7850556f90 100644 --- a/tests/metrics/test_metrics.py +++ b/tests/metrics/test_metrics.py @@ -411,7 +411,7 @@ def log(self, *args, **kwargs): logger = _RayPrometheusStatLogger( local_interval=0.5, labels=dict(model_name=engine.model_config.served_model_name), - max_model_len=engine.model_config.max_model_len) + vllm_config=engine.vllm_config) engine.add_logger("ray", logger) for i, prompt in enumerate(example_prompts): engine.add_request( diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 8fc69d96d321e..6eca304b45f07 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -232,6 +232,7 @@ def __init__( use_cached_outputs: bool = False, ) -> None: + self.vllm_config = vllm_config self.model_config = vllm_config.model_config self.cache_config = vllm_config.cache_config self.lora_config = vllm_config.lora_config @@ -385,13 +386,14 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: self.stat_loggers = { "logging": LoggingStatLogger( - local_interval=_LOCAL_LOGGING_INTERVAL_SEC), + local_interval=_LOCAL_LOGGING_INTERVAL_SEC, + vllm_config=vllm_config), "prometheus": PrometheusStatLogger( local_interval=_LOCAL_LOGGING_INTERVAL_SEC, labels=dict( model_name=self.model_config.served_model_name), - max_model_len=self.model_config.max_model_len), + vllm_config=vllm_config), } self.stat_loggers["prometheus"].info("cache_config", self.cache_config) diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index a5ae21c3966a7..c8aec8dd3afa3 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -6,6 +6,7 @@ import numpy as np import prometheus_client +from vllm.config import VllmConfig from vllm.engine.metrics_types import (StatLoggerBase, Stats, SupportsMetricsInfo) from vllm.executor.ray_utils import ray @@ -44,10 +45,12 @@ class Metrics: _counter_cls = prometheus_client.Counter _histogram_cls = prometheus_client.Histogram - def __init__(self, labelnames: List[str], max_model_len: int): + def __init__(self, labelnames: List[str], vllm_config: VllmConfig): # Unregister any existing vLLM collectors (for CI/CD) self._unregister_vllm_metrics() + max_model_len = vllm_config.model_config.max_model_len + # System stats # Scheduler State self.gauge_scheduler_running = self._gauge_cls( @@ -115,11 +118,15 @@ def __init__(self, labelnames: List[str], max_model_len: int): name="vllm:tokens_total", documentation="Number of prefill plus generation tokens processed.", labelnames=labelnames) + buckets = [1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8096] + if not vllm_config.model_config.enforce_eager: + buckets = vllm_config.compilation_config.capture_sizes.copy() + buckets.sort() self.histogram_iteration_tokens = self._histogram_cls( name="vllm:iteration_tokens_total", documentation="Histogram of number of tokens per engine_step.", labelnames=labelnames, - buckets=[1, 8, 16, 32, 64, 128, 256, 512, 
1024, 2048, 4096, 8096]) + buckets=buckets) self.histogram_time_to_first_token = self._histogram_cls( name="vllm:time_to_first_token_seconds", documentation="Histogram of time to first token in seconds.", @@ -361,10 +368,10 @@ class RayMetrics(Metrics): _histogram_cls: Type[prometheus_client.Histogram] = cast( Type[prometheus_client.Histogram], _RayHistogramWrapper) - def __init__(self, labelnames: List[str], max_model_len: int): + def __init__(self, labelnames: List[str], vllm_config: VllmConfig): if ray_metrics is None: raise ImportError("RayMetrics requires Ray to be installed.") - super().__init__(labelnames, max_model_len) + super().__init__(labelnames, vllm_config) def _unregister_vllm_metrics(self) -> None: # No-op on purpose @@ -421,8 +428,8 @@ def get_throughput(tracked_stats: List[int], now: float, class LoggingStatLogger(StatLoggerBase): """LoggingStatLogger is used in LLMEngine to log to Stdout.""" - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) + def __init__(self, local_interval: float, vllm_config: VllmConfig) -> None: + super().__init__(local_interval, vllm_config) self.last_prompt_throughput: Optional[float] = None self.last_generation_throughput: Optional[float] = None @@ -515,12 +522,12 @@ class PrometheusStatLogger(StatLoggerBase): _gauge_cls = prometheus_client.Gauge def __init__(self, local_interval: float, labels: Dict[str, str], - max_model_len: int) -> None: - super().__init__(local_interval) + vllm_config: VllmConfig) -> None: + super().__init__(local_interval, vllm_config) # Prometheus metrics self.labels = labels self.metrics = self._metrics_cls(labelnames=list(labels.keys()), - max_model_len=max_model_len) + vllm_config=vllm_config) def _log_gauge(self, gauge, data: Union[int, float]) -> None: # Convenience function for logging to gauge. diff --git a/vllm/engine/metrics_types.py b/vllm/engine/metrics_types.py index 5f7ec3bbcb269..5c7a430d11c5a 100644 --- a/vllm/engine/metrics_types.py +++ b/vllm/engine/metrics_types.py @@ -16,6 +16,7 @@ from dataclasses import dataclass from typing import Dict, List, Optional, Protocol +from vllm.config import VllmConfig from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics @@ -77,7 +78,7 @@ def metrics_info(self) -> Dict[str, str]: class StatLoggerBase(ABC): """Base class for StatLogger.""" - def __init__(self, local_interval: float) -> None: + def __init__(self, local_interval: float, vllm_config: VllmConfig) -> None: # Tracked stats over current local logging interval. self.num_prompt_tokens: List[int] = [] self.num_generation_tokens: List[int] = [] From e35879c27601b09aab49e054786ce2a459f7a384 Mon Sep 17 00:00:00 2001 From: Jeff Cook Date: Mon, 9 Dec 2024 23:54:22 -0700 Subject: [PATCH 294/397] [Bugfix] Fix xgrammar failing to read a vocab_size from LlavaConfig on PixtralHF. 
(#11043) --- vllm/model_executor/guided_decoding/xgrammar_decoding.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/guided_decoding/xgrammar_decoding.py b/vllm/model_executor/guided_decoding/xgrammar_decoding.py index b59a2269d2cd5..80e88dd5b4b37 100644 --- a/vllm/model_executor/guided_decoding/xgrammar_decoding.py +++ b/vllm/model_executor/guided_decoding/xgrammar_decoding.py @@ -148,7 +148,7 @@ def from_guided_params(cls, else: json_str = guided_params.json return cls(json_str=json_str, - vocab_size=model_config.hf_config.vocab_size, + vocab_size=model_config.hf_text_config.vocab_size, encoded_vocab=encoded_vocab, stop_token_ids=stop_token_ids, backend_str=backend_str, @@ -168,7 +168,7 @@ def from_guided_params(cls, else: grammar_str = guided_params.grammar return cls(grammar_str=grammar_str, - vocab_size=model_config.hf_config.vocab_size, + vocab_size=model_config.hf_text_config.vocab_size, encoded_vocab=encoded_vocab, stop_token_ids=stop_token_ids, backend_str=backend_str, @@ -176,7 +176,7 @@ def from_guided_params(cls, max_threads=max_threads) elif guided_params.json_object: return cls(json_object=True, - vocab_size=model_config.hf_config.vocab_size, + vocab_size=model_config.hf_text_config.vocab_size, encoded_vocab=encoded_vocab, stop_token_ids=stop_token_ids, backend_str=backend_str, From bfd610430c04d2962a03a2db304fb13b09b4f1b3 Mon Sep 17 00:00:00 2001 From: Diego Marinho Date: Tue, 10 Dec 2024 18:08:10 +1100 Subject: [PATCH 295/397] Update README.md (#11034) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index cfeb24cbb5823..ed5161ccffb45 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ Easy, fast, and cheap LLM serving for everyone --- *Latest News* 🔥 +- [2024/12] vLLM joins [pytorch ecosystem](https://pytorch.org/blog/vllm-joins-pytorch)! Easy, Fast, and Cheap LLM Serving for Everyone! - [2024/11] We hosted [the seventh vLLM meetup](https://lu.ma/h0qvrajz) with Snowflake! Please find the meetup slides from vLLM team [here](https://docs.google.com/presentation/d/1e3CxQBV3JsfGp30SwyvS3eM_tW-ghOhJ9PAJGK6KR54/edit?usp=sharing), and Snowflake team [here](https://docs.google.com/presentation/d/1qF3RkDAbOULwz9WK5TOltt2fE9t6uIc_hVNLFAaQX6A/edit?usp=sharing). - [2024/10] We have just created a developer slack ([slack.vllm.ai](https://slack.vllm.ai)) focusing on coordinating contributions and discussing features. Please feel free to join us there! - [2024/10] Ray Summit 2024 held a special track for vLLM! Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://www.youtube.com/playlist?list=PLzTswPQNepXl6AQwifuwUImLPFRVpksjR) from other vLLM contributors and users! 
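
As a point of reference for the tokens-per-step metrics change above ([PATCH 293/397]), here is a minimal sketch of how the iteration-token histogram buckets are now chosen. The helper name iteration_token_buckets is hypothetical; its enforce_eager and capture_sizes parameters stand in for model_config.enforce_eager and compilation_config.capture_sizes on VllmConfig, and the sketch only illustrates the selection logic rather than reproducing code from the patch.

    from typing import List

    # Fixed fallback buckets (the hard-coded list replaced in the patch above).
    _DEFAULT_BUCKETS = [1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8096]


    def iteration_token_buckets(enforce_eager: bool,
                                capture_sizes: List[int]) -> List[int]:
        """Choose histogram buckets for vllm:iteration_tokens_total.

        With cudagraphs enabled, each model step executes at one of the
        captured batch sizes, so those sizes double as natural bucket edges.
        """
        if enforce_eager:
            return list(_DEFAULT_BUCKETS)
        buckets = capture_sizes.copy()
        buckets.sort()
        return buckets


    if __name__ == "__main__":
        print(iteration_token_buckets(True, []))
        print(iteration_token_buckets(False, [512, 256, 1, 2, 4, 8, 16]))

The upshot of the change is that the bucket edges track the cudagraph batch sizes actually in use, so the vllm:iteration_tokens_total histogram reflects real step sizes instead of a fixed default grid.
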
From 82c73fd5104e010c2c98820f3e761e1e4f36c135 Mon Sep 17 00:00:00 2001 From: Gene Der Su Date: Mon, 9 Dec 2024 23:41:11 -0800 Subject: [PATCH 296/397] [Bugfix] cuda error running llama 3.2 (#11047) --- vllm/platforms/cuda.py | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 10f83fd304281..ae1fd6d5ce068 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -4,7 +4,8 @@ import os from functools import lru_cache, wraps -from typing import TYPE_CHECKING, Callable, List, Optional, TypeVar +from typing import (TYPE_CHECKING, Callable, List, Optional, Tuple, TypeVar, + Union) import pynvml import torch @@ -78,7 +79,9 @@ class CudaPlatformBase(Platform): dispatch_key: str = "CUDA" @classmethod - def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: + def get_device_capability(cls, + device_id: int = 0 + ) -> Optional[DeviceCapability]: raise NotImplementedError @classmethod @@ -144,11 +147,29 @@ class NvmlCudaPlatform(CudaPlatformBase): @classmethod @lru_cache(maxsize=8) @with_nvml_context - def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: - physical_device_id = device_id_to_physical_device_id(device_id) - handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) - major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle) - return DeviceCapability(major=major, minor=minor) + def get_device_capability(cls, + device_id: int = 0 + ) -> Optional[DeviceCapability]: + try: + physical_device_id = device_id_to_physical_device_id(device_id) + handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) + major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle) + return DeviceCapability(major=major, minor=minor) + except RuntimeError: + return None + + @classmethod + @lru_cache(maxsize=8) + @with_nvml_context + def has_device_capability( + cls, + capability: Union[Tuple[int, int], int], + device_id: int = 0, + ) -> bool: + try: + return super().has_device_capability(capability, device_id) + except RuntimeError: + return False @classmethod @lru_cache(maxsize=8) From fe2e10c71b98a43ccde0e8aba0d4fe0d23369538 Mon Sep 17 00:00:00 2001 From: Maxime Fournioux <55544262+mfournioux@users.noreply.github.com> Date: Tue, 10 Dec 2024 10:19:27 +0100 Subject: [PATCH 297/397] Add example of helm chart for vllm deployment on k8s (#9199) Signed-off-by: Maxime Fournioux <55544262+mfournioux@users.noreply.github.com> --- .github/workflows/lint-and-deploy.yaml | 81 ++++++ docs/source/index.rst | 1 + .../serving/architecture_helm_deployment.png | Bin 0 -> 991484 bytes docs/source/serving/deploying_with_helm.rst | 253 +++++++++++++++++ examples/chart-helm/.helmignore | 6 + examples/chart-helm/Chart.yaml | 21 ++ examples/chart-helm/ct.yaml | 3 + examples/chart-helm/lintconf.yaml | 42 +++ examples/chart-helm/templates/_helpers.tpl | 164 +++++++++++ examples/chart-helm/templates/configmap.yaml | 11 + .../chart-helm/templates/custom-objects.yaml | 6 + examples/chart-helm/templates/deployment.yaml | 122 ++++++++ examples/chart-helm/templates/hpa.yaml | 31 ++ examples/chart-helm/templates/job.yaml | 37 +++ .../templates/poddisruptionbudget.yaml | 7 + examples/chart-helm/templates/pvc.yaml | 13 + examples/chart-helm/templates/secrets.yaml | 10 + examples/chart-helm/templates/service.yaml | 14 + examples/chart-helm/values.schema.json | 265 ++++++++++++++++++ examples/chart-helm/values.yaml | 119 ++++++++ 20 files changed, 1206 insertions(+) create mode 
100644 .github/workflows/lint-and-deploy.yaml create mode 100644 docs/source/serving/architecture_helm_deployment.png create mode 100644 docs/source/serving/deploying_with_helm.rst create mode 100644 examples/chart-helm/.helmignore create mode 100644 examples/chart-helm/Chart.yaml create mode 100644 examples/chart-helm/ct.yaml create mode 100644 examples/chart-helm/lintconf.yaml create mode 100644 examples/chart-helm/templates/_helpers.tpl create mode 100644 examples/chart-helm/templates/configmap.yaml create mode 100644 examples/chart-helm/templates/custom-objects.yaml create mode 100644 examples/chart-helm/templates/deployment.yaml create mode 100644 examples/chart-helm/templates/hpa.yaml create mode 100644 examples/chart-helm/templates/job.yaml create mode 100644 examples/chart-helm/templates/poddisruptionbudget.yaml create mode 100644 examples/chart-helm/templates/pvc.yaml create mode 100644 examples/chart-helm/templates/secrets.yaml create mode 100644 examples/chart-helm/templates/service.yaml create mode 100644 examples/chart-helm/values.schema.json create mode 100644 examples/chart-helm/values.yaml diff --git a/.github/workflows/lint-and-deploy.yaml b/.github/workflows/lint-and-deploy.yaml new file mode 100644 index 0000000000000..ab6f6e5d2060d --- /dev/null +++ b/.github/workflows/lint-and-deploy.yaml @@ -0,0 +1,81 @@ +name: Lint and Deploy Charts + +on: pull_request + +jobs: + lint-and-deploy: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + fetch-depth: 0 + + - name: Set up Helm + uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 + with: + version: v3.14.4 + + #Python is required because ct lint runs Yamale and yamllint which require Python. + - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 + with: + python-version: '3.13' + + - name: Set up chart-testing + uses: helm/chart-testing-action@e6669bcd63d7cb57cb4380c33043eebe5d111992 # v2.6.1 + with: + version: v3.10.1 + + - name: Run chart-testing (lint) + run: ct lint --target-branch ${{ github.event.repository.default_branch }} --chart-dirs examples/chart-helm --charts examples/chart-helm + + - name: Setup minio + run: | + docker network create vllm-net + docker run -d -p 9000:9000 --name minio --net vllm-net \ + -e "MINIO_ACCESS_KEY=minioadmin" \ + -e "MINIO_SECRET_KEY=minioadmin" \ + -v /tmp/data:/data \ + -v /tmp/config:/root/.minio \ + minio/minio server /data + export AWS_ACCESS_KEY_ID=minioadmin + export AWS_SECRET_ACCESS_KEY=minioadmin + export AWS_EC2_METADATA_DISABLED=true + mkdir opt-125m + cd opt-125m && curl -O -Ls "https://huggingface.co/facebook/opt-125m/resolve/main/{pytorch_model.bin,config.json,generation_config.json,merges.txt,special_tokens_map.json,tokenizer_config.json,vocab.json}" && cd .. + aws --endpoint-url http://127.0.0.1:9000/ s3 mb s3://testbucket + aws --endpoint-url http://127.0.0.1:9000/ s3 cp opt-125m/ s3://testbucket/opt-125m --recursive + + - name: Create kind cluster + uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0 + + - name: Build the Docker image vllm cpu + run: docker buildx build -f Dockerfile.cpu -t vllm-cpu-env . 
+ + - name: Configuration of docker images, network and namespace for the kind cluster + run: | + docker pull amazon/aws-cli:2.6.4 + kind load docker-image amazon/aws-cli:2.6.4 --name chart-testing + kind load docker-image vllm-cpu-env:latest --name chart-testing + docker network connect vllm-net "$(docker ps -aqf "name=chart-testing-control-plane")" + kubectl create ns ns-vllm + + - name: Run chart-testing (install) + run: | + export AWS_ACCESS_KEY_ID=minioadmin + export AWS_SECRET_ACCESS_KEY=minioadmin + helm install --wait --wait-for-jobs --timeout 5m0s --debug --create-namespace --namespace=ns-vllm test-vllm examples/chart-helm -f examples/chart-helm/values.yaml --set secrets.s3endpoint=http://minio:9000 --set secrets.s3bucketname=testbucket --set secrets.s3accesskeyid=$AWS_ACCESS_KEY_ID --set secrets.s3accesskey=$AWS_SECRET_ACCESS_KEY --set resources.requests.cpu=1 --set resources.requests.memory=4Gi --set resources.limits.cpu=2 --set resources.limits.memory=5Gi --set image.env[0].name=VLLM_CPU_KVCACHE_SPACE --set image.env[1].name=VLLM_LOGGING_LEVEL --set-string image.env[0].value="1" --set-string image.env[1].value="DEBUG" --set-string extraInit.s3modelpath="opt-125m/" --set-string 'resources.limits.nvidia\.com/gpu=0' --set-string 'resources.requests.nvidia\.com/gpu=0' --set-string image.repository="vllm-cpu-env" + + - name: curl test + run: | + kubectl -n ns-vllm port-forward service/test-vllm-service 8001:80 & + sleep 10 + CODE="$(curl -v -f --location http://localhost:8001/v1/completions \ + --header "Content-Type: application/json" \ + --data '{ + "model": "opt-125m", + "prompt": "San Francisco is a", + "max_tokens": 7, + "temperature": 0 + }'):$CODE" + echo "$CODE" \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index c45c941b00e20..ebf1361976c5e 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -82,6 +82,7 @@ Documentation serving/openai_compatible_server serving/deploying_with_docker serving/deploying_with_k8s + serving/deploying_with_helm serving/deploying_with_nginx serving/distributed_serving serving/metrics diff --git a/docs/source/serving/architecture_helm_deployment.png b/docs/source/serving/architecture_helm_deployment.png new file mode 100644 index 0000000000000000000000000000000000000000..8f9ca29795ffe442c2d22a5ba79c3896e36eb2eb GIT binary patch literal 991484 zcmce;XIPV2*EXz-iVX`$5h)G|NS6-MEFc|}-XkC&(xi7X41;u#7J5)1Aiehzl_rE5 zdP@|fmk1;T2qEtk=brm`pSkCLpZMeZe(3?^%D(pAYprvg>s%}1iIy_WdFJycPMn}o zRZ-MAapK(T6DQ6f&z=E(l0v8+1pYbcuA}_$MA-n#>WLFKPpB$B(DSj_nDhQ{Z47t! zOGwVmJDQ5u_%u6Tn-AZS*l6YRijf;neX%h-X)4B*f8BqB=Ej~N{r5 z;^+F~Br#DN0~cFDLzj<3K5t~XXQ$0&W%)17+9q;qLs^(lQk^<`<>vk0PW&(b=@X&q zzx}#jq#JqvfByXkM}hChl8>yu?YkhD#iDF0`;O#t#>{rVxfNlK&&09Nq60wkZP!MT ztWDi5?)3xoXl`^Mb9T%@&V1e6jKI}Rle+#fw4HTZw07GK3l|~{tGKK@JHe)Np;9l* zxv|?6-Muj9A|L*A0bYCs%`#kU9hX|*i!ckq&4HlXf^uN|3pvALv+Y3Hmqr1?d&QA6 zdoR%9({-srhG5@;6z5Gy_@4A2{2iJmZm>H^0TkJ$kbqu^u(sNgUXEpOAHadZ1LlX- zi+_jTt7G5k+Cg(S^M&$j0!t;bJ?V$O zZY#UOR1K{F!k+&AvEN_cs9pzazbDmr2nwuE7D5NuJit@=>iZV^dP@Zo=L zmnk$iBXrf&WA3iO%4l(p`WhzqGJBLIT3?<7*%}h4+h}v;Ihn8Hf3#p{{pAg}DEEBI z=A>xdR-0;`6kqMNnOl#bu~*S?{LRK{ea2qFFsm81jIIPH#?nbNkzJNKE(og;Air0f z4fRh%HcAP1k?zVvHXHOYr{O-uyk%ncl&FCAcX)B4e0$%I2ni~zH5PAY(w|Js6 z3nO=>T^VfQ+6-lF>VhJ+m`pv<4f9^h@}w}I{oI0iL?y^Dh%|t{b!nqjxdc8rOfpWO)|2JA4BY=$Y*;sEc~Vtq2oKf?ZCp^jGr!h2wt#mp1}{+ zWFS9Q7PIS#l#JH32Ev(kHiu-tc0On&bpOh?Y3@4Hx+N!KL74A1BW|`}$6e=5l=Wu! 
[remainder of binary PNG data for docs/source/serving/architecture_helm_deployment.png omitted]
z1wxAg9rHpmO-6|#tFmTdK?2VGVeDjG|Bn`q`3lu2SkL~@*Y8?9QGCZ+d+Ti5+U!>M z`e|XZHiu8QcCj#FRby-SGICY(wt0|Gkn2)ACb9{zx+jh^&xJg0b~SKtdHCHvCH@8F zL2?w9BQ-|BsFjY^lFws2sYGUWI%EQWx3phIXD9&rW@R2+>9PMdy;f`c_Rl>FzvzWq0!77k8uz*z9H(wB6(nP(?+dGGV&7VWF2d*A`T>6;xf zHLNu@xRyxV&6QFqcgFcGtR+pu-&eq1+>~ZCvsVY)NnD~M$~K`bSNh5 zXoQ@2J}=iPGuT`Qu&~C68iK5v*z?{70;oK*qM?!nsEX|kmlxXELP;G&WIC{mITdI) zGe-9uNDQB|N0t7I3;RVRJEy+VAwQrZ(8-IvhV}`^3$1qlay))e1o|n>qT$BtK)H z63iJ^Tc?D*uL|9^Y@zY;W1XGtgE)ro8_Wn;>-cz0Gythlfu^e^DyG z`>#1rL$&Iv_H|y%?ntba)4p#-N!!`;gk=D7k3Pk-oIF8gk9u{S;_*@1u?TM2^6obS zm^(ylryAM0nimT~;Gi>yCM2?A^6G{le82jScx9-fCB5f<8BzPd#F|aVtrzPh7yXfU zEwaX`^8`LL+rh!;N)5p->nZU=>V%Ml6k+! zey;O!llfgB7gRGBvaAc!!4F0LabO^k-JS3|*}Wc-#$}+LV251&9Mm+{OLht|GGaHW zg!~I+|MWSzLJfct`87{kHnX3wQb!Xst8@^&dH@zIz@+&!)=k&r4?>ta4AjA9Yd$h# z)4BWK{f)nJ(AIaUf&n85yNyYa(MQZ+omxtb>h{=i&p_{ro}NDQ zZB!u6J>?G#=jL6|P#fB+`JzO_^bc>%KtrsaNThluozpQhH&DWqa8xm&AM#fm{Bef4 z7tE4LOQBAQn`#-|`sA8E4{8}d5q;O(6oi*!w0qs_ihI+bMDD6yzCLG3GqR>X!5AQ| ziO7cDrpLn&Dpev~{{AtX3HVNYX_U_mH3L?ECkTGpQiBzOBR&#W@0k(KEAZvk?F>_g(^ofQg{pX^F80C>jvU!x!8MpE`I4~Is8IBl`rm)_T2Bp_#$BG zPpt9%0r?T{0^BO(>o~7l1S1vLK*e?A?~&t_JkUYr*2K){9yURmLUc25vov+5_irZ| zj)+g+)iih*J{A%A7WBc*%5KM~E$1{-o+(>K8BpSNgU-Tukp>`n>{RU7|Ld%`?!I9M zQm+i*GOiHUITs>X$DiKFD{B?2S?=(q;D|>hyQ8G zx9{yIfu?k2g92#eA(d(vheiXr86)dyBtN^?<%~?0t>vPd8%Y*Ov>Wv$iPy5&OW$291z{Y3@pl9Iz$v!(ooSD8L)_ zaO2C7GAV}KN8OZu?|R4m*{pt>^*^3cCMgw~)-?Q-*A2#<`xG%HR7tlDZ}pOZOxLeu zh=K$4U&MU>i8iQo9M7uD!NZ8T4=z+gxE^B*N5Lq+DJy4|ShLNZ5FjvT;jz2mZxuM{ z3Le0>zAhe)MIF9&$3cx8dj>jcm(+)M#Y{c!sG zQMGJN+&7DT3-&E)bzhHRZ7y(X9#X8SfgvOiapJ=^tY!a!SQ_3Sgei3ezlibWo$qLi zGjZz_&nwM`9izyh*N%&htSTm zzJZEqa|fFQm|3Be zYZ(IV4nX(VI=6zxO)7k(N!7SOBSZ^h=CuF=oSNTQl1MoCGx@!Zie(K| zaXl4hsEq|DeJ&56NM&(-_W+~{3PDnDA4S_fYp+iZd`s|5>K}Iu_6<#t zcqkUl$g@+US_!(W_8i+pOAv?JnA+M3g|; z@~d2}A9qGDp!nfAe&TYF96gvbTuEj8JbK}7$Oh2jSrp70&(kXeH3Dz7@edadI;PaJ zQ+c?boLNwfZ)^no0CK%rwf`-R76aM-GCS$m)7a9}4fSqp{z zFBr8U^R}|#T4Y0VsKS$WQgMgHS$$~9mj}U*0ElB@OnD~E9X81~Z{Mi*a_eN{pbZ4ZASblrk`p&5z_!Dj8egBEUfzH zyyww@F0j*MBN0u`|3Zry!hw01ns~&$%NX_z#0Lai zNRh`icL1V__o}53@*i5r&vieeKwhWm@ zxLceLyz}>O>&_qrT4GfN*OEm<*U}w0d2JtQoNI>x`lzGJwj~AUE+wqLMQY0o8UkE% z;n!o`ya}LEWi(ASzdjR5{FyGp8{Vq&cgcTuIB~DR#3)c&);T^tqb>{65g)7s17s>| z^@MLQno3Lye3u1VET-Ml_CuH&JQqgq-f%6Ft4DwN%OM%2LLTGuC&MIVU(o_By*C=Q z#}_sz{)j04PkG{`46d4&t>@eQC%5u63taOD42Kd+RdUZY>PC2MISJMYoofm0f)r?5 zi=@){hhZCBw5~9hR!jSPR;DVfn4QKDxuZ0*(;cShVRjX%pc+9&*%E*MJAfn4%h;B4w-K#b{fUh3vg z)Xk3!%3bZC(ATq^hFmp6cLtM=CALaDE^WvZvwPlL5n^ZK$sKf~aXhjk;Ba-Tkx|xr zA@Ha&Pc3!D1^MyBY`q1&DP`Z9Ejn*De0)btO-)VGD4rIx=xO(zoSr&H zNm54j=pO~!tk*faxMcHnDLS>E!FqR}tP)x$_{=(9GFr|xK#&XFzY~bxaO0J12$1wL z@%53Z`KBRTb&ch7>oZ^4|ANTh94$Ax7IQZlh4W_ z6;as{RQjXf2EZUJtctw}iQV}&L}n*fgl8xcK9L=FLdWkEcibsH&+;G7@PBb_I@oO3 z-)uPaG(Fs0leLllEGrk;ICFI!Qq69jA+wULUp|za>pTMyGz`->2dicdpI)h5&ZSk% zrCr>i2xrQ+N>n$(Yhcp^%oFIiPLEA+6xoQ+?HKsA82k6E`?Rck%J+ua7_as*QTs=Z zLsvILR8PZKC$G%ER3D79%s??>CE;|+IFeW`=7SzKj>!FxD!P=*`%)33EE|4KOeQx? 
zA_jWQ)|e1JN=!>~g~cUWTeguELz0qAl9EMorK%w&5+TFMX&8EVoe14&vTz{uh=ay@H9_OBl(??3=Zbv^c=Ww6UWWr5_ttc*He zJ_4xq=Lu|2vWS2@?=?{3z3CqMS(*F}G?ShqULFtyEzFA~HXkCw>27nem#me>^)67n zjEK}k34M_dX|P|QLQp!(rIqW}tCQ)Jx6)@f%LOQ;uzWbOJQi1HXSTeOqy`gBrb4w# zr5U6EJDtBJgYVkO#ILwGKSHrOH zu5lTJU3RWObF1_go5=AwF#g|NFF;dE*`O3JF> zp67A2nI6Lv^mmF0mYLx!BsNn8JU|EzlP0^7?H*AOu~upUsmX~8C>uykCMrE$-#RuK zC_b+0goihnp9qr}AR{k~r?dRCLUIQN7vb_GKGLc!9(BHKLu662Pl`&K3oSU%1RX!L zKt;-&#??*o$Rp3CZpGwzLr$IC1lsIqy}mk zmf6HW52bjkl(ytG8D=HNm`bl_T+x1HN%!ep>NBq}t{2acYIQqZDXmzW;W+K9pzfnQ(Yn_+ce_vE|L~RlBrv>D zM{MS`M~rGuD*0BfR$Ebw8idPHKuaH(v{v)n94}HIR4m85U>2DLMJ7~2J50Hf+;bEQ zz&-=Q@I#ALK~@WEL-wgD2iR)=@`^`PDgVfc;xs@2Wn6KdU5$5tZ)fkO;Xynsn+gul z=Wyb_w+;?NQc6ilRd;8%HBpj#UyG^t`2{sz_^d8L{}rNu_MW(LqPdk=i`Lg4)eASf zTqYOLm29}sL?2v{%F3NAv>2q#O#%13|NZV2_@APSn|Be-jNCS`x6A8!+;0hCNC zGG~#%jScbb*0)U3ALs=X$N*%1ZFM(lJTTP#QqU<-V>g3!%^&yifesp1fQK8DQfSrt z>pq@vKpw30G#`zs2c+a_kL@ldGk>+CS`~~7hwQm~NN|8&W4ySp;g+I=FxdGvvubrQR2^=3Y4A;V5)1tR8;euyJL}acCBq2h|E#<#t{ofN+#)M zEV9sP!<_qS7shcDBz9C>Ic<(ZYSH#yG4S(b{uB~)&yz+v0=IJ!uDXVRjTzpRnn8}u zYm#!}L8_XT{66@9!ADQdAgLH|jvMyV3+dfbRX_l-Cavh&xYa3jeYJ!M#2*w=%Or9r zrmJHV_YRfOuWx6ctnO^$jtI;ks!{%-!@H}{5%kJlG;(SD$yT~83vdrzMMwGAAw=Yo ze4%wNxC3)G)wu0%$hoiDVUYFhp2RUfn+6$SBQ+QlZ$o&+Y;W6EC|xzp`e|>)=AI6TZJ;cF08|?$#P8j z&$2%+ng0u9wnA>x=QpdQkH-)lX1TE(X03r+vs8g@RXb%<$eVcQVnN`KotQ9Uh&+tN zb;;Q$G^_D&tl*M{_;f-}cB6I5%+YGA{)0-?$ei;|;>ja`&5sMgbwv9&nM-N8Tr;Yj z`3GvZO@O+S1GVLkjgf9Lv@dL6n^m^ctSA9XUW5mTf@YsS{l}HNgd}e&3i~i|us!tK znvMMFW@?;R7^}XL9*VE|!Y@)a=oDs%xa*O;&3^l_*my3-{!>*JuCTDMrO9+gi2{T@ zJ3bp+jzTuVyAH*8jUd9>Dl#rIJpE#2nCuc*0xd{a$(ddDi|l(5-53Kj=<+efj%(d_ zna@Z4>t9_za-cX=A@D*5wRiLisHx#64JWlLBgHEO$+$hgm(6IXutYB)tOvt+CJe>M zuErSYe1GDtyvPfWQ^Cc-hBe!!dCR0B4L{>*m^^E$n&%qp9M#h(vD1O*CN#Dv$ZGbH zDv{h-xx>b8_OS$v>DlgPl*RD4p;&bqyt@bca(ymNPMP^Cp;EP2D+Hge)mBL~yN<$N z@4Yd<$KBOH0v!d_Kg?don?dQmP&ykA;&t!n{Mw$}BoFW1dMC8iafs{7LQzu=E30GO zkUR<*((YhP5U%C|?bD=@n~YU6bG&!&yFIdQXt|Habfb7&z(G-gd%vA5{d~WM@TXm# z%0b4mnKe0K^a;PLc(B-d_!&`pw>iKAA=q10a&BsEM|%x*!*aAocQ4-{ot?u%FC0@q zeOoyFD?k3&A>=*R_{-13I(CoQ2f(eWy=ffnKyPYTiVe68ip>GxulAo?eN4hc0QzH_ zmjlc#WX9{;ZlDZojcY>dfU=FTKa4>)M}1?;2}<3A(ZjkFO!NYBu!7}kCUq--UJw!0 zmI~!~Jl-a9MFe_3EUp5ydq&{ zD!^xBeRVLIQysiP&F`SA_!;-p(7{=+;C+QiEsq%pp;)qe>j;A3sR)yu_D!$YlYpAj zvuX$dDG&OyGn03?%;;?X1pf3K{ZAljB~XbRo{LHlR*tx0&UE&0zuSHx#yZcC4&^&S zdQ=KF1NB~^D+)EyqfXASchyZe)RL`bSO z^GB*uMy!4`8&mGtk{u0ux^lU=*nYNAKRROT6l#(YXd>&akjz=PX6KAqki7$)p1THZ zF!7~^4w3mQaLa})A6W+?M3OBkO#mO5Q?8&-lJ zIXgW~8dFEG@MecRbU)Irui7@J*nR|!2epy^f?e~WIGXug`2&euyTvVBxIG0UEFb}ISQ1iF$bblX;0G6a3M#imAGIcjlE~)TR@<@1nPaDzg!wdc;^_pN z^$7>Epj!D!wHFWwCNc7xrB-~Y!d8E8wL2J9I9Wy{P<7{JH9fO2lISCfhJ`YnNIRme zX)H%0$*%_6^7)mGMaaUvvPiq@@9&vuT^(d+xqCxN9kLzqzs_4Z<0*4{vPsQfz;^FV znqw3z<*Nys;Ei?>&t4*mURm-77)is1Lr3HlX&|h%V@dgXTkKHWYMjO3t#%EtmvTi_tLyX_eF!YzNu|R!6keXJCb3gfd6TZL#u$S_Dk6NbR(lD~`tW#?$MZ zO=|9ag3m!s(i^u9Hlg7k*+xRzc_d_K5^AnrIegY$xJ24caMvPT45nm>1fojN1pZQ; zm$?dw7Mk(~E(d6BsylDO!xUcLJvx%lm*>bHHlZwsa4sjrJQl~9hE>7`%F(&&F6|>9 zOFweKcgk~R$-ALfg_?--OD2?8nHtAQd0FJmLYbov*;K0cSCt;Ae&{^guaQ(gFzvxg z-48xVNX=4#+1;+%V07g^(T(#sgK<1>OrIE^6=M8W4g-T2(%{fg?>882WRFtx@s5eG zXGS$5TJ1_6f)Mkr??}`sd*kKfzPYJ!)hiy3R8+b*a|MA!5azkFGK(y?)Z^*;It`OB z-^>y9z=5H$J*u&ya#mjS?rSq&`J!95^%LrXc?H`VvnT!RhGXU14JuBEF;73}<{#a8M8I+Pi!bTGeYoP* zch>OQ$|qU;5w-#A+MAHYBRPWVgS--62Fl0yLTkLWn`!W=a!iE2P4iO z2jzVUXz#E3C62)H^oSX6U6{hj*D_Yjr|$`S(ZHSslFDno&Ey)~QtlM;vU4$8TR%rT zm%$VdwXzn~VjRH9uahoK``Dn2MD!jT);V>kUVcQ;LO3%U398%eUP|^^HPwc)VyQCB zrgKykEh@*LDO^}RBBRiH^AN5zzEgXN0a+tXwlT^V-=t7pS(vLO?%ZeoNEalw4?mO6 
zDyMKpcEtJi>ac2*1=>~+eAQb=VakQxDf+NMF9TwN?|btGfAGvye%WGo&^hy91JbrA zk6VP~6O#7p5s?Pz4aB`j{zNfd{?ux5nE9qdk_=1vOc%$71ei4$>^#I5HvVhgR>PQz z+FcVpPfWU%qgYL_{E!}a9rW=L73GruYLe-nfTpFTWwu&?hCWj%G_b&O*Kp+{Z927i zX;Ljs@G&7EFV_3UtrTHY%^r5fV+%JZBF9UchtP98r497HkCb`dJ_*y5=B91I0du=W zF96qmzPe`hCAeMX6xYOEYW_#x_HPL<|0EjF@k^;HgdFXr;Vs{T?+BH*W1~E&OhX6e za4p|Lr;zoZtnnomd(gu^yCxLk5RP2cQ+d-+V)UiJ<;1+^czSXdVIR0vRbS7gf{L8e zo+4p4j}^2@aq%bvyJc9+A>5_#PlB3*R}x%zz0V4wLik%LY+M1-Bj^d4kkDYfO#MUcIYY zXEK=!7wX(GWk2&Y#@EB6rbaaCooveX4JdF`xC{5>s)tk6(#Pbp1Z8+tn14qS_e>MH;W1i-7&Q1K|n_qsFY|_~GfzJCZHc~rThN|-Q<*qoSo4~}KVKIj01Z4OPF)fBhnFAx| z@pGG4J$58qsNO?DHO*CxNeDY4ox>Mj&DTHGh zT;f3JZy8foUk&CFfoUTDOzCgS*JZtw;X=|_f-l0jPta0Ok&Tvgq_~5}vBXoiBOHRcV)E>!&tdSc4^UJESbo8*XEjGQoz}M2&TD7(6w-V!~b5aHEPmB7yEr0 ze2T;;?#H8}K~ zBbc$cq2-R!M#4pskG&LuGjR2Xi+>jj_>-4Uo^RLf%s%78)a#lmd9`(iy zWYlfJio#_HmCqBp{fSb>EGr-rF}2{9kZ`Joah{-{An%;_^VJ?=iYt8crNm8Bb)z_n zO&m4{N$Xt;VQWdXQfl2>^iTzCq$9iX12RB&8HRc@A_s5?*dhxou6nseGFhraFGa0F z6&kKps_<4}Wmp1-Pp41gW(xSB} zosOG-*&fEeHIF}>$1X!1F!UzLyGEz)t6y%>*L?cbrf^WMCg&7K72VM66^3Ytd~=9u zZ#&Ln<3tR*XtoNg9}}kq@ffP%5tpse4;nOv5!_)l>Xgib9Q0P#!>=CW`gcFi*N3)+tVG_n%0>7(I+GW%wQ^;T$u%*oB&1o z-0-BG&b`@}Wy?~v(mu3%o&FyZ!UW)dyz*ZZQ}Yf7p(c_JaOkhb1Rkcvj`{Oryxl%e z&TTZ-xIWX(lXn7U{FmLf08F$$mGbH?-Bm7V)PZ3bQ(+(aV4qtt@0rX$>OGtYNixY; zS2xfB_qR&pQNOp|IDL`*!2fbR>+$>L?qXis-f*b7SLK^TL&%upUa9TdLgPf35~{CM zxnu`xROD0$uAbKI@hYvbdBN>eZP++Ncz3cwu(+#(;9FW#JK= z!1gf=q$^e9MlJWe5mUuwT5DRA@?8vRI0s!cQwfe<8CO4SP(uByejF2WqpgGB?kz$)R! zrsh~-*=FPcBT)H9yPp?2cM^8)kINsvQ;>B(x%_)m-}!HCWXcN0uJgr>Xr&X@($f1PIdy+4 zltH2)?Wcz1AbQi|ggmSJ>txgY^PBtY)%4fv9tQdf_!BqM$D7Fh&aCv<+-(wAD}T4A zSy1Kp%k$r&?w@@GN7m!Rs?&k_wjYnG zP?yovE`FZ(j1B@dgXMC&*0x-pt$SNFQkxs5A+aa?Z&~7eFR|46q%-9LH0#VZQ_Zuw zIqr1hhPh-EyR#~bCg>%p204%9cDOI6M;B#X9n1LU)$G**AOZE{)CZ9OZRC;p01WyB zJHwd+=%;fejQQATqY7bmX;? 
zA}Iv=hm4SulCuwI|H=X!?|v>2MAEh9bH(O~inm(*K>S^j-StZevn1?DfpW6V$s&-n zFi>~qq`XV9pvP*4Z44$qt+#-~uo!^s6+@z@1!^g>!Pnek4C*)i@{L(xoHCceQ;ygC z;n$etC+ZyN(ZYcwfV@w@ZHWd9RskEAjkr8&lpoO@o z_a3;Fo|<4ZBs?V?3@J8X>?KqhISsXgn9a91h9FgkTL6g=2nHdfQ6zub;F~(>9I;FT zT%s)?N(TTx4O{v-!-ZGbknc#&RlqTcm&oCV3wRb5@+=?&`a%|FZAA|*UmY{Z8X3fSyC6wvJ6-MwzFE;X;_y zcjpJ-M(A?)&-vuhM*+0^i$PMyUSzJ%4NacUsWj)-H}A1$4$Myr8{EKuB?NmuyZV*5 zsg~NwWAO4?@C6&Gmm)q4HTc)YG|K%29qpgh+t*9 zh*~6U{K4d=tRUef*4#1NxdAyzX@Dc|QA4#)MV) zPAjAigU{A`dI>GRArg#YfDxQt8 zu)zULwyx*RYhrDeKmV0oJ*36lh~7H+imDBe%YKn~7xcwgf#amkw8Wb>_wxZQjkfIw zIXUM6Jk4e;`r5lWJkRwK4sI04grO1B)sS0?{n!)Nn$IyuQ-++&=?cEk?`P+6=v}(< zGdA>nd-*p#U^?X-j-%?0Sr!401?PXJ+M)cJyZJ3k`!$AO+7)xlfF~vY4^;b?t4NUd zmkGH5ahL)9N1UXp#n|C(oE#L$Ai!y|kbEu8`M@bdK)eZhQNfLv)=1a;~?yVw0dtjG%G9*?`POx;V zgRPYyXIPt90MjI+m5=BR?SMT>m(Oy`f>3G5YFP-MCCmm$OHPKsnYqdF+4if(9=$}` z4O!Xh9Q;C9Sp{+Z6xm>Wai9r%qYG@*`~u9tOgWswA+ve_dEG$yA-Boomu3heZ~*4G zku-SRHb^c%sB-`nVL-z*D|l|E{GcpKO}{GZ@L42Q2h2CB_={dFL$JlK+~qcbiAXwQ zm%feK`rG!m?dr18Q>T(xZT80CCO6HyyeZ8{QDt)#UI<#wm3rn>pHz!Fkc$;Tao8;l zLj$x=wF6k4G8vUb7@V`=({#=xo?k7a)DC1r(M5JV=-X6J z%S7AQ#9bpmQyIZ+KA!}wbfKN)i4jU9i`j%SQF)K7-v(a#ePgSYOKr_ouSJXizF0#z zOE)z9omsHSz#-@NyLbZO@iv`!{fKAY;A#eA{Fe!S$XGMKO??b&srVCTgc z?a+M>DlGz~yC{CNa0#E0$f>KrEYfEuMhScO0f*6I-QTjA_g{@&tGz3X)Gu6qe_Z$r z^qxz~plXMS3pgjO7b8sr2zJWD0<6c=OJNFYB`%Jy%OJv>WzEk`LOx7&8N$4lbzR?4 z>CY!&#&Q;0$}Udvg)mi>zgch!ef9nOrM-2&c4j#)(3O2n{x8d78y+xtNPHw5S5F`ps*2C^MOv2*k}n8`WShK#;3Ww$-rXTrCP-vz2PEJjyV3# zr3AzZ@t`+*SaSkWd*9B%I@&F6!y>$yKvr82KovI9M0?1>x9Knsu*u?iZ~daMTc%{4 z^j8YZS^-0H+<j44O?4NjTq0Ma|JxNEYHF9&9uWh>k{@qZ?aT z!2M$f`|W=*;YFd;A}tLxtd+=iL3M6kt!`qnvX(?p1A0hgC0Ae;UM0KWewc?Vt5m}% zh~$J`3CD!Frp1E&Gu}?AeQZ@lgLbYK4l4fy9|g{FM$HUMH3Wlmei)i}9KOyiO#w@# zh7P|nDcfo;4z6I5w|u^Ne&b#Avqx!9jvY|KTL%@JzRA8)ascp*6%NZ4v_+(7=n%Ar z8zN>@$2Q%%_4o66wRkoHdu_uMja9Tijoe1nG9Op~zOXDI)`7?gY91wBZI|X10%b5n zK{e2?$W$6ko|Mim{M2KQ{88~+oy#^GdHe!Y(U7{g-7REL9#pg3LON#`g_tr-zS>L$ z1{l($$_Lsz*&<{&n?hubiJt2_teCc#t-qZRnwGo3ftU?u5@iF8dfOf$JcY_jMTaM4 zK|k@K&Gm8j+vip4QIwnZ43RzUyZhKq(lzxm7?+C~n_RF=)>U0p#B3#zu_*o?mnkIUA~ zoyL5w?QHICDN#s=zBpL0c03(wr@U)dftP6k0T$fEKs8DWIM(!8t-JVO)5NV}(&Wa? 
z&Q!+CfA-RV2F7Zy8>>HB^+$W&57~?wQQ0^GUzsySmM;@Dya}=HJIy->gx2p7jVF&| z?FBQJC>R`CbXdjLV&Tq+;&MLQKkwGKfBpIUY>fhUgX(Xt|3^{(AAQILFMuI=CFJCx z^@o>8dr*zbBhBI6SPU_A(}hYZnLb-i8T+S(DJ?tc`SV-Xujzb6 z1(mP6LX2S_@#u8`#7$6**`ZnY(BG$!Fww?PW3x;tfLcGZ5pi=U>FY%(-pvcivLCpG z9<3Z%!I1Qz1E5=0uncxdl42We2sMOiM=As<$CM?Pch>5^&lDyVHIM(9jQr^(v zXAGmC7E8GeMPMF4I3TwGsxZTpEifqV)|Q-uY`p0VDG#uywXw_=YgM^R?U`` zT*u6O7_t{-%ZV=Vpl|BAb&??!Es)PdKa#?01q#dMTXcYAa=bnfL1QeF(j~IOE*J`+ z$HLJ>jbSe;D%E2~#VYAlC?=ULF&IJLRD__?i#G6%384T;E7PD=&Ud?vQZ}In6}RS= zq)}4U8W)W-5GhEl!6Uj7wE;L)-lrJ} z6~@v;PU~FM7#tHzS+g@(u13`op-!#BZbQjmslj&z+iB(c4O8U^Cp;5Yheso9L_)TLz2EwmIbEd~9(#rM7k9}x#y`&O?q9E3wXbVu z*p4IMX!3s=4X$VTlTJx||Mgs9Sqr^|VhHZY=p~=6;)x1+KS*QIrN)Q9ZS;T9nw)V& z=yIpw3e7Cx4hKUkHR^Rub;B8!$)@}hF<%9MqzE$3gRtX$}!oPmYsc9}iiZt`=rAiOSerfQCj?~PhLbG=+< zTuZ2pZt4-^=NwKnu?Y%nNe~kzUj5diGrc{rOh9?D&lE1<}I-%iV1TP5szH33F%tP87s>dyPqB3mNo%cQLkZR;D z&26pukkxeND486)?)P-^>1BM&^be!`qt5&)dHeA2!2NVw$M%qM>fwIt?r{{tzR0HfS*4bd;2Ybuv@Z>?k2m=7^-M+1k1@nJv-gy2Wh?JSd}oMW_ktcZpNh|Xlo4c^6xt`R*{rM zLAW|@I8lF75uitRbMhrBqkqu^s7hYE32kQ@2PObw2vrVXwQ7ujvX+R)DqZ>^4yM&a z??I3EE9|dlmA@Fr8}wo8sCKAUi;Qlj4>?lhdyKJ9t?sSbpg|8sYv-m6TF1mPXFwOV zA>Go5QbQG^yIR?lhI)Oy@wnhZIuoCYJL`cCz>p(Vgx3PtB@^g|fdw*1?A`4md3tlB zQN}huwAVm@^|i?A;mkhuRLO~ zt5jr3cB3l$(lK;JBo1mk4y&>YxjA|8nwF{XNGN?=>yHHplg(D)o1q?&_JF z1xY9Kt0QvrECJ17Bwq0AzIVh8HcL(fuVxp2U!vop@+Nt-upy4lf!B;aAU5~zmL-Y38VMDYxiZyT_%|RdHZZ2N?yJnTvhfQBQ^lq6mm*kZ%!i0ioik zUF+h|AK{#EX&K$R(N67RLR|?x>wLzs?|7gdkmJ_29i>@*0hUNKSYFRQkgqF#HGzuY zDG52>Twp*V`ER@kKn62u)eC8{KIXZIWWaq0o(LH6$z<7i&I3UKxG4SBBsG=@`xA)y z4|zmOBJ#F{Q!N=hamxjfOSuL>%@j3g4q;=aX(RvUOIt3;OD^#9%$v~zw-N(TFcfpx z1;`58bW+7k$paNF_QMScPD5#&S`EFYwdBJ;#9RqMBD`^hT6bk=s$VDcU2AGv$`^}y zzcr&5pen6SQaL~^xTAy<{BZ&h0mG5n8(wpCs8Kj~^RE8YDrQD;489$48F z9p>%G5GT}%mBu{5e+-twz@P0;ndu*qp!R2*hRpW{f2xzz!fMiZ8Fs1Vbq7iM3%m-J zB92!VMdA&7yd^|}=YSc0)I=U8N3d`pUqPeKpxwP7-WI@ueW<)xTCuNhRio$B)U-Tf z-JV}xg$WysNE;yt4u;#_HD&mgl|kL+p%xvQA1po`H#Th9ZVVcej*p{QeYwIlez@FS z{7!e?i~`7k0e{6%cq{c8;TZ8PO{NP5#`f3R86%nWjL;CS>?VeyIUaBqT7Zz?4f@{d zEyitTQf!=lt66b;#Kh}65y5ovwdcLWsOjC~r9DJ5 zQ<6Z>C*8=VjV}M?!+#t^`{qAR{l|-j?9Y}GAJ!wxE!%Hl16UnkGRAdT;~%a~nc4TCz7%nBHJ|qj zQkzUeXfUAeucB9YK%VUttm&cnqlxMQ$u0ad4-BT>PE9C1#Q(T@?F;(2u)#JD;hZ_3>m=%eV-&@m%m zCz*N{zt;@a7UfA6um)A{Jq>_A6@6DUpqykY((uF`ex1By?6#ilh^!qqZN0urk}LWf zr_p+EarJ%Jyjsf)x$cHKJ6~_6oh4Y1Rv+xht2bV`9PdQz4XcJN@2_J#`CO4NJQp@D zp5z8WlfI#i>t2}EMJGQV-1d8;esXR{-r2ta)Yu}winTq!>4xOjp)6;P{%B78=NICC z;WV22Z8{(Q#gRJmPpefvQ6%l>8w^G!AJ1Ri_StP%2;>)(DBqYh5TNfl*L}MmULo=MSn=>`#gXdGziA zZRcbYSPVB&MBwl9d{IZorP1hk@o^LbZIP@bUIzpr4HS8l?J2iS~4&;!S4EDX^P zcqnI3C3ASBnZ_}9!U=kaVHwjCz}706^Bf9{OV(47Eu;aC3Q|Nj8HOx^mJH%w*qfse zw01r%3Chh+6fnR3_Rx+k&qgo=*Ef+y>Xq*oRL>tA#Y>6LyD0e>(c{9wG9&uPVDhng zR52Z|Txxc9l3FI2m}n^#aysSLsq9Iy_EU@yk=NoD(o?rzw56_z)x??;ph2D~0@(tX zMu<>~Cjk-q<7=Gg7b!DhemscB0db{PZT1;V+7lrW zrZRE)T@s2DnsZ@SEc~S8iiWg$od}z3=up(}e@jn|5;kXB@vDOO)G4-<_R=PUwF@c_ znvD6_KSVOm7kNJR&cFM}-wU>|u&^4szSye&=fnSJIps_2x>?YVu-+{?e6e17>b)_x0XQZSISvEF#%@_87usy~~ATUzw{q@XWpwb}7CkNZB% zI}$h&-5ade2FHGX-2su#ws&Dg#7%t3rOph0p9C%3|2h^=5vBnUy2^E>MwG>7oc^?U z+E=}ulpU0o^_Uy>Y`7lfIMKHrP0QV)@#itS+!5M39&}e;c8!0ze?&NV+I}!5?eA-H zM3hC?$Ub|;O#XfAM8vnR!SD4ue{rl#JX@p;C0(FGl8YvHVwMr8Y?(xbo|tlJ!lg02 zcO+cv8U5p$EjOYTeV2TmTd5(>L`*Y$^13JL!(;b0T_}JDpndP;+^RKG!^jF;r6}Yn@>0YyuAqc4Ygz-RBWiz8FPNeC z4u#1y3n8jh`M2L8$DIkB?9xU3(?fe~HZN<5gLuBn;7BPUaD5GW~VSkcC$!@#3 zrVNB&P>w=e4lXbm^i}+Mb?7-UZveV4U5v`g^#7{|`)x4SX(PR$L|8c}u#=-lgCi!9vQ{MQdY%V+;z99GD zKl*Oh1X;Dmg%NZxo!lx|;_-OxuR4F6X&xGZ|B-a0@U}IuYi3t}I{}hC*awZ@Y-Jv~ z_k)L0yxwK`+}r5*j4*sBl@DSpk;*=t 
zcf;SAq>MAynBxm{1RLQcuu5sm**Kzma0(eltYn_bZeZx?G&@r*@F~5E$Qr$J&;&Y2 zWNKn0qIsmao#PIYs0@4hn4Js&G=&U^s#xh7{XC$BUnPZm=2&mhm7-lyc&1#w6C;f_TCR4p10Ukh{RMk#s zq=hI-Szk9uD&YaxYtxJ-Ny`ul@zi$9FiH3^i~8fGYg%>I7z95`Q}NePRV%vWHx0D2 zmPQ}tcMS+M;BpwBiaDca6T~Id_rph_37#lP{FV~C#?(x&SW?(0-Y;$YJ zwvqB5^)LyOgZ8pKOc1g>wh(;oH^U?)r+%JK->k~HL;l(64HnQn4oi!xHzpoZykFmV z{EE1{s8X*Kwo59SIcve3N=<5SA;8snK)Q6sJfHLCI%X$pep(BJ(y=$vua9hk&l|{H zNp+)pBq*2y4UbhGPOIw;S|e)+po`@EGg6e%S&si7ng1x>c<6tsC{vc_hu9%*kMyH; zXcfhm_75RZKbsO!K_Wyy3u0D+;?ou_M_|-GX_+9b=;dvr-0ydJC-Eb42nIJl=eSS6 zvwsqQjM@!I*p-K$nF;_14-GctDm(O`7BOl;#jKGUv+xp?ofXEnsws4$mHDD`n zb*OQVVg!rXd`#f}Sud4l5~pNohpg4eJhh-ZcQK?HcS6ib4KeaV6dD0~X#b?Z*HoU5 zJDJ)73S=uOp9kC%60}pJ)~_UyYe_EmHaZShV<@KMe?$DeH2bO-^t_Q%Q&S&x|Fe$q zWpqKRIwHl~S-MIG%{K>Nt2TnvD|_B}A9cDPJ4`)x{g#4C0rOiK>jE(=#~wacuQ9FQ zAHn)#gUWGPLB{glNK7}70H)u_jncR^=`;)nw1Xn+&-mF36{5nD{3Lu^%sZkdONnW% z_|0-vf6u-DMyoeoK-sBQ#`%n=_Q_#D@7{Lx^}+JWfm@Uz9Lo!KODAP$rr@i3liTn0 z6T^EKzCYaV00ufmz@EMwUYyx}?8AsAL~VOpSLfecOgZ&pOA|0ZuSUdwh1NGPxCKHL zO!`3`DOny;P|yp#&AgjxSQB*YNwEcPq8Dv4jKqZ{A5j83{S+WhFvGTjPfC-dmUd*F zi@J$>=3mdEyi{m!ML6kT2A<}X^cK)Gt}{W2S7_eYCY1kDOk=Rr;zCfHIT6ieRw6%z z0*&wh0BRXUB4blB(*i8#ydgcEBLa!sA$f!(59sJ;%2ct2vV_V&mTX>xLl@EJV8ci(?|cu(r2a z@Rx{y+3U%|FBi8_r+f40v&={LzV~AG&YsImp~R1kpiq(#n#?4Bib_6%Q-{U6$}fNB zhU!(Aj&l7F>9uA*$kCl#N^mP`Gx|k$&GHLTyTc^)#)Su~@1x7&F(?1^#I{ni#Fxm# zNSVF7lDd&@l_I}m!cSraR47HV6>eDJShuT)U!-)g;%T>`415Rw8Sa{QP#kz@R0du? z3k1&e1p+J?zWvx0Y2gqvA)Uy!9seBGi22U1m`cJiOHZGL`sby>ktMiAjI<5UI1=+A z`X)$bIPW^q2KcQ-Jl0DdlSr^wg-2;G4^PxG3Iha3QoZr5b{5aMT+&3Q_VA8Rlq%n% z|K{pvSK5qNxz$Aj5Se00p%0NGB>Y_Tp=^AC?rFSnH$1%0r3^(L*JwO`XsMQ35Hh9; zNFdCXH;l?t&&`JsZ0;4JvQZo}Moj6qz*&m9Q(!&1y#dM^1=6ATzWQzUe!0Ea)7YH6 z{&X7gRIDKWVEYXLvzgZF#1!>;lS+Allb1`{NF$U7_F#U8Ja^3eeAH8VyogPPp6!7| zrO67-3UNSmnR~O}n~OVA^l-GMh-HzJ6T>?jcT0YfgEK~_QyI*51jP5L2 zGz$_F?b6XM3il<#TEr2$_-pOui17aE_Y1!FS^Hhq3!$!i@!vJ?Sn2KV?k+{hc>9I{ z|D(UX!R9#4RyhcDvCw*ISez@sUFc`HGRyPF8D*pDz5I0d5795?t1*MP*#Y`Q_3#1} z5JTkbHtqRb5dD86ObTx))4L)Q8qak$rGGi!ZTGEEIv#v?emx3p&`0v#go4!tS>PA= zdE2q~O)~U{O*qG#vKI%ytzP@nIQ+^D%d4nB&i1{< zZ_Di>kO-iXtx1au!~~SEtJkXtd%ftzr$foYW~4AGEQ?;Utt=*axN8trQhBwcQ3`@M z5H@H%Zur&*-3jW5vp&Xzvr8D1b4b&_(R^6kvQ7TPI-)I;yR|`-5p{`ogpn#NHeK4K z%9!n_HzJNTk#gL>Q>|uCcw8-wh^svI9`Y{LssBN97_YssC4K?UiX54OF%?-n1=*~$ z$;1iot}m_M#k$O4)8hKm2TVlki#@yo4Gh3^v}%lM)@N71!tjsE`gbAh{$)I0Dz|;G zvJo>2dp?fgMO!F?;q{5IMrhsR;m+y^%6kHz>6F-rD}L(6`ThQ?BE)VQqlAxH;&J}H==kVg`A+9D3h*_ z`fYBxIBJ|fQxK)pWM_P-snfBd)4bxblOCtLt6ytI zILVSje;KM7T@Er*Plb2#nbhdz;U0Q=T(;}NwSOi2K&lhIjl!eWDf^$5GYsw5$q6>y zag(N}$3uOb<7sJU*BQ(7_dVjb>ZG;0FoMtD;X4I?%S!^Ids^h}mMXp72E;y^u?)0d z$@kP03xy^Gx}$-gOSPbac9(cmWr;H5A+& zP3fYcd3e}Xh+;(mQ)yRId;)IJS+)dbYdD2-1{?t5C*Y*b&!exAd8qnH}f6Pd%dX3-lIQS8<4A&agpRnx zN`j`%B!Me|4d+o^jvjCu##qhegF)>Q*JwvqojiX``u=*LvHAGgM}N>!<-fFg+5lZS zgWKb!6jn|oeZP8wuxAQf_cezeX8j;JZZCwk*IV8U_x_KPM3@oC*=sG@Ux)1I%Kt#*J039G+96ZdL4HUO$L{iy`D=god3Ox$96$y%+BYh zVGF`vct5-B2Z*E6QQDjqZ6$FF{#mm(iZQr%!O98>3SW!YwGl6c4MeAmxQ@}1f}@5y-dQ6? 
zExy}oHAZQ0NQEwLGR8zYk4j0gEb>!GP#(G|L{yqT5Xmh*jF!`}Liu=mHx-&2k1%6G#)c#$>Vt;$JRYD$Nw!jAIH(wM)TdXM!{b5lyzqNxS@wi6ynOP*i zchqn(AOS@)ib;nh`)}nD2)OH&AwHs8#*;Sdy8Z^K z8*Nt;LF=p09a-Xi{lS|T{lyH#fa+0rpPE{t!R!l(MO(8U<(=lq<4^0QE-+762={lZ&_!y=Adc3Xk@aB3Qp-n48esLMd``3- zhwgg6ne!q*GlLh4=24pO=9P$)!Rpf(ACeMTo7{J{^?4ss!m11}jGRm1peU2q*Y}+< z)W@$4Xp7qchJy5Ng7*^|RNlrhGV8t?8 z^AfC%Cx{PRvlk`GR_{sz0E1Q%0R@SQI?%%8U+`wtX` zKdfSz_xV)8P|kBjor;NOQ=2=c$uz7Bo*ha#D$o$$jKt0cPtiKwJU)dG3$eJXMQu=@ zh0<9`5}8y`LFk+3C&q1?wtKYW(?Fp&kOfL!W`ah6&c7ajo|hRE=FJ(_DRfw~DT0jv z&5HH?E~!D@C9V)I^o2Ho#U1*@oBlK-(L6#K!1mJsmwlw~+cobSV*dM$pcFoLW?HHL z^cK4&$sJZ*7Yut_CgYO~L?zUPUN3g$4}^sFm%OSl?#bV-mzVY(&ytAPxX^O?3H}aa zUI*xBJ5OL_I3ETy?Z>-2{BCdD>=Kz=16;-S0THz#9lEulyD6@|_77jo_)aq%nNTKE z*f1K4tpBUTSuKrV(}?GGBJtteu76M@O55Z7hMW2+xz(kHH}#L(y5W!R;jo*1F&2uu zn@}^Y>2VSKTiHO$pcP){Hf;1_&)R+?PM)QE`eQ7Z;IowBbP;^1tV|T)Bq!U5#2z|19-f!b^vrOzpmOya7;N+BCY85VB+gj?h_4V<6UpPp3p2&l2sUsk`+4$ zz+*--)3u~U2^Uv4v9d~}K*eZFHLODJ!xh<*iyu)6Nv5*c@lnHCF{f(igG!9Rgs%f% zHcPt{-^Q)SxLDKAFaj@mvSq2kF)2UAQlsnBOuD_K5p}V8xPKe);>-@wim(M(moe#^ z(8P8>idCjs+UN+uK~UBCIJgpyt5XvS!&dQ zz6>*A#ximol=+r>Wc_F><9AIs$FYKs2Xkh<^HiAmz`jUj8y(@K+hJ!Yo*G^1bg^e) zqBSGhm)BO`tr?AHOjQIU`)&X7XVv@pkrj}QCk2X=0zZSJT8_h&_=^99Fcmdbo`LNf zqb*~H;RFUY%N3Yo+ib?SEAJ(03R*(&MZVdNenV7tGTI`z8+IQ;B~_{zPOuvQv+6Bx z*o7`xU<}6&J3+;U_}%+T?2c9v`IX>Jk&s@KYE-{_z!<(a5|mLh+fW~s>=mS||NF#$dz|N;@KCTe zbeLk2=slQ&T~!P_3oM&#A63%^^3Rf>XzdOB9G3s#O0W%+f$$vyPtZ1tgwWS^OlssP zjzfx2mVk$={q2L|?18{A9EztQN2t&vnV=lYftfpCz}gK31LJ`dbWmc-`%^4`sGixO zFGX7F%N%U&%j6mn;3`+7c=dm$R->+T#~PlbjLxklv_0?nkiCuy>86|I0T7I0ia~RSrq^m~Id6)2S$heY?+zt$J3>nx#XLB6@_j(P$5h6GTjw zTHqgE?d=Y|49~RHie|>OsiXVC;dFTIYkugxv|OsqR=edx*%5X!l$6Q*+Bg)^n-teI zUh&?$&i6c~tb$cBc_c_fji%C4gm8uIl>t2@$}K}N<`VC*Hau7$6m&XsO@syFvv$=Z zFIZdmC>Olx-S3On5g^_4GIVH{<#cNoK27=~K428CWZQv2={-tXb{H{hE(#7NWUfvJ zIHNqH4Gs@9b3{o8tar*4z+cxc!>zqf!!^CL01Q1a-CM#8F^k*pqfe|81l*WwgNO1( zsfr5u%-3lbgr06Fn=^`zY({O)czy&xQsXm>#Fi`wp|R3)^EL>g^L$ z#Eb4o2pV>2#6<7o)NM{bHrCvBI1efXKO=m<{wiC!o7>th6yXP+9w#SIiSAn2amI21 z1#*Kt*zeNnRkLvw*6{dYbX|8pc#)q`;IwA5$we3Rr(pi4{Q3`j$nc9nYk#$2jh1fk zKMJZ4d^PXo2&$c!wEyzmBEQppCO7xZU#3McuM7duIG3c=)t{x(4DZwCgdd`Y)UyeJ zMA}6wgDxcm>OntX*bf*+)2M}ILs|)-%6Li)4se73tCGrhM2HNi0Ja@S*;Y6$!%(E@ zLt}iKuzZ#vT5co#J}|}&n37jMyLo`4X%T2!E-63+9!m)nE2Ui{3PX~vmDDPSMdE}Z zUrnQ^#e|xc%1Sy$1US1NU-et7L@M)-=XX!AJtNr(XWLR}#cCo-jj+<2^r(FuPr%W% zn$W2MkZHy8BpP2z+sOuq!cNtLnHkgI1JXt2;f0jH1Qm_^)D#X*=zqM>y<1aN>s) z;J)SwN0Z_3wldY5JInjxLwt`D`@7OAUU1h0_4&my<0uwX^HXD-+Q04y^MIMFMwqw7KUu|ns6FuQi1+L9@|KRdXQ7{|Radfk}VRsv! 
zs9fH$2zlx7Y^bPn29-);3qxVq6k?R599CJmI0Rk_nuUn3PDv1(4x2&lCsUJA>dWO9 ze-k;Qkj0@*RrOx}BX3*4BT(%D8POm;BVFt`mq?5!Vlrn)bSctE>)UiFo-)>B3jFR# z!lT5L(vgAUiS?qweRZj0c(!$xt)6*ohZ6Vjj$!lv?fN zIMh!o2FmHjsmPjX&jRM6fc7hJ&GFcvhBE$oiqv>uP%(mDCn_}L5#QTHQW^BLabp$) z%UPNq9X5EDcqHJL9qx~HjWKrNq-Hu{g+802H&CWXU9U)y4<4{C{y8|x<4~QHQ0^)B z&_J{~-hH=~{>I(;$S8@M>`qIPCQvKr5Q+T-1n_-=q?k%Zd`w1vqSr=e~c2f_nFXs z_KfU5O4q$2%zU|KQ0hn2ep6hc;IQ9k?|iDaP#5j1y7Zf-a$(#!1IDqLxObZR;ZbVy zuG3-pyj!xbpPLstzV@-9TUZ^4>%HnV9hW`%%{Yr6$;S9HHC?`FE3D`>U zLAtwPK)O>xQo3tsQ0b6vkd&@{dGGfh&)(1bfe)<3uvm-hJg?*U<$4rLRkw~+ZtRr3 zfSe5pld}q3!O_&=@ArWfNQ${?swrS=C(gu*ZXFsLJeC(subcotnJ$}o85G1SheD$O zzhL#%jP~vkD}+o82LOX2$JX9ZFN2t6)RJStY$~RvUulzS(>Mpv#nFw%vPk8}3g#si zD#oX<0RAZmdw@+isZ8sv1eo8Z0DaRW*zI$4j0AkFDIt9=e6CuK&Ji=d&R^z|Nr%Rj zL`-?73Xeag7f*!4HCc9?Zkt(CB#ihyj#-pwS%oa#kg7uDDX9*Xd|FPBS-jFd*q(FD*yY-;%i&rdLjwJ_JLTL^lHg zd(PiV7z(q<_g~voRBN>f7Iy4XLF|4)$`@($hwCd$pCBeNG6v-BrQda!X^2?Zn4(oa zYzixbIZuYo`dMcCKLkWCO2;{K7vu5J3v#-6E^PI?X;lfv~t)MudE5E zAx%JPpQA5>*-G_iqYkte1qDS}RXZl85>xQ}e3Q7QP5i1ro6t56I--2OCcvUU^Woyt zs~<(T=UD&J0^o5R-H_Zmb;Y>RhU?2{)Q|cxExjyhJ!@zB961kbRnq!;)MabOQq^&# zOYS`Ms(CYRU8Ai&sgq*Q?B8m*Knn4^sw%~znoY~?jp%AQ$I+R=wY@&ZQwvLpDdugnQx6N&a&~D~RNEZIzS%C4^0yMT=^f~21}rd!WvA8 zaDBWEyK}JHzHxnmWNsrOCRmD>>es_FLyhSGy1x4_H@=-Tvu%ynV!4glWDHcmG%Qh>lj5JjaZX>}h zQNDS=k%>MhkjQ(-AEm06@Jjk;2RKSaa&y_|wJl18fg4RnM%n z3_c>M)FvssGHs?RH$)$gfL+R5}aO`iodr`W`be zCFe04^B1?+qJ%vF7%*(uv zw>KA#WzX^u#u1^0D#>|;*JUK*R=#x0AFe<3Y`V*ZnupO#*huh$uL$0buDN-f4lUn* zpZgKM0W~e!DcOv)@E1#MA!15FqSM2xU8z-pZrn_qk)xu{^?1p4Hf`FONg%K|5{|x= zQ3R2tJHO$(F?7FavDFud9_&O4Jdq1UC`9k8#dN9kAr7S#TLrj{Ejz29Xz_P zT%#;gas(X_{^9GbaXhPk>((iByH2Iy9~V_cIyS4I{G|_TpE2 z3n_f<1{(rJ;ahWg0b}$Op=azeUvphN{PH8o3iaRg8Jj-MO15j11crYv19)(s%~8>UwlXC-1d-x37(Lza!z6@PTDiOl{5^N35V?C?!#! zBg_X}s{qL{?vG}_DE%!Nlh}Rp%4zd+jSwcWE&f?4U~+))erSJTKo07a)wh5K5!zr9 zL`GfL<0ak$V~0uQoQ<14_A z(Y#ZUttTBj*FUFMit-ZfGTLHcNP_Uk{Y)Moa*0c@ZNYYCch@K!oMVx(6V$?*Fjxaa zlle!q$bB5Z6=xq^SodkhoE&q>vgw;Ipi4h==Hf=i35G@{dN&BL3&-aJ>0E+_qU7T@ z@4I>RjC$G}x_f^0YG~t6)Uc)V&ZeA!_>SnES;``Mslsm~u~I3lR9CjM$p05| zvkwT&QbLR~@VnBC)^$8}<9mI7YR9fVP_ypl*GdZf6@CGto>l}qmCcS76%745>+$u3 zG(QQe9+JEL>1!;2RHK!r=SA>LF%T-D)i+m`5!3}By zB+RJY@qjjt?maL8L=9^LCWijBLdZ%6ZLXTBX)N_r6Ie+lqM-Y|XC#^_C<<(b| z4I*Mew-t(|u~sSIx`~x;?3aUr4pEpwRVoX}d4>UL2c8IfFHjSY?&We{NW(dP%0PBKJ~{VmXAr zFa}-PEW1HRB+%lZlq>=|PC6Tv3Aaq~3g5OPc?RCe0G0J<323(M@!q9-Fjq)bB5%SL zY!Z-YL16>#;WU(?Z#6AcDT~Gk%xhjPrNpUPg=T4_1s)zseq$nc+R--Y*c)k6*3u1* zI!8C(F<;+)V+fOUpti&Eiwi1)ktD@j2XULbdX*7Mw9V1^ z)}qSBGhFA5jAz1$0R9x9Z~{ANMB%hF`9h^_$*Bl@qAJ({TJ_xJY(vd|?gac_Z{nMG z2&sl%Ow6)cCcnc>mt79Y|6-&f9`S~GNS}}bui}J{+_b4*RhnLk5qFY(Zlh(GN1OCG z8pc*>Z$a}`lIa^Xdc9sf+0}q=`9tde@}PQi!4vddZB5N|Fu~JPBe8p*F!*TRH#|Hv zXX!M%zy#?Fk)9X>-416XK`FVp@mfZ{8scB8QDDL|5JVKJ2pr6ZC_mqKEGhAW6o zq={FNH!UELs05ae7ZRIxEzkgZf}l>yqBMMQgpIi)$+n|Z|KoSK5!dcs-bD!YtGlAf zO!`~wrIWF&w&gytky`TbW(KqaB>M+6Nhln>olKEEJD91`i4BCol*G8E-~aE+Ze@higViadlw?ECNE z5|I`9vl9oSq6IoN5st}=;5dlT$CJEBXR4$=;PPEGEoQV3GSoY$N>*>=kt^Wb&J%3x z(Fsw0A2DQ^ybIQV$V4|JfX1~4WgviL?yA6C>c`i)rDz;8PCQn#?_1U>LR?PKxwS!w zs@j!Il2;?iT*tvyBfh)ZQxY)KICN@F8PLqlUIBQ4e{ zMI;=>AA^;1*c&D=5?iH;yLg3>!4T;{E}v({#%R$eZ(AHJu(=Ex%mD81JG}~@tDnH`WX_~D76C z+wB^J` zSA!FFh8n*|ud?_Ug~+}Oht2>VTWArxN}5Ss3Wj|dYtfL}des21gbZ;J9Frzig1uD= zWFd_C(Cb?Y8noL7;RUp9$cQ2kn&i1d=iM3cz7kaFJk$H~P09R)1kPq&NW+Q>U4c%4 zMNE;N$`R@5Tk(X0%PYBa^>TU@;R^RRw1X+^!)BoIBN#QXc!|H*Slg|XHRnp{ap?B& zW+xX$Ef>0c5o7=@0*lb6ZEkxVXS*c;V13O%hf5b(b}@~iij67n#@ZLx8I$@7v90_b 
z8}V6Nul1xYrjiH}6S3Sbqn~Yf^0hR9PKL)beJqc{iHTswvpM%TO$=zTHg}T@z@1b1Y)JnCYenN++&y$`E@`9Tm{(KvOUOlEdf7{Rfe-WZFUbeC4aFS_1uVqmy91dYhofZ%@p>oo?ai9X)SV>1GkU)`rI%6?8GPc`MPN?f(0K{N2D-mqN@^vRh^(0Ed!yqcK0_&CYWe z<-=|vq1kG`!}F;ow&ef$XtyIRz|m8yYWw&I8s6=We&K#2gKV0RIILbqV+-+&EC6j& zM+u|p6;U#LP{AqqDc%!C5J66sl?k|q#47nE>6oL;Iy!|b-3jFi@TaQMNN4sBgOCHg zghwQThjlBj%m~Z?JDO^1P|WMHS4GVL480IIyI$g4-_>BoOQ86XD zT6?LZT(Jd|-w>}Kj9Cjr3TcCC!34nMyWNsp4=#W_M#-pw#)d-h@aBGO?Bl$$spM6g zH8`|RFt6^REqrZHbkwM-Ozt=94-OBS2Ux?ecOisMHXo!og0sM0W z1FQf&i;*E{CtXW7v-v+yo;PY87y3sYJJW2nnJ2SflEAW zL%j?TGQ7FPO2nNEFeRIeJ}2L0&?<3L!6b7SeOF&5^?}Hzd>Ng8kH*vBY6d$R*rRfc1X*$+KRvn^#iTyBpd|NI zJ^bcyy7+anmgHYe#Uw}Nbf~X;!$s)dvE_Ye=(-on$Lz8bu@wLRTH*c?)c{iRpqC1T7_y+tIsIPQLC;3a1daG}-}sj4uSe|po6TKz&8AzD{x)mHw%nH9t5-;Cu zWn!qU`)U8J7ZV@K7O84Ie{zy~9fUwo4CLjKY$!OX4oI6Y{UnZaRQvM6MxRB39}B=S zH-dTs${Kxe>-o~iCHa|^sfO?RU!-SR_1H4Z!+K3QM=85Vv>D5+nNq}Y`_h^RqLfN9 z_NK@+yI8YQ#*gSer1bQ0st{qGC$gewO0rK#YE_a&xFQnie!HhONeNGIXBom zfzNrfs-PLmCI2=5E3Ou~FWQFb=4lTq73S#bQtKBxd@u*cpv$CC_{GFqCA?MFe*jHM zbolte;9|K{8Zh{U%g$Kf!beKrQEWJT*h*631a!^jB?D_4E|zPHqw(Yotgf4FDWVR! zh~89eXbPs>IeEw|?+XH(TLp{=b~kyno5vcxjwfh4MvNR{XZ^&LSY{-gfhM zm`gqJ6=qNKSp*m4>uLNiG?Y-J=z9nJxD}E}lP+B+cxHi$aP9FCL8NO+uZx_1#p?n< zxX1bG)N%o}Ci6ERtM47BP9nQm5$^mqYy7{hE#Dd%);qgIVsw(Q-X3BkHp$Eb$~}OveKYS zYN?cD`8g&Ncp$zSppN?KwpHT66oF`}989Hh7sV?jR}Qj8X{jbv>QKiceJPoks;wqd zTd}khJQIhY9ZM+xE%tpgM158w|Ksm&$;6l|^bD|!!ApQcXgmTSQhRf~pT+D`XpGKm z04m1oty^r9HR zRF~HLJUQt3j$?!L0n481>`=0cog+v_my}dcP%~NxY+3oeJgtLenLto_5?7Bd+&{nF3&hy`k-p- z=xmm!y2qW+ZojYkEO_e{<+$}861S1M@FDOe;erGDAy@YH-sN@A2GIxrIe+<;$45na;~uJ-+K@7aT&&Ebqm@=G%jX!k7SicX!;qeK+GS+G9zrYq;_` zjv&tv`p}oo_owxy%bu%b9v(o@=p)psHowNy{S^hlMW@~wr0VfQ&+BUet0PFW+Z~l8 zGU~6Q|Kia8HO?IA{9`M1`YuSGd(VyEr>N_MJ0idm{-KPx(W-+F$eHFOfAOc;3v%6& zS<~rR$hr%_ueBaKY<(uytzo$0dCkqlrP-fIZ^C0Z+D{Q^N(UlE2hjV(M~M@A_|niD zZH$AZA%B&WG+3eJQ3jxBFviOgAr)$7S3PoNp% zB}yvbO`tKm2uudKjS^9%GmE9CXF)e^k5|PFr5RDKd{;y#Zv9upnp4MWt$ID!BSat| z+cRGQu)alZkvtaY`1_28%n(FV&D16$_AY!y|1}3$7kf=ZLIF8(%niR5Y;?=^oJf=8 z80|7?{>U;7z=@)x7Q>{9*Trn$_Tu$|#8rV0Oy$I0yrI-EjSHR)5CRZ{nvWtih`We> zD}2z{DaV6EmzjX}(v)G3M10ZoZex};L%g6{R+Q*0B4pmEPsw*)K99!zGko505gwrR zBb2%`IWMB!EfSy?Vyi_k_uZ>_yrq%AOPWi_49h$*>$S^s@r1LCiJ8KdaTJN#K(jx( z*;ZC-F=_q0M%)K8q(xr?>DTeN{M7*6utDl?2#_LJpr4f9=a7fT{9;EwiXrmMP#~vQ zVP|!XHH?4(V7+1>gM|ZA%vl~Hq3p9+C3?`JxSz%W?T9`~OhGEnqqXKk=@(hdzj$c` z&L3&;4M(eMp)Q5C^+Vx*9qR*MC*K5IPAI5iWLS+;mS0t#*!92i9v4;qz&TrO_$J72 z_fS;>bIM&5Ek+PQI2Zfn%a?2MPEJlWONh@gR7`!pKLuNdLLd;}L_7@37y*cWGJg0& zaWn}xmY9?tBD=sL^|Djn8Lib>>rY&T0M&VTDRwb$DJq-URm6Hv^BE~Wq9-DjQK!rC z(bJ>N-Ss^+Dk*E7#|Yuf#G0E+woFIB$WS=7@Yu1fsI)?KP4$WF$Z|<~KB`LPe;c0z za6TM63{?7m%ns>TTRU|8c(uMn_wAPk^qDEFtR~+f3pg5Yp`wSPZ`r-PS2Tx1e4bcb za>OLX58|?9a~n|c!E9^;fD9(Cs*3%e7Ac^cqSaSue(lAyOH925HoZq}UCq?e%lbfi zXrL>HCfp~Fw)HSn6&NRO{ul}nKH;W1Ib^G|I(V4Q#|tO~&6z*pa+yTaOc_5KWj3kN zu^5hSF+@qVcMY|8N=I?^#PG<`pgbs6l7bWXuWFT2T)AY0QACTV(9Hmbl!+!|LHUR^ zcvy6uS=Z*}IHy?IWZ<742UcrVH&Z1qh~ZgDp~WM{ zZ%GS+89+i=CsUZjfCvIGjfKw7M2nXXhDFK!MXX(`;lM_J)G{~6F_&lw*dR>8aAdH} z+!ziCnX*)bqdCK(Lk2w0?H1~;DE)J%ZK1TqB|XVs{ftCr`J>Dy#O2ygxqBiM9k6Ew z&=C%scA_4{hhu40v~4x>81NGMD+%4n1bl^{uCIYUAePer?Rn8~-J)7J z5UCjFjU)Dx zeeHeY6^zWmco<6G47BNRn!#cy8WY|@j(}1K*!Y`O%uI!6N89k88Pwk(1jA|~C80}@ z@ZBD*r`5&9h5LwL-(@~~Dh9UjO&*=O6CEStW_d+bFOb;G!8_mQJ3TWq?A7(amB7vq zxSsU=A1~8*tf55?PiYWv*jN1)Rc889h0H56`#D?pSAl(4Z!rln->fAY{f?Drnpg0hOq!Z{TV(LB zWZZMxH2IScnS-AP409O>D3GZ7yNTQ^Ad%kdIrbgkg_Xn-?OF9y%Hz@QA)+K| z;CSYn%`xstaU*}KiOweC+nj7V2lLpG9#zn#G2wo}V1NdL#dOr(-wn%5?xjL9v zK(xS%B00^+;?3gvIX|umIuU&MIaX3<+NXgGi4tjz=5l&A)>%*}T4OD=d*GdC1e&V6 
zg=vr$0Gp~dgNY!&A`6I+^BqfkT1F&>3iM&(EKh=f=4ZxVzN(_!8;|Q?KkPm#j*~)2wKEsgnXSx5q6` ziyHXDmbg?Yb9&DtTMOmZ6fQL$6lZ-R;hZ}Rl$w=$7pY1)*H`p4q08x61Bve=+60@5tvk zNZ}rF;|L?MS_8Ttewm=X3pSmP^E*~r{kl?y^z1_1=Q8lgY;(Xbwgh`x?Esa=G_^do1 z^gQyr9CUb7!p{<`pl?#C8~U6L@ZDvZsce@aPc?(>AunTsa0bem-6`%K5r>)#mwW90 zr3Ki*)Ie6%Eq9uWX65ZkiPm=cMrRB-c%tB7cm0535?<@jS~R;t1&Nm769hiGA+9Hk zTE?nCJ;g+U(7L=|O>2seosC4RMM-JUP3hnFZ1An;8aU zH&mhYXBL`3v?an!(KWud;f}9pCWfYQ!9tih8K&m-cR z;;J}iT%_AfQEs3Wa1G=u%MWtKeKUQcsj?wk&i$x;t>o^ZsB<+akWXT&dcNV~W0_s5 zlBO(=0YM(7;MvJ||Vuf4$%t7R(CXx89y$7EhETRgTD4z8$ud?AK>P$9#Zv&W^sBfrAkRuh)vyRc z&ZwC2lHnEv0#Sp(A13d^YRTOH7~GmjVdyi5{e}h4l=2ufvQ?qC4A~S;{kZeZPPCm*ceaiqT85tV z?9FRIzf<~v6a+u3hg=qc=ZA~4ktmDitz{BzkBi7Y|DM)fp&&T?UMPcpbd54uM)dI# zZ4oD>yoLf97XOnU=*e4nW6&VDQ+SZl$&l6>AEV0y#6j5c`r8)Z_&6Oq3YYvz==$~F zkmi3x$^cmj3j9sjr|7rc1l|WzdEJ=oPn~s}T=-9$d)5A<0Bu8xS{glFYO0nfy}0*( ze>J=B-c z+sSO$d?uZcR-=>6CV=w5Kzs~k$Ru`9q`@to947xQV0q&13qiXQOl;yyo4%@6dJY{R zVhfqveQ%>lYVU7{dmdEx zbqMMEL=zd9r8|LQnt&ekw=nSNnq+HV1h0~Zts}2~X4)A`4@Slyj1aG-h2;@n-LF1V zKKLtmFX5^;`AO>OksY^RD29ZFu7{D?i#j^4UAM>ON(M9p0*QIhWkL62{lvaBO+HeX zyTliXXu|brHKosX)w&I`HoQ!uXWmHul=%4EM+ulY28xPtMPaol$unvG_e-mefaQ6_ zOD9zhe)c0)QSehWWT>#LOs1T-z8>+sDQ^AU8sO9)G0?d0qn$NJMu;V56nXfz$ zKF`?k0l@*v8cT7hIh{x@alc%cEi?7JIB(b9DjRulu%e~*9$x-F+vO*Jb2C|YeKucS z3dmRcpgQhm&JRt)Gks1StqlJ{TmQ$26AACWiZ-c!BBcMl8fC&gRkGP*R8D;5vUb=3 z2wm{!r!}LZS8~D)=VmYY&Fl8QlR2sHjQxm_y6((IA|)kt{2@VmRH;~a48;3`14BBTN&j4HN zR?-)J|GU7;DR#+;m%(?bg-~iTsJGYlKl4D_ub>e;nqp}+Ozfb4iNu!6785H*# z7x-4vl{mHEUM@9njE$~2c_w6K1eLD%qCjTB{31CEhF3CNZ#-4LY?7uCN(z%3tN{N1 z@~uZ>1md}IaqcK*Ama+K9nzt(Po}T|QB`UvAwAe+m8vAewoH`}+{zIR#&fM?h}cBz z3E^b=$()x|;*b*v4t21oENQEhqoWqVED2G@v(J~)Vt1G^w;uqu0uiwVPrX&um6Wh; zY;E5~_L$ZJ(YBR3^^)Rf3O-l=P(;fylQgA{j?P<*4&iGl zyu7vRZp7=&xJ>_OvYl+?n~t$u4Xyw0`^4PmA&I^1!Z=Y+4Hhhc9m8+#fBJr( znO=6;_a;XM`g5$}qZ~ri7>&ZKqQXs-S}_fN3Dqnh_`ehXZ*ltn7zIC}{C%xWcsz1} z8VRw#zQgbP1asQs{@lGsy>@Z$G*ZzeVJ$VFN%opV!DFvx2}0}k`~zOh{kHyvtDBpQ zFWvK+)A`nL^FmMomNF?YSGc_$7jV42JZw z8dM^s{qJ9e_hRklbtl`7LuH51-Fcby8=%?)e^{4N{(wiMijQSw0!hu{BvN=4Y+l^9 z2WZdGmubq+ni9}niw~FeOwlkIS7Km7!laQ+|GFW?`G>Y$UiE?)?$`A75%J5@1SOeb zu2noXmMd9`{z$4sdmBR+P^74TM41v+U*$F%VyIg zb;)LJOLGqNm|{-ra5cmpV$;RE9I77FsSjb>E(KOSRh{8WfT)bTX>|C$m46)p)hlk3 zWTs0Wl2EA{(TFy6=QoY{G5_{M!O>Tk8i%Q7WW88F3q$uA>kWG)B{;xEj+s$A=FU=%QBWH(K>1wEk4$b}7A>N%xtA ziBWRMyVt#BaxVO}L+X&dzEVq@>;&C-R)mdGaD*d3rkD zHUpl4e2z1vlXx%dHg4F7t%%FWH133ix28{%2&5eXQsDc{&Q%V&hTUxl+)bk(7#&t@V?O@M&1RS@|Z%^9m4FXIAPhhcRjP+KN zv(g>Cy&#?zrclHwGKcp3AE9K2f3{tH`snu~XyfLSU;X|*R|f-N?f56nr(7-^^YCX-rT#_7-u? zDt#s(8?^SXa4k_use^cn%vbj8ThqtG}N#;C5zJp=TOF^1z7<&-@`$>>2@Pe1 z=Q2?-{o$gB=tI@&vel#wJU_gO)o6rm_Eb-W4tJNaaqrYl_sr|#X>S+#)wW+{X7;Hb z9w#y#UYcF4LhbI+;gdr4XBn%WCknnWNcCKj*U7efeCP28UU$FZqq7UW{>{r#J%=ui zw&T_cpNr#dF3Zx!XZN48RT?~`smnP;(#=Oys`J;TIyH{_mbYz+w)aiDI>?m>n(!3? 
z8r{ss1U2Z||M|9Nlj*w+Cr}wdmF%zN3;Ft;>Jz`3BAL@hTxnW%E%5U-!|3Yanj`R& zl+T5qj^Cc5V1(M$wZ4@^tkss!uNhe+;QBHt`iPfEGwWdKQH8YZN7714v%v@V^W1}( z-O({G*H0_ic_u1il|b1MgRz8FOLU!r1}pLeQ4%4=du{heInTam4LURDtx?bQLJj+<q6j$KN(y_UF?cK9<$AcX0L0Qzt9{-qqYXCIc?FYlk8ALk$a`%SYaP$Q5k4P%NoBX)X9M#pW2h3;pYPeN==Qx8`Kjk>~Pr3>DetnDvPQI@m z=f__xoWO2rK&$^*QRs4d9B=7E!O3gnJ?9{k<_-$qKR=dC$r_@C-{&X%733eZOaA^~ zoVE~u=#4d&&MmdhoQ3jtA|6?C=<3ipiBwpSW()k%ZBpHNSH9BL?GRwH%qr|3vZM?hIJ`fu8T>(*}@K~+XvOBUrY>Bd3a8cBe&_Yb^ff(f%JN z-B=C`ob!)QBNEO_P0tBspSdk|Cup}ugxC`l`O7$B?f$tU0$*yZoyR*QyYKjCbsn9d z{sMcuuiCxOtld4yL(g)8k*FG~um={fwjBS+H2>1oHO)xM^9gsCDKGV4UN%+>=@SA% zF9Bvm*o(e^x75OC((Xo9ZpH{#I+Swo`od=;0ismM(V{8Bb~l;^IagzgI)R*`6amGi zN`WyiuyBK@nC^NcqB%msknUN!L-hy;19IwqEpXBBNoNo3Vc(!>mF}-EpxY9(Vequ# zHF0D$!6iqaBPk=y9_$_#vzwin>(f#Nj<$NYeYq$4PQ0t~pg2w64LUZ)f3HSx1^!iMDPQ3XV}`k(dE%D0Gl=?NZ$D2I7e`yfU#Tqm4VXHNj0leO0ycw= zgZ9fhT3Xte0bQsx%qmLBr_Oz%YCjvD_%#ccRuEn?eBjC=B{j+JW|qbpdxA+1yqB=? z(wxHP%oaQFsSdFgD5lkUWz2kbXi4HbvPyJ@?Dj6K}eDc3F%(7G8J#Bb554) zbYz{WPOmjI(I4E_0pkIDS&$mnkoCgIUHKw-OjS`Jcfd5B?%AS6@PGs22s%EaR4!CECJ zJz7|@Sba6w;NhZ?=N{F-;n~P9_tlP+3r#)WWmfGP(ak*urJTz54zmo-_YNjYO`*V`rZY3;*VG%FKik{Ik7y-n42R`7@=m%6!udnI)l7!uIE{?g>= zjb~$RXE3r*@1X#xot2Hr$#Ig}HYS&EbB&d70gNJW{;{#KwvVUsTg15+D_Nr~{*>qF zJ_A5QoP(d^-Ih@G!|ASvre+FUgj1GQ0JGuvJ;=DB(S^i+(v-?F=!0UyIKLd@5iIvj zOte`#jy}WkEcsXoyBP^i*22jS_nqaKqBPVDW`XGr$r@Z{$Bm!M_9|9*Q*FCa?Zhk> z=yB?!zx16r5orE#qf{FNoP>*2A4b~FCVA8=%+U??9`PkMKZq5Sdjlh1ns|W|+C}T# zVEgW8C$M@WDVN%*{3>F3aUKt2cM8HU&9`NqzkW^=e~uT6TX zh`+bYM`2{$mtl6D>~?bhOc|m@uNJ}a3SNpMq$#8Ow2H~_5?2`NZ5+C#aBwe-`tgyk z`jeD~$gjoRzS3yo&e6eTA^KYiIkK) zA<9bsr3pML{NtKq7(UUDgsDCkkY%4r^t|XI3SS0kMsbx5f7M6iNVu<#gJN%C{T}eY z1M(cP5z`9b7J=WxtSk{iheLU+-mR-Y2N>4lRW4{SESM5A`qjOni{h)ptMJV>^b5$+ zQ58S3r|J<(rh+SzPli)X-8*gg`32vzsgrzWpG*Uk#sPw$DhKqIbTji11<>%p3 zmK7`;;wP_h?qy+jEZ9BBvlP=>{QxkuYcMDx@$bjI&gOTZ%}NXDBMI zt~d~F`xAOk%t>ji&AF3mh9#m~&Rp(lB>dci0nPsWRls+ZN5@2X83uRsontQhtG%UY zSo28jZZBTqA~CzAXDjRoe$(Faif2#U? 
zqC5O3kRV;Z-cB__4|uhyqXplAW8eh@?Aia(b`Qa@wXxYMh-QZ>3EzsB-j|9z$^^Dj z6zgJyjc4%6O$c4^98WL8$I7?wLD%eS@cKXp5JW7 zS9sH|(b@euN_UoOFrF`|gr&4yWxKB=n#J);zNJ2f5MW#D*Q+X;Fc>Cwjgr8zQ1lF~ zrfCjxRPhsaM{br~WV&3RH@_U(!i^^i=UlFBKX|d@vn}*J!E?Lg7XAouSqTi1@80eB zaf!S!PE~V4jN@O2bZb2c&y#ms8o$2K)zmBzt;KhAF|-P3y7cX=n!LFfg>xH!=c<^- z&V4r}>c-^$4Md(AwXY79f6lQXpZ(s~TvfK2fh$e>R*mYsd^asZQRq10blONPI~&om-J8| zW87-Ri)itueF1Ld08QP)gw0Lyvfhva zIs{inO1Gij&`Rb;Ra#J_sy?w!`AOi23O!Lg~xig^Be?!-~zAyMUQPWL=5N;U{@)o99tsY0<=7o*h2Xfs3bk zX{8Ce1AGvHCx%6{zcHz7Uk}y??D2>SbL;|mIEsYXp>CYmK^_UD?VIl)wALU}#7GoT zyXI1e-xaC7LnVQ?l|{Ru$l=LJaUPX1i8sPpY{PqQQC=S7j(HX{Y<}fz`l^l%(gpVu z-=@dd)!rAv_eb2ZDQ+p;ig=vxgG_36$e2sZ)bielePdqhgu#cH-5MnoBVivW_NPi& zj1eP#LTPc00=ljiZAssb5FH*?*#BxB208~VsynWr>wV$2_sGb|){I3vRmgZqA@Hv& zzI;glMhc1irk%Xtm+6~VuJ#PXvy@rKuf_clD-jSX9YpGgh}iYPEiEnR@yeNpIr9l; zmO1xBwDChgIm0T=_wcr<t(V+O;dm<&Xv1RJ!K zJgEeJZ6LVWq1pDJXp(4dKxOiOZ3mVRU!>M_;YR zfE23s0V|a=tB1TADDE zG>elG&d^0P2mT@gep{$IT}0n%O>}b$CB_$^$G?<-<~q-gZk@mT{l>^TXqh57AIMA{ zRw)$(#|u6|kAS4NE8OWY&|0U^mA}=MOSJ)P>yF-lfp{;|5F_`T*2f$bXO`M7qqqvfxE;fwB1MQ}zqeurkl z@8{j>p7VUCkz?j1x>-fjKX4VWE9~&GD~WH9qfaK~Ema$(Bzp;f+mVbBheG-Y;}m@n zJEjeyq=(S*t?wE}3>VC%8_;enWj~Y%csL2;#s?^pQ&DA<=MmO^?r8r7s)UE@{e^~F zm($7qb9ByNlQB}#95X;b{2o3CUId;>Mf9yW8$u;pti~5Pf;Fr)&BQj*b3uNAH#GS0 zt#IERsrrgzq~+n*vkELjcjbKRR0gM};xV+DkDa~D8^y$LD-6D+y}{BHHGrwG&3&e~ zINOGvOlZAyG)0u}Rl!V)FcUZ45YSB3r+@VAn6`)pUiHKMz#KpSX@WH0OO-F!caBFU zztNyh@*s_AxiiAuoP3O655vRy+~({3jU)9Tol0c+6JI?1F0K+9sDZjw+>Mfauap!A zHPX*#oln5Fxe9j^_A>616E~t?p(D7@0wWL4kVNrcT7Zx0vz)`9ikE{mVTa~EAL^<` zk97*C++Q5)^~)j{{-Bar8qE5Gk$!$j$HdB-eBE^;YV&w`0DCROBXvEJvjBu3AUx?N zeo@hONa)H1d8*nwG`Ft{N@jsi7vcP=+>E(vsEjG%jC^2zh{T(tLW}0r2=VY2@K^aP zxtn7s5igCwu$ta%?9J_E|ekx zn%c;|?RR4XjNfvMIF@5GTEP@C(u{jXv%ALs`F^!-Quw}7%Lp!tUM}TBh<-e!P?20J z`(a2pCUMy4>~Y&kr^4Jz;;q_2%B0;KZ$&eqa4d?}Sr5l$gik$*7{*9z7G?z$Z_J^v z#0Xnrwbf2ra$4@CVOMwCs@zooQ6zOA?pV#Jo!S|s@~>q7K;?7VqpN_>A@p$Ccb=5{ zVE(@3+xR4}(V_!p%`Q^k{9nGKo6t!P0{IkxoXOJY_CAC$Dz0;!68^=eClY3l@K)!J z4zlt5Z1g2ici2?i+Nv9I3ZNuXyfk}eOiI!qlQz4rmlxqC98FxRpUxr#zTxigDYn#| z(LOk!nw=FtzZD~srBJ%{kF?ayxF+*?YOQ|03g=p|&3*sVSetq+HTBsoj-2b$4WWME z*VRAWzp4}Ga&J4b{Cd=!mkg9Xy8ENf-$mK|qTGyZCO_M7FxT-N(50Z7_oiy`eYXAM zFS7Li(Dl}DQHSsL?*!f5jdUrYG$Y;JEg>b{Jt*CRbV^Ex#DJ7^Ntb|hm(q>r?dR<8 zxxVMxd;bA*@x$;ub3bdn*1GHeZ>{qGzAu_|*9xv{KB|!fSM{LzK+bM!%t-Q|~?@CKh6Fm+6Bg0!~5hkTL!#48uZX}96BzM3( z=ZvYod5xStcq!TRuyhX5w-_s*=i3*^Rsu}?N)*X)Q$y1lD9Dgx+*&ti&(Y-3Kp1;md+6-TG7`k6#PyRtWOm^$uSWDnrlPzWGYX{ zm!p4-Jue~p6hucXL8l5*1sa>Q8I0z{FFwJQtm_mFD7cdIDSdd#&58fMpw^E+v@K+> z(>(hn)Ea+*x+Uu`XgAAzQ2gN=Z1a`n0qmt7MN-@WXLunQj>Al}eq*;YOtVWH;rv-3 zpXj{!_tTR&hIL(knZ8oO#ie&z^~%^H%7DJZ-19x0LAqcw;BV z8ZIqAfZE&Nwjfc7-THMmE9njS!S9;MWWfm;LJhHCd2{#)-_?4(ZI~pQzjfTC&YaOrzQ}5&`bBR?ZpLdOj*q6t?ay6(K(f8q_$uM&IFo4ra`|bD za-yWXT=6rqO(RnHy6PohS=eQ5_Qy>txw_L%6R54&UCAw z(t-Yd43AyhK=D+?h)jCCUALkz$*#np`FR0Pe zf2O+1ZFYeAdzhjvI7y9NLIVCiUb{V<4vcuto6H}G&1L#RwKsY%0j9O+dJ&x^JO7%_ zsqhCejxZs!^2DQp6{{uH%XjIZOd5}g*WHgc#xBckh4jj8E-@C>a~pf0Tp+J)5)XDyw?}n zoUzd|vJ7g4FQ1*CzZ(AeJAyW7;(i~fQAl;DY4^&ZsFi(S1j81h2w6%1yg!t2Es7;K zePw#@@#+5eq5H?j&T8R}NX83aWb?NUj>wVVM*!;JPO+Yru{Wzf(?@QKhOCb6(M#O! 
z9L>9rza_3SU%+YA zIBKYZ7)gZ`R}3@pBy!va-OG&3O?+#=It4(-A?q|V=F5tMZ2&5tLvY)>*@i>R;6^Y$ zZ9)80*Ut(Vq@Jg#?8bea)p>*&HBO3M!d>c8AE2`@-o3Vpde5tKY9AKpQp8YQo7Y+D z@rQ^5E{*j@9a695^2NvG0n<<8=B6i?Rv^ST{;m67q6A9EA$`;)958HHVzv{kJlI;x58`{4qYSyV-XhF6jpft|z#k%aYoKRq^2%gLYm%9n~zSaR{q* zGL$u$d%>jjT9mCY2dcd3SC;0LvDjj+B8>JQ#a#<6MQPHE?r#0c_(%_7{5yF(PZGCx zaXgnXTizXhYCFE99B61nFf3O5#8_`h+twN81EN98IMzAfSzGO6;u~+BAdoXE1z4>F z`;C3&U}IL+UrDep^yY~MqLS#B98OUY(;e34l6q1EUDbG)q zNNSGq2N{M68{r8$#SG)){q+Ha)A4Yo9Pg=00+I~IWH$k#~mmVO6ai{!9?@bG9Irj5b1pfjt3)@tlX4ssB!lhW8r zyWOiL=f58r7w)>AUm>f?kKV6+z*S8MSp^4Sq3zAI2uwyGh}PLrtgJBLD0pakuQqI& zi(-iVlO&|T%~(V`GlVYtpR`mBc=)ZTZvQd8zebKM^947ymDh8h_{R2(<7Qv~f*LYu9 zusD%Jt+MxkZWECfplo#d(S01k3r-h%y4i%h``WP)Kqq$xWk6PTjAJCdCu5O>YqI$G z0s!)0ahhrC&>D3TOB3`}a6E;HA{; z5%m~A$?9^Ah}arp`}?Rd{_4oTg=*z@m*aDWrQ-i?2>-uBmL`U_!0h0{uByILe8qK} zd_3*)L$GD7dHOG*e-AMdjkTen@sVGvVwH}o57QM}i5>UOAA{Z%9vg+&O_!jXZFnBd zXsEWIVg20g3MAh7-d-kZSeB3dBHkc3|GfxII*BOG_?a%U5BhvXKFjt)FKPcYqdQR% zIuj#Bvs^4=U7(-|nBnsLGFAME=nptnYKGw$d#}oM)d80zlaSDiA)M=DO7dO3u7)Qs2btr9nP6W)jEKb zRGd}wwV!^92TsXt)TJz_bb8MpHl?Yq#qyit6%hkgNPcvC*wpI0(Ho{k5Y@2fE<4@) z{HoNO3Y{of&&BS#n2xrMNgh2d9CL$u2HnCgLrnr+(dYX0zu3e;2@^<(@2p(p{j?zJ zpXraUji}eZoMx$L6*Jdq-rWeUD2omt zChmH>PBHA~-Hm^>={IX$iF$cA(VwkX(vF&dy<=tQQ|)o>sfj8WZ9DF&lFWbRR&$g? zd{GDp;k-ipCf|8?WR{sd#{jzOo~M&TJVCAFc8Ah#6o;b=ReLaqF*8X3wav0~{at@s z2$^fu^dm_oy-pMbJvX7Fexc4Oif7qoYpYK?7S=_Iu_vVww|h6{OdlqTU^9Z~0xFBt zZOJv6>B)9RQl>@YWc}6zeybeHyqSfcQBg}b`Ef_^+(KFKrNC0^p`+TKt)gUA z`)OQqa}78V1|4JpWf1A6+n@_20HreIoG?+gRJCkR86zJuKxXlzOwK~P@#xa(_jox! zjSoj`P2i515UHbCt}A)4gqkmQV@JF1)5@c<#$IP2uc1u5_s3B+fjL!z)sZI_slAs! z)5V2$ydQw43@rO$*cpU5m7=vc?x<7scuBayDTH#?BOeBdB(m)J^_BI4~f*w6Jp z(TME-iEECZNPXe}M~6|Bz#jpebF4wcxFo)W`n;IeJk#|Hgv3KuK{bO{2Ce|oV$E*Y zBW?aV3V7EbNl?&Xg>nSbBFZ~Eu7@VJS7tl8Mb31LSHve~mFSjs=oFAokkBm}$dP4x zWSaYlyYZ|`6x6(6f-FDwiZ909wZPI06b{yC@(OM&ikL1NfxQ`}L_=Fk64DyGN6pC% zyfSESUeGIXse@`msP2IIW@}c@Ohu{$#fece*(Rc;W-_|5eJ9g_h`^JimSCK{`g=;O z4fbPG38)ut;QC}ZFZQt|A?Nt4jn_Z-)=i>;`Kh6$uC58;a@xk{*Tj!%+~z~_hpGn~ zYvAPxZK+WXxMvRe#}v;vcoS`C5tI)BoiD!$l9oAXUJWY8pV$V5tCHu1Y*)gYwnI!c z#kWWDd4){(Egf&08rUVtIT>R`-p$B3A>5$}j?oiW;8?uxbSd|jHF9zL}mIUEpUlKC!RcF@!;v~>F} z2stumBj#sxG=^vO_ja^=JKsu0`fQq&>Zy@cl@Lw_ddeh~B(B##*&MzEsbB?PI2%in4FiL%RWLnK(ag7=Q6qu-I-5cJ>{LFY{_L@H7z*(HrA1v38!$P}A z`KLe`_CAB`OuW*7!(;@7)<=Z#A>z<0i)@g}OnN0gMkn~qk2vn6X#L{&sC3CS*5wj-Jv=LNYKsq$yfS^_ zdXp8xx?GMmQ5*koo<^yK2!LK1e}k{;_DAK9ek86-IWR_EdnqPrlrxR->sB~dCkn-L zkb;`3IxwHF+We!m^qH8io!D?e3ao$KJSclvgYhVL7(rbG`n7Mec+u^~i%MCGM02(|cH(c#Y`er2ABJL&AFl!DgS%OBEncd6<;rLTVlMPnxPq39 zLb#}6R0pgLOcq4xqemR#P}vRgUNJ1#6%boI6btbu$>ZZ5{>kHHiqhTp6NfdIRUV`P z)!KM37whPL9ZERJ592uOX&-U_V59bXrCrCLPU3{=M2Cgy+vmkB8)gvGI9G0jXwTd6 z+^YDPSXxkgyQDTFw_PlCY`Cz`jD)eCE%`<#x}- z&JA8m9J&Q8Pu%`y-G%zp+jL*L%N(pMYm_+dw!{(NSXq=E1+Ue;KV(Wt&SAn`edG|j zMf_?y9enA09g*XammR$GbaoW8Iy@dPD5om4-SA^6#L@998(wtS_SXDZnlFde->je7 z14jhDr|A()B8+Adk5m@FKA+*WP=(8etRw^PwwXxWDnU_h3TS zM^5j(5j^qswGOh1+VZI{Ds8R^Qq2PCDHU2TzB2o?F%{-Op=!#sV&MsA*s15%C|xr$ zu}Fbv$FilyqQ6KH^u)%*nrZq%d`gi)komdVSlVq0g^m=DrY_Ve%_yg9!otueUebPD zTKYQ|14l?By9EdT9o|~(!kDI7RHb62qLjvH+>P=D${58Fs^UoMozDkhdjV$2gQ>R+ zuk8{%ySHO*RF-SLIA<}JGwMkMV`)#h18==@!QF@q|c{gKhnQ0iZ- z6vbO2I%oU!>^zvdDDR{7M@7}DA(|tF9M9Gj%e5%;Qw?^FPFhS%+yBZu7933Th^Lg} zR9qNd;lZhaFAn||c*lA?ba}zTWgXUF{K(=`dQ`0oortf;NdlTCd&=OrAMu~HI&ZU%M^`DWz1%o;+ zbWK%eU!I!dP0Og4>whb|+GBJlS@3sE{QliBiaN*~ax;R?dP=U&hKB@CimD!ol|{PY zJ&^(704h@;h4gSXYCiNB%s#<0&YmCf_vyOx>}%ema?SB_%ei5i&}SZTm)6B}LTq@? 
zZ$O0a6mDAogOm-ChAh9eIEnn$8#eGML>c7#5|wNx)K>X|8AnZ!WjzQb8%(C9ndFB9 z0iNDrhZJj=`M9+-aDoZ^&0=^mj_@8G@`^skau?`W{ALa6nEcYkwlLeW%OuKyb+?u9 z1DeOIF;$_K8dVwmW41XN{>Lumu2)@#aDHhmVA##VxL*TWW zi{ay4av&+Lz9YxI_AR6$MRmcR&{z zH=~|a34|m?zb^Ywo4ig0N6DLEa8~%46t0*D*UgkPW<4%^Vd2Brg7y_Y9`&C@Egn9U z+((}Fi==L)FT^u-*+Ac%#Br^$_^*FE@N4z(3ACs%$CyZ z`Io2zO}aMjx!|6cMO=I~@ZemnUX{1ve>1<;C}VLNx;dXIGZZB4a*mUoc*6MT#LnAx zePur5j@w_AxrX)#{7&n9C_XWu8hBS_1kSyF5TbO>1)8(oGrSF%whMl{uKt>~}Je9g;#_?GRJ@5g*9G(Un#dz!rQ)tNPJW zO+qys8HG(9X+e8$ESbUibE^C8bO|L6na}N3=E$K!suHk&Ih@Ft#pc(DOAi zI5~{6ixEVl#eCqRZw{l#@AlpQRx(78mfo*Ia2$zAHi3^v5U-IV%5*@4Jlb>ntKDRV z9#bB3q2i?mPY)DIi~<=}Xwi_FeeEFalBg}M$9hDp7)pH6ZHKYTf}xIKUS3lpC(Qlv zrtkDKoABzddN0lNIEc0pAadv1s?%Neaa3yw0bma=>c|Y3uRD!o$^jdC@hCIS~U1fUaS*KGe%yjqShq5yPG8`!Vc*3ME~3 zeHgF?wC?E+2qMeaCZ8qu@DbNrP{D1aUlWiI-HNs4SgU$`-fD5{+w@WjY}44}6XR}{ zw2*eWGdA@^v#erUB%kNkNcf%7uH=^kMj%QmYLcsC$fvzkl09hao|se54o|b zriA@m*F9dh|FsPO4Pqcs`K*O#nwV5|Vc>{o;K-={0U?motply|8P|~V@u-5~j@hn$ zJ9o;cOz~NQVw>-`JtPh%K7WwZ_#va|#`PzIaCl=O6{#oo8+jnv>2qscq(vKyg8T0p&(Fq(7q|D>wi40XtK4}?-#{Hm|74?-fTvk4hnVC-edgy=)t zKmiUovWpYE_c zF!W315{Qf)qXo9cy_c_)ty}FJ_p*g(7tqv`eWZ+g5G+Q8Htu*Iy^}em~dATbXFfwrk?!1BRr3&5-xn&vVWcL~ELxAxBGuRNx;$TTkyA={X z<^10M17y+Q4HOseKUwEFy=&VEw{I59`8y$nyU!P(GfOlSAKWB{{VU#6M0f3%^V#-V zj~dBxA=u~q%xx=|GCpPX$?x~EkFEsGon2?JG; z*Nry!{h#PKJl0PEkv)o;k3sF&G7z z=oTAAXt+|{I5Z#SJX_7V>rQ>4f~Ary%}f8y=D5w9q_ao4PE7Pl2lHpDFcQVe94m#Izmoas6pwGMWpD zK5K2~Wq8h7B{2C((7MHhCaiqI*}6^lt@O@*_5`|mc|vDtznL43C0#thb70HR%BL)z zZ?9Fwj8@KAm9}ziEXSj`Ji2JudkJFS zSJ+Oy61j6Vk-xd16}ERgwuz-Vqb z@Trnj*Bx+}X~gPE@YG=EP@0sTIUZab=G%u*tJ{f(Bj5Vsmc4_yV zEmkT^yM&Q={N^7bAaAie8%s${7Z2`wof#su1{Km>H(6%=7j>?g(> z6`X6lh;{fO$O{(Q3~lOg$Z>x;S-2Ab4Y!7*sjlHtx2XBN>;TAahwonzPp+%SfS7lP z+y<9*jo%cqx_AyJJ4yKUt@P>S4M#EP57+pQzH{;9sV-byl8P=}jkU}1_ii(Fr4@yY zAM3)9ece{b?*~=>dSB@72RnGWtyqr_{`qcVy}n_;KjG=zauoe{$rOrTD zZth)D6W*Vj?LJGIk0-t+FF(upm;G-7`%Bf3l+T7gf6JLKyp2o}z0Voh&a&Q|E~J=s zO&4`t5Tksh=$`_7c;bWz@#oQDYr4;@Mu6u@>ZfJI3R90FO5?ZNMRZ3YWWVc^DsI}_ z+uMD#kG2fustPMs7#sNn3g%z5t;*y?_uVH!#^*BB*&(@?}RH*yNB>Bd5MyCUT- zi8dlqTT_9(I`mxmBJZkjpoRJJ@2CLC@YDJ7%x%;~OGDfTEb7cB^5lIH&rzZHt%K9z z{tA5dyT4jBf-89KvV;vkaO*;anO~u54Tw1<(9A!XD2U+{ZEYh&JgMYri;Q}AucB8s z)o7Iv1RHKGvmqnDuMRH}pZl^bHpbESE?=v`x(V<*msrFT%owV_+=!Zny|d+8P2&)+ zQOh)bqu`gcan)KpVMf>TL}jw%=baoXhCg(@tB_V1kKzBPZ=AOb@3f^T*ok%scMJW6 z^<}PX*;(Z$7#-`QyT4NDktX`RrRGj`;`>U70*W-s?o}E@EmwGWodBh%@tH1jV4xV& zr`W-0pT_qVMEN9skTkIN`eJ`B7!5iHU1T^&>JB`rdzF{?jT@m7FMeg)av%RqAl~I# z%qxwQrJX?`&&X}r;6OiIit@w^5rRGQWs-wRtu4R z(b*7G9Y*T$T5@dWUw*6FlM^whw81p#92jq=ssgqKR2eQ$PfuamT!%w9TSy6gX0pB_ zJRJya0tHw~MA}{SQ5IBzcNdd3gsPG-y^?GvH`T7t@sL08dr+pb(61=v(6>7aqv^3y zse}Q%6RHjt%s*)n++b6(!LW|>C1)TIrrTm7&|Iw=!CrcD5%IEl#jm0kbu#V|leIq( zM;#>_b>Zz!Ho4GGDs`(9YPei>7eXRKidk?VFAULu@3|L>MhlcDdWO57JYxT<$+&je zF*P>xS9PgiV&S3_B$SB$N(Cb|r^K-%dQrBQg&vV9wi$|@llVDmeQUfewY3O1#-96@#;9@RgsPB0jkEFv%^^5e$s8t^Nv}wAH+pzo{pF6-~kWd1jyDi+!gi@JRHo)_cYs3QaqP> z_eP~b_spb)0IxI1b^muZQ*o|ES!89Rgc$o`f zP}rh=PG#&|xsk3DuTg@lYGA|bg1Yn@LQ^pmB1pD6xgmX&zGOYY>oCQFx-T9r@|=sP@9L^b=^ZBKNX2veYQYpH&B8m*_>M}Y*I z$nsELCtdH} z2PvS)JQ#rYv*$I=zMd0#{jotU5?1FxZb9a)v`wU8^s1OL<65sw_Za!jUMPY!Z|NNH znEJ3jgSid=&_CRRD_UfkS7((<)pk9;SQ-jc8w2wsSHO_*Wt344aYpHv4XszA z>~-al9>-HU)cT8>*1}}g{fhvmx9NoRN3wE(`&&_gOb$%_&7050ftR!Y3~nVaucnnH zL3S_$zSI@a4!qa#0)*?Bxu>xF-UYu9L>+riy0!=)OQoRXs5m4aI^91k61P#3YJ&Dv zBafBEzCz!hcj1FlRM=R$URyEVfztA^Gx0`tDt9)xavYFh?+FuKZ5@smusD;@f2jZ- z;B`qz&)jS-q4@eZgsR1d`D@VFpwjLo0v-Ga%EJ3NUt@+rB(NQdnSPJ6&hM%wBn2nK_yH#b zJ7(Ww+HE3{kjmp=Z*tkOw;>JC<6Z+h1E%YB1YSIF@eao+7l9CA8$9+jEaWw_v;i7<{ zVQM`J-7tqorzcmE7T{apghl;`8xVWzgs- 
zhZf_B>lF>%ZvPQ(gD>FN3ix@=e9@3_+vJ0jB|gKk_|dY_D4THQhcnKUi)t2p7mIb$ z)WqT8pYt_mzDaLQ;&<=%r)ln_;bBG3KrYVFv`%BM$cOc#x}_kOELWu9mP>(0cV@gl z!^1AlE!%V7URoYDvHfrPGkxU6ZxACaERpM999tE|4qde$sUwE9A|9*S5AAks02H(# zAEu)fM|HCtRq-ovE<0dsPis3{qg4N~5<*!Cvt)RdAolQuXqR}ObRr;R*g>aORo08F zfLpWw4f|GhqoIe1vjvWh|afM6RMx$t}aqFG1 zLjyft0vlDaOPNJHT)YGn4{D2a;g^x7<2L9OB=N(CA_CxT_E zOUMW-L~Sv<6ES4*1y_3#7?1}45vt{u38{Ul4x(&_ z3PmjV#g(MyWEyz}1ie`*vd9q%mIWHj;iWNZZ4}f)Ev9L5OZq_mh1;pugTT&^v?h&h zJ!4TfmqiW`2@m{@CxP_cUI|J;uR@h`ep+kRDY-`J5$=X&l-ltTM@sdIs@u;?y}@o7 z%wm6qS1so|A}?=fR+30-Uv{wwVu3CGSc|J`I>dZ|Evv2g?ozjGB`FaNKlcZj?`_2J z=(5yb602j=A-SNL7lGSd^6aWXp=3PyvEr6V3UNtD41C-QU7R3B7$<`F7&uB*IZ3(A zLUfg*IM`p_#6y)qxzC?~?anO6lAnY+FV0!1=6s>h^sH_06`*&F()qO%B=tv;w z!6%2sT;c7~SR+^($(HhkMQ=i`7>IpOrgsY<$V**)5tEL}>m{L>WU87OePr5zv4-?>FxHYnW{3%aqXyEU-;NiU|onzl}FDFO&kl^;YXEr(hVUSr6LPqA+?XpiI zO=)bEf3F=|MI*v(De(7Tbh+RDT(SC28tI7GwfMwbxb1-Df1%|6eP)7D>cV>q9`l4$ zb;A#*3Iq1mmaLn(3p1%3IqW(f-@Vb??etFxsPizsb)nRci#*m_hCc)k(7uBHVAhwM9*vw|w212GXL%ViA^_JeO1S z>YTnE#$suko|-%rMH<4$ibWG?HHk?aYpKIljW=TALzg>>129z2X^3f$0LO`fd=m?6 zP9SwLy8&ZMt}(jbgC4CP;3c+PeRiA&V$A6Q}o1eKBrLNFY2K6^qk?A13vVZ|m6rPry#W*-oNPUwBz znyj};DOmEJR>hUkvr4MGO63I{Oloeu$u2|D@{L4_A)y=eWpQ%~bmWQZl?uk_%~mWQ z2%_YQm^C?uq%{vJ^c3uZ6c+0$U$!bHorXT6*uc01gF%i0(v-woXU~@R1xj*!MClba zUJPg61Akw! zHfSuA$sd&46SwaFEKPejZVZ+`uD2ID&ez(?!Cg%;74x^_WOUJK5@BV0FGbV=Y0V_g zWvL*z#Gu2$#nRQ|HkY94blE-FKV*1(XrFFIQ+;#Js8p2D`3>vO^+%>j-l$fBdj5K4 zOZupkXF&wJFJOFcP4jFuN$ad9G4{8(G~n;vBH^hN<<+Ed#;5GxJ_fWm{`H*<)5Nbx z%D=Jpm~#<(DIjVzY)L%zYe@%445&lKbdqgU@x80{Xuff5cTA^Z>>cSF=4h+^z2aMSaLquoMEiUraA}IQHU{;v`zkV zI0>H_Rle7XuZ?H0Jx)WunZ*^H%V*~wt&TCaCOx~z+eP34ESVgD_wFTMy0>qiUr z0d+P?n{4ws|5`n|m5%r}QEbLcXwepoiE^gn#u`GDfCbS=8t?{HIUXC2w+@ZNq6vG- zFqSLi`y_E7-y!XpJmSH%Y1pt6P+-6YW21XTXcxUX)e6rBV&h>y5G+6(*y^=J19ayF za*=c|Gk&&ovG7PGc3}RuNo&iUvG!iEOW@V^%3sICE*ygz<)f8u;I-f?quTt6S#eC z<=b;97G-VEVWrB6(!-X`wH@Td_J+=`}-G3W2LaFCp6$+FQ1aM12P4B;^)ebT~z| zGD275&f#Do!Mu3ZNxN1Fq%8gRtoV6n&Cc=#f~`H*J-PZKt(r}uV#aj?y%AxWwC|wu z&T^`8M_+T?TY(`%8$Pysb>P`c)cUJTJZfjg!Gyt8Ptq)8QBbe*!ZWJh!A&)lB|UyG z_S*)*$Mk1C=xoCr%3vg?H0fo0SEzNEw4XUFb+vx^DdCWjdV%BO*G3|@}zH1ktH26GROy?<-q%vX*`D!KwPbT1Hw3? 
zbO%>?T9xjOBw+D>`%aTOMU}2rgM)n!tC}0c3MhaABD;Y7+BLc%*W&Cw5^0i=hdqlS z@7;azE5!19J}QR@j`{9AO@KBhg7m03sd|K5oO-1JS;f{GK>aG`+TDPmYNsBfvl7gW zEUDZ>iF)v{(rQgUh!kIA4Y-gn+9&CcK-AcqVl4&JUktqY9&#WUp+#nClw$jdW`+B} z$Ln|C_3(|tCX)n*86(b;kxJG}73w^=b>!eipgVp9j0+*C0WaqQ+rqI0*W0XNj zo?xwpvhke1w81tT>>+lbI^`MKF~Kg9ex?y=LjsdqprdFw6rFI!bq^rcbzLyw?>D8} zz%`BhD|3|`T}W`^=PDW+>=xcq?0h8?f*N3PaTrKxnX$*}Hv0Tl<|fuz^NXO}alc}^5Tc3d)#;Oeze zStKh5SQhqdY4Ldm;dM{R5$%<|fkB3>-V{z|zV%zIVt39i*UBFy7Y>VjCkE1;XO&)% ztTAzLvWwI(QvJ+nz4+y|;gjf_p!joY|MDuRny3S4&quu0Oc@*jTG^oK zZ^x|k9cC+)zdrdj4JW()SJF#-a1WG%&fXqYejS?<`byezJ%@}b?1@CyD^??L;#f5s z>@SXsIwEp{kjf3%GZZp!o>>(Kntz%aAh_asOlc;*U;pg-)|j>zNx6n(d*7|NJ?h9g z?pHV4->Zq<7YCeebN&qe{r&8_UfkJMXYSHJdFg+Sw)`WUY-uq7^`xl2PVtWD%J_Ha zOA0JXQ{X0X{9b!iXLQ6_3Ca4>6}qjp9dfFsSo5C7?R)w((|F#uD6Yj3Z!6?Tw@u7; zxs@Pedjd}a1?gE6bW$!1`FApHhV-c+71Z&9;`IDF?3=T^ywCM>k|W2UiDgl<3dS2# zORFaIS+=|-r-?1O@5JxjcTpyoU%sr1!xUYld?^r`Yv_pfdOr&q%O$Rjqu3<1)f_RA!RtKpJHN>A5z3 zMX$1on@P+57nQ|4s?5)S7IJ?UP6LTNx#^y-Maj*Du~^l&yjKwu+GT3_8gk@&zY90B z$#YpgOOt7;3@i++YEclx@$&Jiq1@d6^RWg77xUetO7%^Cxb#X&S#a;?A*AfRtU|Q= z%01h|QBm3CW$;@D?>tYvv>(}EB>>LKlo>n1ZNNwrUjVO6=c_h>t5;4&`@~%-AlW*$ zsWvh)cqsaW6CuuthcjtS3dRg$E9fWb7(Yl^0S+CB+=xeP$XP=brfN#`{oT?JD7BrKq7_X)0zi|(W*YE zJM!jmn#D7?sSi@$9fr-)nwrnb7X%linw>+Pt8%DDe9Cd@Mn6zSzJSg&x5P9;Mxk#l zv0XmdtF%$q;{TxmQ6O4<`EAUyoKA{-ay^)GL)MIVN<>|E3N)Y@&c+zCs}9(WuG z0^RMD3XC~cQAmpjMge$%*I$$1Pg2*7fA>nMIrqI@g!$*OCrg-lCv^8AN>fvbhLCMe z80Ss>ZoFPR&EcHg>UE@%nm(GwOSr{8TvxgIK6TbKTxt57QtFxOUW$_IFT06)Q09eG z=l;*$RRbULzmu%XE1xUA@%vaAM}+#nbeV-Y3}cT;8D8sQBH!x|*IQZrrv$39R?_CR zpL}9wPrtC67M>VrM6pe19&pVqh-GKB-ogt~73G|xAm*@FAE1v!Cz-?sbWFLDcJeP3 zCx|~iJ*`Zo+a{5JZ1JOU0lK+(a>Un0o8t(daiiZVT&453_33vwQPsd^fhhllaXfer zk5d0&pAHHM!RgS~J(GiR(kJTH> z9+afksw<_d0?%&}=@3@Twm?dLM2~8L9OU!!-aC)dj}U9)p{rxYMam!%U@|dI&99mO zg@yJK2+fqfG!74>Wh?st+j8wbdn;|1$WMMEdvEefouvqS8EJrmFP^?!hA5^@UZ4zU zS7oW|O_#N_b+z(LSs|&z98g3s&~pdP{7eZVTGNqf)1(*cl4vlZGATJ{yCh)?m8pu< z;lM*vZj_s&PReUNd#9ry`r=x zt4U4*@qTi=3r0)*N`Q@Ds(e!b2u16>=GU^lR#EA7bNxRoz%(vKNZkP2PJ}n*eGiK` za7A&&qRLOE5dG2$Bz{|wR$`TGIS5~f5r|8K{%X(h%t;&2 z9V}SjH4%ZgaIa`L(Bh{b-CySh%8>-WQ1iRmW19yOSxrIDj$9r$MZ>2j`Nl=3E2r`C zpdVcj2YA~Eanh2eXEZNzdUx6{rj&j_EJh=2JKL`ol>kYjV{%M`S(9)Lx0%ie7;O!} zgnfg7ZR2mYRwoF%SV1kEq5dSSTLhQ0*?@@yVJWc1SdTae1vB z!Lf*>4Cw`nLoK>-+bieS2zF{ zwEZedkl0oZuh0)Hc)gMI$!SIPKwc&*_G@yDv?ZGVZL1;x{*NQ4T2%8fx)Y1`RFxN& zN`J4=_ID?)?OwOnHc$64p%D|Wwo4K&J|>|0JZ4X1j<24kCwib9CX!|c)sK3Ka(7>j zB2B^#ypb2VVp5cFo%Qzjj_IbmzcQ^RvlvvvTg$-7V2yQIlrH4KSA6_D#Eu!UBWRum z=R-;Q8QT5jn)!z2wPR60X%FW)LyTu0C??~}57*C82M1v>q=D*)@*mM-q-IkU&*>fH zX_LG8h-#x1wL=;y$M0gab*mEnO&`7?rTJ|EkW-4%lWXzCQeghci$c^|3jYK#knJGF z3)$rn&m@8reUr$J)UV8jDkQ4R$1e_>;o;F6a zw#N)BCH35=>uHfW=ojoCsbsPYB&0m1l^+{>$2RPzHL)xxGbO)D(U;V;wTWW4r+oVK z#b|CP8jt@(zIT@z2x>c;;E0F=`92-c#05}{{Rzl+wg9ID$;aNGXSp+S&9v4$WDK)e z+?X)x8&mk)+>REgR4%u8-Z-c}JD@jHf91#g2gf1yKbFWRK31QXy>@%f0HG2ceQ|HQ6tMTK2)b`wcXWF&+O@qk~XSwWAAas~mUXpn+Gfo~@ zp3QggiCP0g#O8mN{%t>CG*DpqyA20#pSa={m;R~vJTO@NEpHyB)GEU2{}fPth3w{f zS1acV$82ZA3I>hl{<5Kg0LszflO~`AE8Ibz= z0n$Zp3tU++R>eNEq+0C*4N}o$%_8W~;(@Q$S_lo~` zwf73KVXXZ)xePgkcYMtG{1({YE>IHio5=eA(DjyKZ7|xhFa!%yAZTz2?zF|-DK5ob z3KZAk5=yZID=jT<#frO^;O_2LiWZkr?8|%ZKKtzNx##`_9teS~thHul&HO>P>`UAF zi&+)HNv^O=+&#@ikGcp6tP59v{iaNjC9;siPq9_mvS+ z;YzQrdR(n|51%uilwJ_^j;I43bIuAvTFOk*bp0!q4PeAtDi;#(IiC?y5J8#t^TA+! 
zTDYbgUrLRq{SHUO_K4x2tv3y&P#t^p6aE^b56Ff)tRs3>o}74oK=PYgU-zFqtRf!M zxdov;Ickp2o-rohL21JMxul|FaXhcV1ik{+TqOmQIIE3&087Qz#fq7>AlGbpVr{bv zao-A27%E}SjR$$YTYRq2v8P4*W3%|xeMc`1vH#{RdBbjh3{9YzJ_-U)h>3a{&Z!mX zspQzNYG2gwfrOrNa{H_d!-z;V5XlLY1d>hI%0PXBmt@ZyhwWknLk>!cekSbFF&ELT z*P4w@YJC6Ajskwyznb89dC1dx#F#1OlQ^xhr03dvP441WmCl#vA;pz=!5gT5+_LG{ zyw#VmbHN*O6w!n|x~$p=1#|>miKm(ih^^MHZzmfh1nY?Iep^ZXKjy6ei+{7F3CBOK zuKwFN&HJT+Z-Q=ZDfC5^c~*a$?wd5R;J-D|=(=|KR~h_Pg4T{nsuqr_#@T9?*j ze@CwD67)I#bFo}bO=h^WI)nyKgr@)8qG{%YT;9{*`mgctqPqoaU5PGM@?`HmXnNUiY$z*n+H>-c_o`y5HTIf^^a zk1>0mdx#Cwgi5%Y;O#zD`_j7W89Y&i9{<3jV3f1P0kTD#aB_}18>Y^6v`2B5XTOR1_^EQ&>qDJPWNakZ9qsVSECppcaJv`@`#e>8MV=g1mo!;t?AI>31J>-vtnF+)#kF1IC#`bMBx8)}-INh$6+X>3 z<9xb(e1%4SfQF=~+vQ6ZWmB8xb{vy(qb;=*yMsj*flV&akPmy|N!A*K3ot$+niAkM zki%98Mc|;}h@~0%J{o2C+v*xtQ3DW8{vByH^5`nNIu?Cig+@%vnUg#x&TH}wOwWN+ zF{NCZy7{BJhl_+u?M^Hec$Z0D5dkOEE#xLgH4a6EcU>~H1`ZK9z@$k8wqA%)f8L7h zwHF~kWwFQLD;A+& zRz64lTC|k#J1$v255PdY)rWf@aB~*aEVRZi>JL)MmyE!t;2aF*_q<3gEkwl^`Zbmb zD=Xu=Wx1Ov(dD;;vra{#fLHf`4!y{vGWX@3AYkbJ%YbG`d4;ui4rHSLtNL+zy#4)G zE!u5re?jjrP9F92D})^5XCp9|Hv=`h!6HRn2-_PPF)khC%Ae|%- zP}x<5*>i$zDeMOj`ZqKRnm%AANU$zE-dd9Fu^wgo$2oOI#3otroo3KdslX=yRsrJv z_5uUNhM)2Td0+Y2jn>}Q*7E+(S#)e{7t*wcZPN4mCqRD-pj^Yh);=&VR8#EtOH>v) zIXQl(IWD!-=NQQ>>K~O~>8U{9BaInYvq^+^V`1_AF7x-t3xRxI#ns3!=xsFn#-6TR z-ng#^-)tgHWE2`tElvGgDb2qIY<6n+$S3fwn7`lLm5ln3t%mq89~x0kv8d$T@x2+1 zbHiPP&+fRM?Je87(u2DW5BAzH6kUhBNI$|om@myt-y_MI=}l%Wtv6}Ds$K?zu{0~0%!_c#Oz$56AJ9w2nYzL^ z#AU9n!s7e~RrL+a!%4iZ_90s-3V&iw$umdaHK#GnC5#hpScx;_vtpl4+{S9yz}vs2 zlidvXl+x618IpcK<4|O{T28#Cpny&|$#A~)kRD?rYfK$gIb z@^9=wf#x*rBR1kMFXBu%AGF*6P;*L1%U<};HE_KmDmo|iXJvlYP7@n4g9^{U0^v^} z>b5IuP4DYnFUkIUbqkvz(wnVcfo`1T!5jwn6#5Js#5@ZN#{?DcHkeKSga%0JCY82XzI^uTt(G>TluS zfWFgY(Wj{|;0m=g0DWdt&IE4Qxhwe+C=*IOy(_@$$Doqr6W5ML}pVxGi`c#qpkPS(>@CCW(l ziUH`|<^eurYr!{Wa467-%-^-C5s*o zwF%CQk+0QdMrqaX92?aIzu)%ul6C&&FpCR0V_(rJp6O zwdUg^b@=smxZvsejHw3Fa0T>a;BCgw7l!7$h5(1wjAumE%Ja;!D&ALloFuRuqRjjT zBQFf|gS-kf>Wj~w^e<>T&d)_Q zn}{&was`y~!P#(mn=%vwE>TPVyhK8(TtR>_de+|D4ko%FW0eAV371or1DPw3IEpL2 zpp>*z==g)XNy#;)2-0wN(DqvG)w_!WU}|k}qXJcke{#;uJaQ zX&9a#r-o5wE`dc}<&+I+BmL%*FJHOCJYuF2yuOTdJ`s)Nm4ab}i{De8dENjoS3Nde z_`rC6wsYHTBV0W{OHcZk8_l4(#-5=O{Y^@bS(SlrM>jMJrkUg7Eqf*# z#D;9CrQH0;xLwnG0*UVP^9|t)o;SOg==j@4FKRyW*_MjY+DLt=ynRNec}Ez}LKX6O zyDN9QU;ax6hx$1N4h{~|wul#4ciL}#zo>vTa~hb_A!f&j_r@^gArHAfQ))xvW(#$1 zr_FyqH~&ZVCzn8*wP1&A9`PKfNYiVZv#nmR03#^b?c(0d@UzZ_JLlMb?i?cixADvGo~eIpry)pJiFGs6~>nn)BFD@8J4{P2G)(Y})RR zT^85I8x@_68#P%r1nYT@cK>duR9t{c5=jawQfF;x=(216I9OawyWi9C%srI&KiS6QEZRBtTW=RVJ zd?;+tbF9kp_4A>qGL`1Nj{< z0gTKP+XYewZF4n7(yge!Ui-)O`s>$~k%z%54+R3C3*a}(G`E2*UqLT9sy^oo4GKIk zegEiAnYjYAO0|qo-Hz;i1KNmlbuxR#0C-ca`Ir@NXga>_ZAV0(0Qy+QuojGAXXD`v zb!nA;7gH$gpXo)|_5AI~o>eTU+-N_A>&R0enu;=z8LwAh&ZHq06TQ6a$_ii)B6*3R zm!`_?5PNcCgxf)~G9I`RZTdeOn8ysewgAw0#569IU-luLlWWOyI~RfP`=ZHwVTiz) zG1``XN4q_ zD&Op8c;V+kw%eOtQ?1b9PK`yk_zffr>Ux;sEO2RY64iiRL^bD}drQdi7C zaHzX+Y+u`NPho5$^SQzX2~>F>w!mxkOG_Okh!0=Pqql~1+B){+m||7I=m_K48W4UX zP4GxN6H(Qac3gk~q>KmFBTp30=-17-(Cr`_E)r1)&hPh==##=ccu0CXx=iz@2-U27 zuULUCN;@jT9;i7f5`(Xb?hAAStzipzgZ{+u8N!xh)wwB%Fq9Lc4r_stll8GVV6!ox zLN)mJ7puV}VZ_TyTfa^w`lQ}B#9`mNw3)pKrxME%d1$-18yIo%}Le*-6$mTjIQWX zPXh_w7<=Zt$LEPtt+Nql2-Q@_W|$;}jrUOGX&n?3?y(J$uxi+OJ8g&TfBkH2CtoOs zfQTCFi8)zX%NH`}({qKt{cuf$TGv!Gx8>x`U_;Hc`^KZgoA6>X0Jmuewd@_v3W?28 zq-Wq~;dhgI@bNpxHjF8!cZ~>>ZDtg*6SS)L4h@W2&fvq3}g zC~f<=@WFZbZT6VLSPHdi@NWU(?IG>2;+y8Egu~RI^DNq($XlMT>C7ugOJ}h|#ru?r z))*6&rTM2*IB$6S2pP1fU^j7ER8}w70>jNC1{ZukF#tYX|IQ=5L9~whgLze=_*xSA z^FH3h+sn5XV+C&UhWv2nk+oP!*Uy(@jT1^+C5xbvO>|-=oMW_C0$0)(p$M&d6C)E9 
zs*!f;p%=qYV-xlX1`M#&NU=DqH$3p9oqV;0U1=I0y9kpe8D~P`2x$Y=%~{zNPOK>o zLeexeBX$Iq6)NoRd^qArbE++B(Iz)w;i}NRC9R#YClzff53oay;?6-0;CPttQ zVo?4~5m_mfb^2vjT!-#5uI1SWsv?HxSCVyxWlE0e-zz(E5V%h3U0D0szpsAUu&%{< zZ7Dr|Se$|U|JLjO>>9Oz(Q7hlDW^+ww-x?VMbp&t(7R8nP^$wncH~xmr?=6?>n8us z<cnBqC@7p4U5A$AOW7y$mXq^%1rB5R}P(TS+rJ5cb2w6K9bF)AzUMdMqczj5ZF zs&T;}>Q|tAGKtI~7JdV8ZT+YOVp1Jgi>c~oRFFGT|} z4n<+AWKfpS@=D=G=Jc&i&sKQ?MRB5mL)U28L`kj7Ey~Yhl&I&xwEFlRBhOGcHnY3} zId^=i_0M7x*vrbSJnr$NV|#CqO&Jpd7ukHP?Ti8tc-n7}9sGgx&|otq-L>z-7t`oF z*N*uYeLv7B8mRZvH>{l|8RG283s1t=e%UtHMl>!2!b%lT?$N zIdLeCj|k-ow{mN{!0>zebJDf&wF3Mu$hYn0TrVm&Kh$?<;+t5~k0XYNt2SJXBHhRM zih~8MfA)h~$qsx@Z|wGpd?C6@hqb&C#f*x}q_-rdgVkNmJ$if^TAJKvuu=*1>s$O6GDQ2YG1{1`gOVW4s-DgQ@$lu%g$Le<{M83-msCw7sYN zd;MIU^U`mQ6fPU16;7IJ!=s{7NqGeGwMJ8nJLx#{(u8_O(?P~n9>X}4#~l6K+f_8K zH-AhoLHzLDWaC&pE~>Egn89(6>V_LWWLJMNdy|z}tvEAoQ$43+?zr3xD`W4f~m26_doE zC1!Nw{UQHcrA2~ABfNXB+U9gZf&lobVKxmde-5Q|m;;5zN^1OY(bE?@{LB={eeQET zb}QoBUW+QY(32VO>Tv|={)pUL^7rI~Yw8_MeYW-#bKtIi8Tm8jP{7^0W>{wny+`=A zXvB3>XtQKoW* z85%kiy_3n7o1YBzE&OXnwsLz4_(?vy^_jJwQKWMlC#Y^V@8pHYE(F(Ov=`0ePM!7l z#aP>^IcP1&p69sZDa177b|)J4tRMS*9=}(gfYS0if(19{j=?`3k2K22ket`^GhM^O z7bnJg{gHqUlFA;Jbt(GveVPpTyk;oQ8m3QQf8YLhv;Pa1MHUP+)B;Kth0!1EH)bBU zB6*5w`q{HnGF^im8c-Fl4Ae4x)hu*y zmG^|T8vQms_ADViBZ?dtwn4sH3w}_FE+M7OgOEVEWYxUQkb4QCFKZvgvAaG*nVjAo zKeNGb+1Y>VSl+i_t&j=oz9(BhnZ|xf*6&#{#~9HQtst5`*$MN2iXDBP>cEo3CEAG9 z4ngk&vV+u|;s8G%_r2$~T$BC`Xo%9i6iuI;C4{>Ne+`o~Y7mFcfs8`?@Q$o7F5}ml3CHiN&4+P4`k#nJr@@qQjHA?F})%_&VEVp zporgtb|XcZMd|5`28whaQVIlmMKdoYB)w*rkSdr zQ(Lvz>Y7sdDc#i8e@=IQDt-lsBqbraZ|tBiS>^oiBHEDpjzHeZL_ebvq;Dz39hdN& zGq+rzW5zzc>0+1L<_~fZ*8qd#a>gW|v}f_h8!lju?!yuniK%8#ESAjJLtd&NKQhsg z+8>v5m43(d*^^S-0A5Y1_pW9VPd}Dw3p)%V8rPqQkRWiEz`W`Euf=?q$E2~9c5zZ8 zs}C2`MR>vJ*=-W|tl2MU(godQJ|SRmGKGi zB!R(&&lwrycWq83#bmkALX+;Xy7kJ1cwN$xxw4;X}{n`Fb zjHyk^T|JW`1>h7Wj+$ICjK?6N zF4nogMT>ZoI$XN5Mf%&<}&q4H|yNP;%Lg;uxadaD^G8(a#1WhFs?yEt)6+~g`8C0Qe%G>WIs zS+FG?)dBqg0#s_`M#8HbTm2DTzyb`12aJ@wLiBkKY5~4u%vzvNHE_+P{tfbtB0}<) zt$&<-r$5lJAnhYz8No-9*rNhA-`Aa*hY(R>6sCoS-ZYFjs;#hh({JY)o-+~=bleEf zRhmap+;l`Ru+l}nI1w?hmWlGaO1c|h^u98=Fcu*X_svK^F)nCVu@>>)-GcLbD$XR^ zcUgEV=Cg>IwGtLzkt?g2?ybDr|11$>H-VGWZ_k-A%jeWi=obCa%L${$FPkxL)s~C1 z-oh+f%PVmm&He)& zdNjiL$EWVor%#sd>soEdF)Esl_AmlkIC@=YAvF92+7W> z2R>7pcX)vzqf}p;-)NW+1j`0&KGYWaeq$9-{HK@Tkml@2l%L#`B`A$cdunxQH&M-= zPkuw`z}8!@!5!xb(%!lkLw zs#{YEgs^xWCzB}*q`PQT{*)1S*heh*(g!uC25I|8V<~7J@RUy{iO;yAUK!=3tiFaY z29Zq$-6r)lp)2sSk0E*jFbYKYT$ zg3PKZBujWKZ(ogW^D;<4a-i-XT5ml{5yoWW3vJxjTu=QjT^90zBt6oMXV9OeWeU{C%j^`ouk}jt1w96T9g`(@Q6X*2e%K- zueb`;9U=E~LfwLPN_=5kII-*GCqA0h8KCXr3jSoI1aZotiw05q3oenZjh)JiWJ6W5 z{z~UXlrPJ1A?EGqa2rtBvMVR)K8n#z|8ck=GP0n)d%H&jzRLdQscUGyH(B9WePB`It z+GAINNWP^TYX*xG5ffSQQJftv?EufNga`fy8A60=LorPtLzJ9^4%bT2H=(=ZJxJvu zibZsk+eZ^}y*oc9*2#LsIvwGGzKiwGxXvO_tUp+m7*h3_*Pgmn4I;&!YvZ7;zGMJvmM3%mj;;F_Gbi#|q z0+x8wac=EWk+|vcmmdmIIcl_b{r|^JV$>O z8b=}K|5Y;oUSB4@ht0n2-AIk<;X5JQFNg2mPFioaS= zbKoQ2y99g-^cbKJC@;5_Lbo`4X7?`|88X*R{WlV|=ArprtP zj-QH<&5t#ZA&Qv?h#L*cm^wNB*@0a_16H7lSB-q46U<|_KadaA$hf!dF0xT{L8`w!4Jns+AKk|v+S zCJs~++G3(7;)IdL7^H}Z_4tgQ3EVxCXekDH=kXjxoq#^#Ox&uD6jJfTv@o51a^Xz$ z(`KaxPGdku2;s6OgRX`{3^!qatFtM0HC~GyRT@?tFlt{yn~v|KHIXF=Ex{8=+dxUABx${7Ss+LgZHC9~?H zv@g%dr%Jq_KkdCUIdPWGEJbB#8JptFoNsCz6Mb5x6r?v?H#(-z?BB!e3p6_{PL@F5 z7tNnsjYf(S#wnU9dz0WduwE}qJWi>EzX1hZ2HSCe{`mO?25uu+J{jq-ej1I>|K6J) zVK|rmZnnz}!~h%qm)3-TBMZZ%xOVN&5eNO&AiUv`k%-D_%9D8G zCOWJAHmfIr`&+nC(;vd~jPI+su-5MrKRh#0x%tRcZXitC;>Gm$DqffaS}2v}rl^lm z(Xs9IDP?}xr0Cfpl=YbI1CLA;tE`tM-%(l8VAD;5u4S!AqJiQ2`@E%w>lW|pPKqL? 
zyyTOl{i!*e$m-26*d=K-OVMZ%HEzB;-nb}Q9IH5 zv)(%)8n0|Sdy8`%*Op(lD*@}K_jTSP*4E-u4&PM%ca6$_RUZOMvOo?N^gvuSiOyc2 zWrddEi$zLQ9lhlmFq^s#YSgg$^YD45fK;fDJ5}JG(7f|-_iUkY@8qr`fe<$H zK`82Trwx+I<*yZs>8aOBh@`PBT5D$U0yRuzA^3RYiA+E zZK|rT-Y+`$ZS}%HL`0s#QaiIN{~noytx5fYx7FHySgk*;zkgDsoTwLyd`A^Q%!tW2 zRJjalP3V{ccUB=aI$-U1qe?Rd-6QPCRsZ3^ukNc8Qc!cAFI0L0aSIvR@&2%ejYHbFj*M+s-~#BSo)qL01Dgrv~cmKHKD6TPsv z)1lIDe6~^Te*|{9!#l43pvB*@ViEr3U@z@Iq$@+LR8EW4c{@6CknxyCXT@kM!wI_D zQiXi?n)BaID9pFVT#WBW-DUs5gpoakMWFADg}ltnKG6Gr)z?!YAPR&Rh>7dj;k0bh z>t$fx!z$R0k`^#xfz_9I- zC-Q+Qg0E2ufJ`7YWaKh^@VjbYa!tBFC&mUgfj*l{Mgc_|xo1l_PmE>(~gef$z{8c_&LiK7Z2 z4Pd_MDl@9B^4`}Gzn{t9qnsr@X}fL+d+oivWwzPu<%Zp7_*&%qzP-E9p1m6Gt0VR#QPa_8qyX&c_Iem8RnC6|d&*s$BJ zvSi5J2VA~$+XPj|K!vb_bh^gPR0=6bN0s76)fZa2_o%_*p{$Ziu#LIC%jae5rs2`< zp&216Q|o|k;dO&`XVRPsJNp+6u6L`QfuY72e*zhrm4JT>-gWod+%bM%?l`L{<}s0dp7)UePxs;R6E-uzNRm>0mEGUia&!|#5NoE-2(s7 zrbTx5eTLf-%l-jXY%dO;SWr>1o&JB(s88A$kR^e8~!Sm&lw{?stmM#EifSPQL_T< zA+nd57?F%KpWcj;yT?rE)G^5BFt=oZ4)UBxx%0g#%Td%k3LQ{mfLw13YcVimpFiTK zO^TE)XtRL6u(#UKEE4CkipqePR9`T|rsoBnc<_wT;Cut3@E1%?pY1t`>;)bBl@&#k zKQD1sP3sZVqG;lKaOIa8RMcyQv9_4;&m~q7ZDT!L{-|m`IyWz<)u(O)HeLzbd~C1T zJ3bx)`M}i?LuOZNCoql}OsuqX$iT1qmVo8UzrHC|>1xFYZqT!|yWpBeA+Fin`Qr@5sKzUIO%U+va& zr#PeTxk_$|ek=U8)lDGPLm+h@hRk;E=3Ss`>p0d(wuNxNws|R;6ZW$6?XR- z8@JWsHxQ-12?qGF?Y*!xIsHc7Gr?x8@@?!e#w-1CrN|xvv`ko;1i8+I;jvVxbnkb^_w&nXkvE#C z-5K1se~vN_zaftq9-e7-oD+WxjIi$0wV9L+G)w4LRl0Ce%srj1W=DpxLAZ3(zi^mN!%@ zf_Wu}ykF`19C0GD z_jsI&rJbc`symG8<^BV=`S+jlztHox)?4DXx?>t6$P_O%?$}e#>eb_OFxfBSa5tWe zU_+rimVX9INS@Qdn z0x<~k)p8(ZeUq`)=4lTvQG+$V9pADsz3VHMa@6)w2@Jw~F_P`4d!)!S4Xig~ zoEZoOe{JHN+~*vcc&5>)%Lu*|g))n}g1C@Kwdy5ZCuh5Ul$KdllNjjIDx!ckAjaS) zWkgR?KaP?>84s1t2<<#umkN6QpLN$(2*silP0?P80w27}AcG-IjS5WV;PawkikZjS zTiiw}V_s=Hj>Q-E$TeNs!P_AJj!deso1CV3?|EO=exag0x2*n=HX4l%lj{Uqh2csz z={xy@)*;IB`reNp9OpHl^M07#+QW^c*eg5Xx*I&Qnp|1rxsqcgvkn*$)&vn<3sU1> zcx+ZUWw={1Kxq(Zy~FQHIdhuQypsL%6vRt=j-U3_MTe__AI^LZl>90MTx+I`;_#d* zP^``(!BBv|!Jd2H!<}QWt@@by4Ywkch_Tgj1N`mk_YUQHu zfz3xBsD`{DJ6f>4IR-s< zgM^rvFvS~1xNfnEco#9e55zJ7C z{E@wP*=3yv&wB4NZsra(5Hv#BK@%IKH#*w0VIsJK+xg8@(&XjAvr3f~k;It21172= zV|`bRp+D9+J-{r*%;Z@hUtv|JtgOXk6nuTRyO!tk<*SP?G{Yd0RYKgIAhUVX_~ z!+E$oqW3^r_J+q(@>!hz%ER8g;bl9kY+d``qu-o-P3zg6U%%LsSzQ-NZ6HN_2;|!^ ze68J@aI&75EI8nB^1NhvQ}QIxnfCsn?R3$=i;09 z!mn|tL+{Vr4@~{e(|4Kek}6(v$v$^V6T(J#wfTm6=;Wd4w=nCXFu27v3I~G9HV;eF zg_QB=Xxf38{N{-Y(eMft%s35cu;{DF)EF(D41N_vrn3nYq3#W1bhqXU)-TRF(r%O850AfIL#qsD<0<)QleAu^EpD&$tQiQn zeG1uIA*mq1$bbHVyLn=oRAv4Zbv2N1vINHC!2cIWA~K|Pvcl{&X#}?v&+k+v}V*c3w`@G2U!(c%+9C)&1pJJfri)0t@rJIyEzi z$dyB7B^7RG3acc(M#5YRF}s@c!lJI#24pg|`tub?ab1Q~cK&;c%sQa>=Y`>(P1XF# zoa8Lz-mQp@d?t!#Rj+E6p!XaJKm+%xN;(fsd{ z1AD|Yf${D}-$k(V$Az)*i_oSmzE-T@;?I%`2_*b$BBUn9ISs8NBsA%O&~$t%lYJ|9 zi+ZC_^t#LHUZBb;8!cXn2$K+Rcz9S&*No-YcpG*^%%hOMWQchZuXbX0e8PM4O1WJW zUP8psP&pAHS5c0!HRML`e{h*BzjFy#;@onr{^nIG=k@;AS)l@|!qrfg0G7zd{l%4V z$CcX&S?2|vmT{lpAb8WSEojqD+VTgS0!(D5LO-0(wQ%u{dSz_P(4) ztb>lW&FiORXA7FZpu>2<@Wtr@?)u3EylGwOM3Tm4TxFG$=!HZrb?KZNxiqi%%y!Di zMPB4dNS$#=$L``et@!o*&GUCh#Uvhr5cww7Ki9pj2ESkET#|-cWj{oNAc~d;GbPHZ zF$#v!LO&kvL$Q|Dv~M0~cF8)e>$h98aZeAN=qxu7P^DGc*WFk}(%CnP$5)8tZkBEM z!X}nb3}yMhX~NwDXYc>k0{s8mKeQtUc>z_Wlp>YjzX)4S0bXJEHR9mi;eISt`qy+| z8H$5P@f_W?ooYPv=6e`dL?X}(RKs5PrAyU`=rmeGDz6AK}t`C%B8 zb^JPF2nYv_57m&-_Yp!J4a!lJWtOB-6M=@G7dg=_8J)X6k_EZ#5 zN=UC6FO%O6lhU&e5u(PWSG|@PF-}I2%VO&mjuB@1(_O6ErRe<$XO*T`c_0QsFi(!f z>ia&-z?*d2>76i>d7{$p2l@XneraP~9X)z+?u{f?}C9rAFY#jR^ z&f2e1xbr0PNt=1eR%hporgls*{jWgwJY9}d+!W`uxhw!%_!F0}ewfUcdx=ocS5E{! 
zY&a4aY~azt7^WD^ktqP2`3o5W$E!J2lqn!eXMUh{Yr zq7^wekneo0u4Rr?Y)FAv*4H~d{}6oBB`Ik9yn4Es7`yP;GYkE(7?LX@`xZVua~w%V zp^)}=RL;Px3JZ@)f>OVfbh=Po4tYN~OV0ftud3_Zu<0EVVP~T=j(eRNqS?`UteO1? zSF~l*9(>_H7Sz&wWjQ%0S9<$@9Iv?tILkV@eaA_oir<%i`$K3NA|sU%iZq*2RGvF> zCKIY@wkT>;U*%-SQ+$t4H5D4w{*P|OGPNUnH;UeHRzSV!)UJ8Wzqen*EZf{yTP<`>*^#e1GSP$UABA$C&s zI>X2Dxplr+_xOC>{0l7OD8xekI8Wyh@7#7}xD~uML&9DnD`2Nc5F9^YWZQ`1JY3@J z<-P4xDdqKJ53%I+`)r;z;Z-Bvd;R>*K)T94$T-hp>6M28Uf=3=RJX&uS%L&s*{Wgq zMdg(|($VE*5AtNKQ<^G>t6q%lS}g?N`tAT^69b=6<;S z$iP(XMy%P>PImYEvPp*qq)ydH4uZM)IVpi|g~_q*|Fwxfh689Yy9>+xu(MrECK}|w zCU_#tI^f@By@7&r%35%xh^Ei=q>kVfRpXaTcxJbUq;@FRLjj1Cd9TM=Ptp^lgyqaMiZ(lKI z*LahWFlN7!zxT6DCX!hRQ4%thyqm6>&YkMnw;GoeEhs2BpYRK)ZS)qKWIvZ<@d(B{kT^| za6~ObpQV)Nq0t;Ku-1J7_N+j zByQ29H=JY)e0|5DZw#h#O))(B@Tu)XZ+o1ncZDY(r48U?`DjKB^@Jg!bRNc@&8u5u z@=cM1#-+N3p`q7VAjeT**Gf;$Lxq^F?hBic5eNYoU0N;#%A#KyY^{EzlN+;_gmxcXw;C;BJAF_gj0dv-dgs`@xTdkwM1b znfE>CWwRS$CRWqZh%49?=Z4ala5qm8*pz^0hiRs)Y6&L?gcX89P_{8(lO}<>wQ|mc zxZCh7XR;Hvo31CX6R+#HqW+%NZA_R+kV|17>pvN%X)8{hTh*9!QaVxlLV#8mRjSL`!b~z=Kwai>O`J8vpZ@3LdbE&>A7+IS{RYH7JpVD zY6JPtoG4t2NqoZAErcq$`Rg$=L0j7mIw;?l7D6y7&C7A%nR=BEjC*E56Th60W@R0a zcVCp_xWCkLF8TS`*e{z=u8t7=q&&Y-_G-EE-ms$-r@DCEqO%9Ry(fOygHTNT;`6!x8ir zviqXe9p=um5%){QAsD7RpRcz#kVNG4Otg+c+}HGK&52>oj-$`g$lVVXBrU(7um z9ei!~rlSz8VTBLx$0Es59OS$4!ivR4$BP1$%(y4tt&QE1eib9-gHylw|NL+Mo2PrjiEYA! zr*v=)yYE9en_m1aROitVdbLNB<|7QzaQ#Q$_?(3-+oEuKogc3{U9BO}K_u1>4=ySB z{SCQ;Js~+6jhmahRKH1mBV}}sBU0JUO#oB1`xex!^!q0fD(+B?y?iY%wS+F6bK-$X z6p2p6Dfu?-3w?qih8~#++msf9SE)TOqL4_U;|zb5Qfz&bu`H%HpdUhn2FxM6)}+Q^ zsw&#PR#epW|3b|G*|*rMWqZ_enSibjF+}EWjOjfJ2q_Z_!3O5UJ=U$pZn6AVUc)W4 zCcGd~l=iPmFOWZxEGU|fuW)bDcA246cnFtF?Aa0a7t$ik%#wjGgLGC{0{Q!dr@W$E z_g=Jgh6KKMO0evnO<-;cJg4j!5JfzQ|9kv5kcaV3gD2-De2e(lAL87a)+8kXPw5lcg8$$2=b@!G6-if+TlEhLnrJ61q zi!_IoIV`f?<6SBwU2OX3aV{3R&u$K7kPWJw`Xg3kq@E2@=`@%N3=IvP_GzDBFuk`b zo864H)VJd%E2tEf{w5q;MK16GL{74_oW?X>S%?aj_e{L`ZaK>OA;`vUklsyhUTE#Z z!2sWu{!I5lbtIZke6y>=nj)p|dM02I+(n;# z&1GYE-R1Q0JieC#!bOZ_gQ=t6DJq-9p}Xw|#=FbvbvVh=3+rJ~oG$B2?2;NRml&5m zW&7{#OIO&+W5aDa(o_m2I}7=sg?wFkd_(;i&)$9&|I$EE7hyK8Iske|cb_j=Qc``d zs1YEviCVvjlcyv$1}s)tZr$JB7Nr{#>q1Sf8ert_V*AUC#GuL0&oo8`|7mrJh&bwi zG+^0L^u^$dm-Y!1>7_f|Yf+Yl25eAGmw43?PaBSAdGx_4h=l(rGm!VrpK&RccLiZ( z`@%j)x;2>M*OgypVejZ3|DsEP3=dzs90)LlG<(*`aU9gr>hZrb3t2SugLvu5FdDJU zxNmDBzS(nOFj~(O8+know2$_cJ{UPH6F)vL#CVaB@zuNoYabt9a*$elDQL4Clv(ib zqt}Kk%RvQNnpcY3uqH%DRsYR&E0)D{3y0-s`enJa@j~f4A3?APBE;ovh`qwtc-_;} z%ld3MNbLF8U2H4k+2!n2s~AK?*Uo#z{e*~I#N`jce6G#j1RKJX9+C7Ve$`ezsB;|K ze`A6x@5}$y;7&)p$3vd~?VsPr*`q!=J_-Lh=CE7-(?I4IqMGR|FGanqP`C~sh zgYH>$b&*7?ZZEu6#m;{94)mj5-+4%Zm!(dG>&#vB{h50lqg_u9kw#UnK9r-2|8>5M z^K5mac%1r^s1cwI130M`SurmP?$X=ktb(|CnTi`1a_K->gS0}xh_sPv(!exPOW=V4 z4zjkFZCkJPkkPJlbFur(OVyKjgE2I(c(qUj0%*Q;zTwh>BDJ-q;})}JxG1)b_Tm5^ zIX8)98-*x_7dgzi;_jMZ!9W@10HkbY;FgX`Y!h2zuA-bH%L@c_`B`;q;Uzv{pl~1aL9Hmc34TJHH~uGvk8^*%Q2)kL{|G`!&7n$T zaj8~Yfxf%SaHm{RTA2FEyxS|HJMJ$%tTof9_28oKi8MvvIdkjAxwFEAI(b zu4X@)ghon=*BT9!UYXDZ3n7SMHEaw;Td)MOiI6V@@3tEsoo(!GCvam1zrX}JMa@ob~o5|`Xvj`Q{=P{eF)g*&UV z)RW{`_nw?1?mD{}G`>AMou57Kha?q5jK>MN4uGGPQ?J`2S-{5c$@3P*U<@++fqjzwbEp2-oD- z+>HFrO;;(Y3>}YdL0gX@e;$y2vz~DJl*+#+$At1EGF9Rpn4awY5pFN)$8G?DjyH80 zJZ$*``?D=o-?hML4tg64Kt<( zj@+gei_O3_>$UAOczGYR-1i!SM|*$u`LWd(^`NxnTC6s*n{=nr|CKfD>olY1}G`q+efMRrKT1U`_u`KF8S_d98%i1%5! 
zmAbPsr_1M?qftD!WqdyZG9j3+>>#vgtf*nu5rD@UNo^G6x}Vq=7_X27FJ~3w?Eh{& zOpZtSv@~5;l6%41cb?k>J3E%Qy^fO23tp&8!S1=Uydgy@ZX+Mj(kTHcDGVK)ZT%WP zy~6upe{6mji)%V>Ad11L93@E~(02xTXN3+Uum2aZ>QixbQ-Ei%oYCk{+>GLtbM%Cf zd?q00Ey3&cO-4=q7pQ+i^RQD)UP0zBo$Wa5;B{K>7ayIF@s39vkoP5&%CR-Dz0J&o zAE~6hr6RZAdnV>#GsQkt(3kh)7s-#e>(b-`HZy9|EjE`G>`vW6U=#PdzWNupH`jkt z{O{&jKB@dSC)U4}5dWuD%K~Du@{|G@ie4%Nni&MTi%lUs8DiHKTGWuj%wVWzaM(UEwO>a% zZ{#hQqmPPk;6lcWG%CL+KckwPlo3mjMhwLmW_|7x{NpS-B%@G3fSGA_7C#6fM78R- zR6-WE1Oc|bWTD!E@WI?U9c^bct7{5R@Q!BROUuRPJN0XE`c~2Nvl}o0}}ti zTh)nNBbB{F%FBaUtk-Z2-oJ49O7~;A(kFY8sfPURAHCc!2Nw*E-`AH(Z3th}6vG%) z^}f5ZxKVO?2(xXLoXO_A)}kK4JnG$A>ot7w$JwCIu3Gz@U&tIrshWyt3&Hu;otou4 z=b6M)>OgtXZu7j%by*fl

e$ndb7MXvj&!p_TTWu_ z*hzHV0DjdIryLF(Z7B8uIy~+iU zobN(H+LDPMgX+Y@&mc!6oAV^v9D92+;cjUGCbD;|ociJ8Yi$J2zr^VzD+c~*S=BQ3eP7vj7{eR!4V8ceGOTIm{$Qh*0!{3OUM?J_44|`X|I(jJ zuY6kgsV>@;`w?Wey(5um%k@+sdQ4?r_u_v6RwjEJI<5Tn*g?o3-ixw2Ki8rF4{ZZ+ zBu3@SfF{9TMAL?kfD_+VIiv>pV+ovf&QXVG-$%1d;Jpk&`#O8(*%9O2iBdB3pK0KSRXhI6o~yfX(9JB~K0b z6w6VQh;^tVX(Ad>p0=@Af%S%Ga93~>Sbl2ZyX^(<+0!^aC1n)Mp%pJzK3?MMl13Z# zE7aQ?uctP@aqr!c-3iIk8xvhMrIP<}T>p)MKxD90BR^)(p66rsp!OVse<8GH$&P_b zztppWFE8PD5lj}@qFbtVe?tye20Xbnro@x2>G3Mkzv=_NLJ+|o zIl08S*BHbEm!_W#PC zp14FP9!;jGEpS41{j+}K9-FKx=eQx0gDy&Fmzx2K0^^@HiIFI! z81?qG<`|8=3kv08S?Y63UHzhqnjL$}lUIdKNKMLs)71m!g?bYzQZ56IQ;O)DFQ2E`teE4f8`4N#f-=xgnBMZ1 z9|2A%|GL`@uqyTGp8M1VX;f0939kM*o4z! zFs0pg2kSKZ%7*WXfjr1FC_VgJw&@=1McW7No4YRv*t|S$gToJ>XfR|LKFK$h=gvxU zkesC5J-%!ifBf>g!+Ob(hU2#1pUI!M-dFY1_^n!VC#SIgNr&ReA}nZyIjdV+o_&cM z*(l@Lu%-L1F4V{jH($%(&b?)_Bt+*$jYsX?`PFfuG`Kyc1a3F7w$8L(lX5dH*?Sp` z#Ik>Xvt+GO-;%yN-x~NsPZW2l4^75oZ2`QLev&>)xBb`s#N(61ozR^2ZU^@%Y$Z)r zN3$LfTc7Wdx$Zgo6?fd16`c@!+3;KVR*-utyzi><=!v_^#eVW7usiuNEf(hZtiMvQ z=6#Hu#>0x{cB)&=PnGG{MBvtJTFXdNEtk25<*VM6y-G8?&^$gTa{JH$467THM=EJ7 zRhBCLhb`=ts_wVtrU=n17xClfv46_pyj>@bpB^uzNZIOnc_hZ|f^9~nUA7zt{!-D0 zhd5W`q90ms<~D`xbxd21&E0H&J>G6f_FB6TwyeIcN_8AJ3`H1wV<3)HxWR8(xU3qK zGvW~%Kd0r!>{QR~59cTOv3f(NXDc|j$Gdj>flHar3WUm_*CrJR&iH5gXrukP&tr43 zR9}XKIDyJXiYpc#`%W-Ls?&-1rAjn(c)AZG;<~@M(8j7o{z0_ja>Pa7r@Nm}`$uwe zSd;6nsp_-mUadpv?M{ZYS6h5UQ@wfe)>mi8T={p{TrJ*&V`3+*j5*x1J)4^J73KK5 z?&!q?t$I~|<)9+XWijDp$I+uK=^Bs5oMgq}M`k5B%R zLiEcdprj5XWKwoedpFi^XWf5Ed^wr>6q^zF;t{{ygvmI!L=DPWCdYDY+L!7AE8nfclU;DwP&Fv2h}J&GUYaL)q>USOxmxNQd@ z%zz!1OiZio7`r{>4CYvFj|_)$1uUXblV{NZl^;1}&08LcT$jIXQ2I|r0v47bT2QN}t+(|mETiNt6SVKD>049ja$lO{xJDT)=G z7YYL_ffq_p%o$gDT8s)_g}o=MePV)x1>AWZC0;-j9T8lL?=?oFOF%y;A)bK z1#@JY2(v-gHm#@so<3RORf?;*IMev`^P(8uZS!z7+{iMM-@ZBNxb4PtFe3nW6y_&+4^i&Wf8HEZc8~<6~au(lQu1z_(5W%r^YjlGcoN%P|Mn+qB+&a$qgd(5QLKLo=hR(=;ZtH2#9-D@{=jUk z!~ZUxj#BAE&xVTOy?e^|8wd491$##%XMn4_`{PrB(f;0KxWmo)yb$M%2sb>xWO_eW zs%>6jy}EF=JkH2}lR914`613WZTCd7!8g3{rowg2umC1Dt-Nx4F2hRjduG=8Y0YwP zSwm&v_^<30mHVUFGCFYGe_K8~w|BBMV@vsoMxL?y=#%GyXKYdMR zMdk^Hn8}Y{$ctQ_-7H7OJFa)0z};hDVhE##mL|Wp?;l!a8XmRh#{(rguhjzQ&fh79 z#j(sVP2JXy{%xCLqOh+1Zyn_djSvIZvy|Bq%RM5_uS!?{@Uz=V{$yCyR4chw#aWMx(n{T43#( zq8_S*_KZ zpDQni7OLgWu$QE`X#dqMg6f2xsb8I%laq%irhqaQN^=%oq2h#4+9TlrQ}Smgk%9VQ z58*a6oGvHnewxR@%ZHgQ*idz{X8wXi<#sY9UrC%4Tf}{0=^Ay zV>%>S>XO1(fiN_*UqA}?hTl0_jINVJ0lJuCz~4Ae$O_%e(INqr`C)zFAj3TXiWW6{ z9qq39M_3vVTqp6wQ9_g&IiP?fm_?8D+pm3%l8D+GiCUEeSF?l>6^%g+?AqLm^v1>* zCx_zM`STmBUZ!+BKL|Ms!E{7Jn z2fHV>ke&>J&B3Q_sXmEp(8TArwG-E`x`_nS19GvO6^1W!m5-TSh!e0_xt1lN)Dteb zwm8Dwlh_eaoz#xnT$2^?5=Kd{K33ByuR8bB3@s;A2pJk?b+n#=f8n7w3to$h4;};g z_2{_OQ~|MAvFiCO-BO}%FOk2lNn=_qw3OTTRw*59;#j*mg#Zj?)CrG17)b>p=|RCr zU(*6PBb$|lgadf#h0+Eh;{KQ^nU)-}K&+1K&J=l^yQ>)1TmRn~>CWqc7kyw-~xscda9XObiAz+(aP6+ad=T3UC84g{hK2*vE% zS2%4+l;j%3j0VW2E(4DstKR`mq>8=ZC7+UpQcZJ~xrSDox;7PiNApnc7eblkxC|Fg zuU6I;)Zm2`%xlGny=*#f%5~mcAMYN~1wsHA{Wox@6w-swdM$6U*pvNOTS>3OYfloI ze$I4&)~>tJAT5P1zX=DAP8^|R6B5Cewz_0vAGHsb6YtJ#<{}3=txO+wDd0%2B(WI| zC$h52Rk4Os{PtiecTP7y)A^2a|4ff%R5Z5F=$}fAcnxl!*=Su8AMu%wDHl1)?*Nf? 
zE3LB+7;%;jAo4rxj>R*uUu#-swfoYLO%B(5k!3o;X7%;4HKV7oAK&?oq_bD!ezsJ1 z%rwkz{4lr|Gl$QaMo-&EHynL~FBtlkMGAfwaG1-_$F#RuDjf)agg4l~^zCl5UzZp^ z8C>eDbZ6t#E$?_blQdp9w3fcwEom>`lwS6)+SrbhgkP=Scb8Rk^z+)u?=F#IFVDR$ zVZRW+dzZo0b}dWe2Ni}Njs){F6@U&N;XtXwif7M$dal0u08Mm6BImLMF0sd7I-@y7 ztT`C^`jTEj8jt*@DnuM!50tXIT2e58T-fTKdQ74>2fFKbfWU8dpsR<^TwVtqwJW6h zFemI~(w>7T<>f95NpW{N_+x)K8~LN-g1w$lB|EohU)tX_$$Hf8xaV^>_;rdrdyV`A z*RuX>p^?E|nxU6jaCLPxzw?$1=<&3B$?{m|_xGtBvX=D@l~42h)=N(*`vcuOd&5T0 ziOM2>zm?lXw&YW>p8B!O(T(2MkKN--HB;3d>i*|A`yZ!UV0tN%BzwliP3{^W`?}sa zXa^k!J?!WYj?=UVs)rEjE#RUd^Jx$=)b9D5xqth@XlYf~`N{)`s$TTX2^;GQ!}jyB zi|dsx9s69~hH$WUgeWx)wL=zuCAqR%z(*Vpe|2#spo6Xx>&1f7Pws<~nzTKx zrIaJbxi8p)Hh=|%3%QC4gHq%^1%+>Ep+x`h>3;53Dd(M5ujfLXvxJ+N1i!mU9R zY*rR_U|t5$mr|3#NB;f2N2rLw(4HPYOSRrHpx024MQ?;8swp7g-s^$#55lHy3jG)$ zhh*FsIysz&&lA#*#Em4F*5dLA{Pe4^!Bg?lUZKv)+&JRxC+y>H>r&=f)5MHJRMpuq zovJtD6hBjVyUT&z$a~0!rbnKlMOE2}#UExUzX`GOsE#7OXDZU*LP~;=)dt>k8Quv4%K zdTSNnv|skY_h!%0c42SR`zdnQZP>n{usTOwMG{`S_PP1JB0~xB2iaQ3zO*quG^i#a zJKsFF{*bVl%n#)4P4c-k2f>IqS~kMQ_D$TmFAsRoK8bCc`D{+EFPCvoGFD%_`8e5# z3wW+cz;DI?OHO?Y;or>ZE$ECOF?f(F-uA_vbZSc5cxR9$y0*x*xtwS9s2**_lv2af z2S>@9_U6u(Xxq;J+@hJW$Wk7TDcroLF^%sxD$C=1!$#%^81bgG`)TeH zd5pAo9~m(0nBvuiE`#Go`WF0&g!4w4;yJ|xJTMzS3(N{8J3iH^>&$!HsBKn;BEY>{q4v&^s zm#}8Bh@{Eal{Lf>{o6mqy{diuGLJuBbGw2VxbXHu1Ec4fb*h?9KH{j%NEt~&s5dY+ zSjs~>b)>X~IRV=nXuW2_VZ0GWrHWgH-_*GsxkEv3E$+R^Yk?n8$@_duBfAMFNRgC2 z=cxb4sBD_fo3@7FT5_kQAms)$I^E_N2GG&htXKthzJr8X{^TRQ;Ih`UuvNiR5=@9z zBdDmoi?pEL8WuEup=Z6!=g=6s|I6Q>_l-d4k>W|?YCPR=MdP)>Vwrjl(bvsefs1Dpc1gyR4h)!M>QfJTDe0W;w? z!1Wn5?+@X+?7_ahaB4?U%o~i=Gnz&D&KimnnJ;}4Nu>;o1FUKL{*I(rmut-qQo4FEu35Uat}5Z^;IZP|;uJb9 zXY9xKi;P>1wJ`=Gh2x+}%&}0GReZ><^1zKQ^}F`WI(liDXgIX}3kS{I*N`s2r7?wI zd+;;5ungH4`4#u`yqNcKm8JV_VmNFiGoz)(aLa-AKV7k#B5I|bf7zvd+voa)fJ!`9 zQ=bl`TuA(c3FFOtHsYriKm?}nMO?^rOFE73;#Xah{q9;UuSOnyeDxz>@Yvb8y1g#; z3<0T_vzlqVDT?*g6JUB>Jv{?sCX7- zWMo!I>fdYPP)$A6`M&u+mwv-Jh10;zV_Z@K)H$f|zYV)yZvox=EC*sO{+%D4m!$iA z9XLJuOW@ay(MW!T{Xy^E`@bV)V(Wp#x~ zgOE)_{lS#2;Y1om_how0P8-Xs>g^10^7n&7Bk?bg>!-KRknvx|LkAIEo0YWwkIt^x zicCbh_vf+(oQQfIOYsd;7W>OXrPO$rVw%FEKA@aH2cr`k}e@Z%V)hLjC{i%jl|H0gn(J)>wp7+^qPvY^Ls3UXy!iR+vb-?# zFv^}#X;NIHo~PMYup?Kb{LwQT*RZ9a?UW`vi$#4 zE)A6aM6+&ct13oMRptM=Jp--VUP-_v&)=%xfn3>ww^x&N*)Cw=c7qjI;gp7KSfV0W zM3QA0WdCmy=dn|Aw|inor^3zY7s>%k1mG@qhrbJuW$}b zM1xF%!-S+=yEI0f?afL*j_wNr8v+LRVmsZThL?gO{A<^7fiTVRi~v&$vEE})r1=Gfy)=d4`2xrk00m_qnmYDRojZrY2OgKQstaY)u zC#$&a%@=Ffj;=2pW~AM5_|*Kkxo+Ee#9h?tRb%)#e{sLO1;Ifx>xdLMig-pLHJ30I z46IJB`>PATOd%T3GOM10Vw>S>q0N^;xo(GK1&6910uVq1 zIQ!Vk$~py!YYWfcca7j+y#Y@4TEb5C;%AV8fjbdRal_|lFNGPfSPH8}L^nBE?IPnzXmnPqHNtce$N z^{C+0X6U+yioKl@p4@g1E9a`xGe{9QpiaN7gV&zCWr4{{-cLGcwy@9rr@#6Cf(C)( z+JT?JG2Pj!;_oMuRAeg!@6{eMg(hvyYg2i5TJz&eRD!k#H^7`c$!>X8CTN z*ZUOK;J@XyilQ7CpDU%P7`-$4t3Zb%pXEG5K)Vp9itz6(nvJJZgqG|%w#(L8kWS*; zH;lk;?EIli3mZ4%#$-9I*U1W2HlD3bs=P@4jY41Js5WLcX~RlpiU)MP#I0SwnHrYc ze-zaFX0BOXPs4C=t*+QSo5hGkw?Va%?*wL;BiOT1rC;87L(fzWj6n7{XD#c}DH72E zFe_ol@{*Mmo5un)garWv?u{0+1wX%04GM)rTY}!8q@bJlv@tuPkO#zqy1=LgQ zi(8I2mEE81rR{QiS&>-*zj1KQsVZgl8DV;@=4{1wM*f=24dgr_+oTiZNCcJC@ETH8 zHSzRHtG@O%4#eN%Yv4q4BCiW;dl{{smp>#^cck?Gt?LHz#!$l&)y(?x{1!LhXE9M7 z8VJCLDh|+Oskcht*89M$`J;4lX2LVL1~aEu&_AC=x@=!C`pc!Kc(KBaj-BX~kv_q!A zv$>9VpzD$C`-fO?v0%F<-s)9AhVEV3t9Gj4_hrM3w%8d|bd(m(Ke3J5^Uaew!yCh$ zULnma_^+%98~u$`5Tn{g&OKxlVEQ%@0p*$QP1_Xrb43Fxm%i6xAD-6G=q5VEcKMPb z=R%^=E5Io}DJ6h*s%PO6y+2CpDyfU_+9dMX9wN>nzFM2*cWK;1G9R{k;eKLtJeeI% zTb%j)xJ$SD_wd;H-}j7E!zbg;c4;H{7O~p)wN!On zQ@}k}`#mbG!t8QCJ$sO-n&kB1Fgt&zP*$|vAOEb0uajq|!qKW`Z^WYMuju*4nW$$6 z@4xys+_Fa(+^NWX|xxU)nO=eJ+k# 
z4_x*?5x@UFB10OHPtd!^QsFe|ys#O(PrXTfn7^^Mk7x1r!+QPqTU|UWUV0z&w6r)C zwDcejgoW=Uo|5F(v(EDSQ9$m6?}{FE?q;Znbk^DvzlY-AO|r14d8$fea|?n9#8cS> z95CHTK>3DBkD|}_T3Ab`#t#-YE>3A8MrHA&6rX^0 zeV{>pDK)y_Jf{)UwYg6vO-6v#4Ky^XPz_RJA|zaQfI7~(CDlsk^z4#O_FL`kceyx* z>~W6j)}{JwJ|F=Q|D+V_Mpo;dXtX!Vd&VHb)ELAL7?$oO{TU+T_qX_KB^+g*(?Uiq!iGwMr@VCDBJN)U^SMr6k_aZZeGfmqiAb?sV92$fG7whpFL!C2dy^je%_XX(ge_qP(!LU1yEFN#bz`>dBcWe1vsmedt7 zX>8}$2Osrq6QzGE?DwDxjf5(?j99nbJa#OCD);q@0p6fV{tukBtMVL zlXuKl)IGN+2|zNp-N!_BeRg0isOkR;TK=!vS@8X*3Owcxr!R9=D4D&wzjhoPub_)* zzIc$<^*oqvsZc0)`KLD_Zgd``X-2H2Q5JBJyr(?k^Q_nCe|_4-s%m6Zfrxe1!NI{{ zpPzig0jnXCvw=jW+QomzgLa!JW5HS%ue$UgpwnP8e_ zf?;#yjM@21Ml<(-hJIaJ*%o)wGnWINkdc>(OL$mVx0r-vgeV74S!W4sG00H^(6Z1L zrk8*HP7HjxBOV$}iTgRgrZL2s?uzJvT&Wj3tSb~-7_*_g?^Eu6@HDxy4w9vC!tbYN z$G-*NYWo2(NCRxCj5I<67*GtHfMlc}NPQF3_Ir+ItsgbsHEN-LqcSMCHxDiw_Q8fW z49UdK3XAZM&GoL9ye-@yMuH_ayXK+_j{8x#VmF)0 zs1tyic0>#yJDLX+$3@yk-m)EcN4=PrxCh6QYridE+T>2RT!WVIO2;1It)*nul^n`I z900-ws?pk&K}Se#ZumvG0dV2WtqZVp20;eBtd;7my*1Opy ztZ@XNq|#w<4fwAGj7EEu4$7wuIN@)vxIRrF%ZpvRIrRDa8T*b8-|X+OKa~RUL{;Yv z{6=@r{WeJdGf6?qv!M3u$Gr5VdD(}I!}|mS6K}h3jzT(bLz`FKYlBUimOfR>aZ6s; zsI=kuSyVOFYK@PK3t5&3z=}scV_8-%PKWJbLE@)6A7W>-(hTYL6RwKLTBx`y>#~(9 zo*=;)zSd<3R_lit<*`LgKJH|@tNcM0yw7Ut_V79C$788(`*xr@=;;u1lL>xMQel(X zWHv&;brXC1B5YLj7SqD=W;WzC`hNZ|hx<}<57__CFa1+76F9`T-2IiG3oz#^@hA;E zuphR+ie28=ZzLvg=YiX|p_L9!hWc2_0M4=z4v!1SzqJ6fQ$yb==fSnOR81WkJ&3w_ zt*WP2!Wbvs1+8S!0+sClNJ8kv8h;W}R$NsP21f^UBZUP(UWNAk93~0CD%W)FdJfcc zl0cS04){TmVVXt2DAcT_8L1nK7sqd0Vz$8_ilsF{^u`!dC(6t95k5K_3GfUn`DESt z@yu;%rPb3eLQ+q0$z^mkQdrWM4w_!=87hY^uk(!#m{8a@$DxF$^mTo^;Z95Le1sFL zfGaJFJiy{^@(L~KO!)W{EVma;5`Ygt+2lg=B>5n3=dgo(PC-KLpbK!kIyYYum3n&_ z608M<39(|*1avlr^YZiPz;Z&-3IS=G&|X)Or@(7**HWutp#?iR&!|9G8K&}>9rolX zJtbiX;H_p~11k#;bY&I^T1aH|LY^2oqJqzBS^_hRs zLI7ik!-l5~(U5Cu>*Ol>J=zYkx19}9HE&E|2{UP<{!k_JNhUcS3`&ns)xD_#GYSZ=9_J)2LN;ohCIbWLx@jKkx_ zq3tO{gLcn9t-*9?VfH#j)l)ugmwN?xVMoW_P50n9WPb=gkKOOv&}^YK&u(aCa&m-L zhu8ktXJfKo7SG%Uxs#0O*7qwF2Z&91E4{_8fB)~#eI+~lX;Fa5377=)F35XVZ2WLY zv~lUb5KLns4ddZtPSKrIGi+>#+oFcer2zXOjx_>w4wS@BYesB($F!33{2HhRGn2*i z42sHUJb)dUNnryQ&;w%L_ohjST_Tri&{puDqHoxk8y5n2p;}aC!f*35zn2P-W4=ef z$xje6$NkJ%lswibK~H1MoWl?aXUwStE&;wEhnU*aQ?E2Q)gZeK#gi6Ge0EezF2&(M zg=+m;C~n*$Wr(bysKjsEnv=a`2%w153Q$Bcfa=JIZ*BE1A*sKOU#Bj1`Wf}x49zAM z8@Hx8BDCi<&U-9+t1q5WB4xZ0<|tXf_pp7KiSBkIDl7V@@G&2AB+y^Exu3^|Ui<>$ zZ>e&acRb~=Upi6xYOq`Y{MD9!z7K4IZ0>|z=xJ2DBOpzj8=ee0a`LRm zw+te_JAdjM-G2v~@JT1l^v^F-&jzh!v2D+i)_h2}QW@D9dfzT$U&RH=7Mh?4UW$O* zDKwC$qUFht%2vga2LDhfm>pi0KI;;M|6!3-WBWb@zo468PSMYy*v^M74VTXib!qNk zhe=ZJM%v2Mg!B-lHpR`)xn{?*MHB|WB_jaATW z4k;%1c{X*G>F!`SoUBTHy`ma|%2e#01P3Nj`Ybzy!UzRnRp1S@el0&&xbOKr7SFd|FgDRKv3}DhwQj* zQiVFmhvFw`n(+i~aAE6L&!)G6(vQKPQdVqE$rHVh)EnP!F3k?7SmP5{5;#Mk0uu2m z`)*56^0Y1|O}3ctnkXrgVVX_nx8I1RH)U^gV2O z_5;ocl45bg--vnLmC1}pTb|$KqjT&f-s}XFK6u2%%yu-<#?C5lby|%@*-%76zrb7X zl752Jb;C`Qm|I{1TL*`%EN>~Bu@+Fw806&cmR{ZZV<$*1jWNz#*wJ0Mj^KBs9%;Zd zf=P&29~ChdB(J%dLo}P93w(p7zmvs zfqq>eER47dK?tptyF*2U2pv{G>^SFJLOWSGVT^-?{U*II(iJL#y!B3g zgHuX$1De>mc{G{);(FfF*hDOXR3Jq_ifg51!Ym7Kp>#1zXp3=@b_1_6Za0vULbyDe z8<94j0NQPJp|Pzo+ItHE#E;WUOZ6nXl@xCa{KTiUbpT`27HEx`jGrecOWc{$cy0tX zKi$J_W*V608~$q|J>10f!6&ajNf5cl<9JkYGY8XHX8P7ED4r8r7tKWDiSd-S9AA{r z&9BCP+utlmjy}O6<2JN2){)jInQQmGE7v+uF511FcF>^5pZN2me{V{$h2^ncBjg}I z34sTKnl@267cS#OlxZb56<2FD=9B;TQP_tPb0y^_wcKl$^q05lmwH;zaoo`p9G6m&pE=>`iD<8N7ab3H;3NlGuJ_=fUna92rtLdH=%{>bkM9?uDUkDR0-eY88SH4)l*yY+` zdP5-pyWZ_nun&+}b0vUMzpnljl>nbvR8;Qo9hXos;v3`~XB=0=NX9wlXhq*o&yE&n z6rlEF?c?ZZ>K7!u;6lZfynxn-Q50e1;x2eJ`Fp8?!A@sV#rOKm^VgSOSPx%Nd4|{$ 
zX9AN6R9?7+VQFes%LV%`+9)@q0?D|Hgf%wfb`nu$P!~{oQ2xkKGjmxW8AIg*oJSOW zSb#U2(emHq&4L(-&rvwSc&4YjFIT=5R$?g3_0{1FuxBU~vNiSUPa zWo1|Px($Vpj82X5R*gg}b)o|Q$()1Lg&N|+bUglwAyoA^~2DJnybiAeSxJpO(BL+cScGr#ZWB`K>L zY-@oUHoRgF#JV!jvJe6YLE|q92;f!DzBYBhCi>gR`BiO1JMLa+qa4Fa=dG?PtoLCN z()xAC)|pGh0#FPdG~pK;*afc`7$FcyvB+$(bLS-58~ZqI3hFxM&-1t%UO|XT5wGLY zvcsQE!w&f65nyMnR}PSxen*7uk>jz_5kY+D1V;Xv^Tn8X=a2VFT7+p&FSaGSDXla2 zHe;2)lyZO~b{6YO9#V5a&t5R)wnDLZ{UW`H6dx^crC52cP=I9i+l25hAP)wZmAc7k z9T~eTmTh=>n0nzr{R;DS;b8XJ9Q!Fby&6?w^yY_y6PRrRNt+Um$|uEYS=1BJYQQ%@ z`y`)~x&za?ge8tQQUY>*cPG~9j>U*76OEdh-Ty<^TSmndZb`#54J1KBa3>Jl-6g?Y zgS)!~_a+3lpur)yySrO(cXxM}?_};=nfIId)vFh<4!!!3+O?}{#~i13*r!#)-B%*P z7}ADsX$%kXurkh73*$X>1&1|&Cb5V9ysTHkWx(?8O8miZ4@z6I$WZsw4$Z z&`Gl9czFy}a0j_~9;BK&T^jqSWVgzMu6Xw?b%`GFX>jO1QSh~1nfE}ylc}cc54!GY z6f}qUciS_g{G5Z*6)1@){rU-mg@S8C7IcdZrDaKqM>`XJ8PcRG;Te-%B5cCeZuZK+nN63QddvCkUlVe#0do`d>du!Ix#h7lI33;qJLpfp!_l<#bkuh?Vu zWH+Z>fUs-~9LX@<8;)0FF-`4&ShToMd>z_E!RoYg|EfG4PcD<-9P16p#e8%so16qF zU|V1IZj~p>3mmR(r5{4*7fw?~3vla-|M#=n(G<23Ie@N7%B8ruObH=vPmFAVQ0eEx7UY9{0xmN zdU)x5wr7Y?Xeau@k~gOL1So+38^5O0tajanGU<7AR7E~-p452iZlwyi&^2OKa+PO37 z4`XChTmrMO%fA{{&S$dOYk@tp-c*CB!7E@3(n{229uh3EKvA;gX#4M&-$@5{G4VEqisLoy09n*GSu)J_fpq`9?LV)3Vd-@yDj4g(La3 zIkDEFiQWs@&wx}mACStb$`b#_R{7V}@K<RZ%g>5M_F)`w~V1QFM}j^EXJBZQ`NT^kz2QAOTzf|Y;pDA8inS%T_vq4g zeM+w8*2u_?PQpnu827$HQtN);6t3xGgQL4Z-OFz&WepL)Pv$WC%04*1c&7QL>+{wDK~_K9e9`|rW4Ml zolGZM-Gzhji+QFG2dJZ64t#ryNZe)w?1PUe45cb(mMSBk@4qmij*c+D-uU{k>vS8GFews>xqrnXH~YB{KXdhY1D;N&S#2LN5@330 zO&k=>Qn>soI0#G&^6CPwdlEPBGeiQrR2RO8P0la~5-hCE)iOuh3a&SwAubw1wWx9b zcI7rXaY0)RzkmrM&5Tu(un-eFpy-U56}C1g0w@t|;@%A1D@3=h83CrehH=jvMa2LH zBR1$k)UWE;%lR?nKH7o!--1qBA6s-^j~hv!MPrEN^0tf+qPFPHV%%%fxL8O^%+bD5rDI^9TBgmED74e!G_cG zTU6=mk1XY2%&%kkLyV^tWj6(V1U>R`=I31>YRB)8+)QdIt6EKvF{CDF!7v;-r~qJp zd|SQYFhG{;F#elpvZ{i*!f+SZKT+&312VrW&a$XHp96zIE;+zM-1|Kj54Y zS-r=F%Usb(LDK=6V%`bNedg-MvzLq z;P~x~vLDCImDBJ*8xI|0b^9UN?h)>&eC+x#O)H4U_M6Pf^^z?ViruznmZ@*pb7p2g5 z;kF8r62ot0R2X#SCb9An!Xfp{Es3~j*!qu9Q@u;|y{O0U#NoDnz#*p8L(K*Kff-AV zVOqY4W7`kZFb$xW?SSE;u#%GC~Z4&2hp{jjd-S>XR$c0*$3|3=&lscuDWY) zcTj_2KzC55`88bg#c3;PxCHnn?%;e7!iVX1VQ<=d2)Fy#Dbr*vl=XtRVwvjGc0Wia zq;-84QpToZ3J0}AZ9pw@{(-Vefn|kp$HVPs3eWdv37$;aKOizt_?7Ziua)kc+gfIN z(XCU)*4-?6zc+E0Z8AQ>LeVblCcQF!XDBht=lKs;z_EdCTq*v)*0SdTyq0|r#-Qf4 zZ4K+!g8`qHJ0;ywx;1n`-#2Lm6MQJN=&M*eyW18ATc{RRR+G6)MNK(^vnyAbI-YED zXt{)$$+oRInm2n%3$4(3{Lg6HHtLIR9o|B_2Qyh(+4(#y|2sl1zr@&M#_pZKdVSQO zqtwnvRTpyFIhyR|CNl@3#N(C+i_tGV*e zdFkGb8X6jow_Ue|tw3Arf_e{0{fH<+jMEvUzAZANPWw?-g6y2xFD;1A>WwA|+&fVH z)#Wu)U3so@RS)w6uMRBV$Lou0Mr^n-N{+^v4GPo1JRJNUJvyy)sK_8e7@N*GOSeVb zfz9!9!3f{4P;%W5>u}RulUaPVV}Ms+0LmGvBq$i>4jQ-f8eTk9Lmh#G(j9b{h?%5E zsK_Y~q5+6pR<)3h1xrJb9d9^dm^C;~m?I(Kj0Sr*56xzBwsADO_ZpzLpaxJj2pQxc zqQvCa59p&rxIb_O)go5+UtBBNWDEL1gNqroIv`b0yZH!j%ZN3M&M+Z_kt~oTSL9uq zlx90;LwYFk(mc3O7^E=9K-#5IyIKoL4hi!}u9})zUA-ibG{{PUgDIr@D>Q$_>JXSx zm2kD#yR{W_Iao-|xN_kNk;aTE?1;cV1Cah1Kf)zwVp?V)2;$h`+sy(Js!7P(QJ`1! 
zbYbz_3C-vE2dvSi-(ur+D7);qbIrZI_?m^_edDmq`Kl6r#G|z-l!eF&exeD&EDWR& zVqwXhyWg;ykVCOWnM`&FQfW6$`3lt@aVNi4QxY6-Q?)hB*aC&{<`IrMw|<`|p(@2{ z94J;n?p|?rF1_AYBioh4h2W-s!7G@jFAf zgE4ULxwRi2Pe!}nsLKB7ZJRZKPoA`fllgRHhEXw^9qy5bDj&7n|D={3c2LBdZN&OH zKI*De5gZIz606`V{ZPn`k?p8Yl4>4uz-V>NVJ?0pq#~PUX~k3fh!%OxImnH;Tp%~k4iHS^1ykEu5L*$ z(~T5?h&UdgRv^L)J=-6{-cJ=B0`wur+o=smKu&g*8ffaKjL1GK_+{+!@Pg*@dKu90 z*lS%{g2vwWfM(=#hs)+Oqy=}94wne~+Q!i+>l-xqD`pvMAB`T9Aj3>}-Ud6xhM^gJBsH_#qgMJu1{hGg8*%d_>EcAPq;bM z+i0emlrL{9$1psSVBu6rK|?BNPgjc>Vfi(fjym`kp73;n6}wr;oV3=aoiV8*sw)Zw zTc${XpPKlU*IHcQ)z~pgzL|B@4ugj}*30Sr!#avje$@!S)GY6-SU3^mO#5edQwH40 zVR-5c!j+#IN(x}qJ~&a0mhN!qYy}Rx6-J=(gYvgC1=`QY3W}^?(lOyiHRmE_RrqW> zRtozedlW*EZ{HE+d{Q638-#zndunJJ;CljFLE<;LM>H03XxEB&LX=s!h1nGNtV=}0 z+}F~&va;}=W*h~DQ$`35X`E%0w@R}mvfki7dgyms@K6CII_HfBQ5t;CqlxLBoqR?C znc+2mjeA?SZcdY@k(2rB$Ee3PjxxT1>18tnw=bGKJw4QFO_C0#a`Fxi)r;TtZ+XvZ zZQ34@WRlz=FV4HGP2|x{g^QGDC3zz=NlD*Fyl;paf7$RK4BAYlTmByl-y}l9BXp4nl5v+J^x?^^YLG5EPa;+p{q4HmyEpZ%T3KKF#=^#T~(@WToSebo4a*m36;ai0ch34bOCLeXrz{w>Yp zZCHL$6Q@L&^~nSe!@Y4E!mzCF&@L7``b4ijQ~}`2QkkkwB`Yudfd%b}?{L_te&Ugw z3SV&;(T{(4am$Wn&cyAs_TUQ}efTWCI$U7lP||4}{`T|6$3w+y_CEPnVh8drDtJ?d zgLaXy2m5?#nb;z(c>|}?NKCwJQ35Y7uc~RJ$*_bOchV59_O1f3Ln=DYiTv&=d~kv;AT!>&uEaO6IElje=}E<;#MPnu9ok z$>+dsUyXM}lyCJ46-us6#GoSh89Z(G+_@a))%lF97sdh$2_VHAF{=z&A}COg{$Z@p?iktLSF+Fn&D?4J}_iu@%8T58KOUA_h-7#e5n70$fuI; zY(z|VE_3lzM~~?Q_MV2#^hUU*Rf2X6V|SJs_S(;YRT?oD+ODG}1eo#6>9Y=jefj~$C{^Ml1X{a4g76(U^*H$I)@bV{*yqIMD} zvXeIp9r}oBi2YW|cJ;ne_15aK`jy8ks7C+P0`z2%5K}_PN_Xv{7*i&K#$lK>W@;DO zi3Ca9g#$67*lzI8gO^4#$ai`{#q{$TfMWXK@U&#Y)n7ym+F@M-79ag4UM;evA^F0dJTC z*EIZAPQ}WxIJj_NK;(7;I-$ETxi-2Pq;+b zwKtyvFYzqbr~{g?chUAjA30qF1EFjnHc`sb>MPO+KZFqujCAcDpj+`w#*+HlncSMG zBUQ19`+2X1aRgCx-UypUAlJ6#ZT_ewiOzd@X-z^)n<;oB>pC^0(e9c8yH`s+65H8iz4`kCxLy}{l~OKmE3{t7!&te%BHU*Aq#G&w4u zq{5UI$b7#ZzIGd2oE3A>{VY%q9Ie*>ZTGb{C9+I|EzB+e50 zny=5zUP$bn$3=Bzc^8@fgI7QP3S--;;pp}70Md4Xjf8XRMVbKM2$6`PCqNe;H-6sr z&NwDFNZxNxCX9*4^+5gbgu@3BgHo?AZmNSsNpSY_s1>p@T?-l{kj_yQ1$1poA~4lj z1hoYv1XTr%wL)e0pzUCUVPgZ*+JynRnU#(Kk#BjBE6fZuG1xBCd5z&j%#egRSg3m& z>CF6_SxS$}#+kn}M7v$#3SP}*mP}Ko%zWMXO_HG@?=c9a-{5!@TMD$`NU^Lm~OD3MtfJktHH)`TLxLk1fQTtr~K z&z9~r6dNk5syE5;v!reWY*lHer?>vMQy!>FGoS&u?pRVpb5ML#=;u>l*o%MT8^_IP zg|#H88m+WerKo3p443+}m3XRt(kR3*gPMi>R4i!+2(?zS1Z`|ez0X5+v41#R4s#S4 z3omK4Kn(4e4c?E|Wzvo6Q4wQ7Fa;?{uAu6nOaa_$79`MDy30fY3y{l$SJ(1tpTTVq zOLq^C0``V!{2GWkqy=!dRCm~0dR2>$J8C(wnB@Pe3;ZnHV0|A#RhqayfSJ?ZejF+; zwbb$L=@~;pByRnWyuDS-cli8YWqBRaQ;R<>D*HU#nU#N8u5`VIRdD+eRjX zWn|S*Wv9#xzxQ>f0$N-n&b)PgQBS=$;@$Xy^6fV5U+RB?41WtTr^?IO z$_>P@n+47N>AVy5n-2P(f{trCyCXJRR8|)Wt<$EVWDOSpVpyMmEAM%l9^eMA+bs;{ zwi#uC4C(`WF#fjd9fl3;UMs~*z3_3cO}JSgjZhqg?-#y9HFnmYoEAvaNH7`T(H!TQJ}!X5SE;S9(Zp2=~DD7@gNYt#caJz*V6+K%a_4i6AkuydpOzOxEAT)fYF z8uF&Bb3He>mEHdL(rYKo739Ib$!xZm_ValhC!Bfivz_(TC(`9P@jTepSibx9tHyp) zw1h#eRGo#K22+muwB%$zv)ZUfH1)}^&EjFm>uPRO&WrhcX}KvCx~}Xl&_>(a#dNvO z>-=`XrGII;6h*cCKWgRwplUq%;g7sVf1ykD@?gELgvDG>Hmtt38{lK#+#f}0PE(aj zQOZwhA9vbvqxHSW1pJ}90rzi`4bkkTXOy!%6VnJ6^7?4O->?O(HDi$A1W_a3;tLN$ z42*-}{n43S@;iL3z`>B5psH;8wQ}S)v`{smR&Pw6{d3o0wxJDHCF@+IwfBc>V>3Fc z0yR0BapHMX(H2Y`EHfR6AuHqdu|D;_CWu-scCDS$R?>R)qHv1@sAGP8pj_^DK0=C! zDh$(_EtOE=?ivJQcvo8kvtm9u7?M)!E=Ob&Ck6Vf)gu7HwO#DPBhh9;QiuS$1YrV44oB@mpNGdnTddTUKNsrc78wYaS!6 z*Y@f_hgOKn$>s9FU5V07A$SIQ;8tXDE-Mguz~jH8e0$V$2yu`QsP|dV=xfQT_*Rok z3YP(jui)U!t2nJ<$z6C3qZ`lwKQ>>?C+4|ii0D}Ep}coH-d8Oob}5Nx&~4pq!D)#2 zSCLvC_!r7}o+H4!l0M852rP9?@3}uBBOW%Y5IJs7md)%3Xzt=+*Xl2S8%|*oIk!O! 
zC>ddxRh4a!m^3}g$xHKoi3#34S)s#lHF!pF2VA77toKF8@QZXVV;IN(-b?@1S@8=1 zmFUgA`KgLC{E-TL_5{^%(A7i4u?e;+x`%Z zPo|-%*ML^N-Os=BDEz6#WMdm`M4$}Kgy;qh9oqeYZXXOUW#fE1_imR`uWUPt7u*D8 zW*6US{0@sAipc;KZNDCt?&kLPtD9$)uTJ}CD6@KhW49{S7wxlsWzh5{bWE)BV}Avv@5qLt5O$f|7xBxO`5=vubXUYP+Q0t9$#y{j8RQ6# zz@nTeEm%HpfT?6(mfT35O0cRR46*^FEd3Ue?3&twF3D(`faXu?*~?1?OEAlf+v3Co zVBgKU?KZsqA8{&v;ca4E3>vy;V9?uj6DU$Zp^X>`8!-dViihk#B|@@h`lQf*%C&f@ z51yk`YjAW+0*_#q;Us$Y^cwa>*s}pF)V>~q3Mp7iQ;<@~#2o2T*BGXmji_1W^h1AD zQrt9;5MRFWdMqZ`YvlS7|hv^-8(ext}S-m0M zup#d4dfnR4v4FS3 zb9TLCOphf)yf=T%&Y{IpO6+(kkB_;JF{Ok1=It%>$o80E1jxOae2#(tbwJ(J@>;W~ z+T=RvHVr$j;RMq!PKkKh zN^A482ad-w4Efd7%KnG(=DlGBlH^JnmnJ|xs;R47A{6oLXEKyhw43HrU`f(Y^nXF{ zznr4@!fqM^O;VVX#d+JL+yvJp|J~}jb3apYV~uE8Tx(88A{V#a9lxyAU&;}W(r(Fc zRn^|h+?3)GDYZy5^1+K^Z>Xl6hq~OL2VVQz09j!q$3fU1?_%2{r8=m>w?rt@l0TxPYBfI=wIMD?|Oc@-Sx;r5#R>7|S`R|*|K9UP~Qt`||(767|I`U$~6M8!}nn_MAO*aF%Z zkn@=n;j}Bo4d8;)?b15q!a#yB_^=7AOq{)fwC$xBO8fKZP6z-dBXsVqMv77I{Eb87?exG9jeLBx7xA#>Q%hkhW*};6=CELw1^IIrxWqKoZ;zk>LRnTxv1x


bBTfpQYB8*FR3DN|eV$N`PHUn#5n1p`*HbSIlm`klpDd#b)@x8k(>Bd@AK^l(f9F zFj2`p%(LN=#N`mN({}Y>qf|#T^}qA$z)tZU)%Eo{xev)4Ff;wg1B+-0n1<%^Lv*=>c8YXe-!j?%eMlZ`8$(c{I05iy#ln87h#9SO_D_N(p+OA<1 zF^L;t)~+w8U^bs(!`5%*Ze~ES2b(R1(!!^{1I^K$ZhcrzWR`UCX;?cvNYQ}FUbJhw zh#KHfw_Tr3=6pd2bsfB_&yH)7>71!K!;MtIfiryq94QNJplY)j63v38Ill zZ^pA>nMTwUNum8#oy;v2^^2z-{+yZg$HZrb;Cu@3-bsqLU{PhoZ-H8{yZgdYz2*a9 zxEDyazfB+QE>^DN+lfe&hJMHXai=!m`^Q5dA4uglH39Ijr97TMzandt-qxE)lOad9NLv5A2)1c4YZH?Im&Y7lK}Bt?8~Op zYDJynVVbr>Qz^96FM14t9|l^E<080>hCdqwK_7<^Kdks!(BJOpCoHtqxmM!H3rsUORijRFlrc|zPE^?)>$UXVtQaH-a=*&@BE zQ)>uP7K8*t49$(Zg=a=@j(D))gP)NbK(4kcQXzx+F=73krt+tDO#66%$n$h;$7-h{T&0#*;=#YdH$m5Xb=>oWppY(%_mhR|1I+X0I zg!*QSK|br%ByFkC`FID@%;Z_$jEpSW@G zD(+sRet={COT4eNuD}IVV4g<`Ybv7I5wcZaR{Vz2t`7k|15U4YF%Z(R97tXt2=YJ$ z&qv$T%*l-)J+|v?LW>Sjm|}1=M$eI7Ulru%@KY-d{t+)OUV-rI?xU)lZl1ytifFyRZag zE9!|M9JJHb=99-*?B7=F?_y+{Lsyo^=5l{e(oYquvs|BYtj>!RIU4^5Ir{(fUCU{x z%THT$6+TtO5OkfWKbHeRwvQeUUWbcvg<`~ps#ER%#r8NV_$JM86P&k8q^b%x1! z!yZCFGq3_!P7qAZTPsaF(^Ygvkhikj%HNo2Hz9-1nEFOGYA!99;v{3*8%zEL(ETk$&47d`1;X;j$`Dxg_ z0*=!?zzbLDD;);pN`_7wAwP5@SptsIxO3dl(-fREt66&nJq(XvDNJHKri6A4LD%F> z&8=&3Kp`IvkI0gbkUEi8ZBS6}>)h@1O-_^B;t~l3QAxz0>cN8xugjW1kdowc*8UGJ z*WUzL^a^Fh5V`Q!^;7&N?TU)x9?wx!H761eQ{Bqe(!IOnfh2WQPvo5-qka9p)n_*s=X&>+LlO0 zz!!K6cHNTRcpo6zZp4`m;@|M@ea4K!Es_^%9JuO0MSL5<2k+*$fWBIs@sy-r17{_` zRizU-dT>!oQcZEaa@OFZsh zciS%!)pj06JJy?f=t<(X4)D||Q*lyar2P<1V>!i@#NrWv*;-{Kmi>?5t^fVt2MX&m zZ^onQ{ne?_o#!*~*YoUw!-LLxm*K95uFTu-`kVpl_&Wi>-Bs>EBR^BEekp}%rgEb^ zJd#-J!LeDmT7m&3*l#u#deXE@D(6 z0rGO7^j$f>xOMFWRS#}FFVL1o?TNqvzH?S#{+r92njrq(SHn}@_N?L=y5?Lt_b*@E zDPX?@$}6KIP^xv5&i$z%=+UY8tti|+*x^p61{Fm@{2e`tgy4(B+kIGZ;ue|o$6{OQ zPw-;A-{IyS8+d;v`n>p@x}4gbT~yAf{E?mF#``o7Oo9xXM6HShj5;OZjpMutrtFgJ zQS`vsKw=s*Bi_JX%}dyFY>KjwjJ2e}|4JfMalkzOp7a7kgcM#o*5^)rWl_llvD1|q~FtRYh7x#$vMgXJb6VclDFlFrg&nQwQ>uA{c{83~lio`ic1VaX3^ z6LoqmxoXv&B@D1EIfSk|=z=xTqXttkIs>S5QbuJD38%OYw`ZR|UI&zlx?xTZQz|j> z6^~n^;zVk3%1J6nuWj(&bB#1{`C7|tk{2XyIi*Z`i*Y%tRiPFmeUU30p<6|%Ddk3e zpbUVu@Y+^K&!r?(EZ=BJoR}V%0%43;pf5;`&&P(9naK5p7}J#7oNGugvVu8sb^XG^ z5D_^YphSo9!*727-m^J?0fub`c{UY$e5wdNG7MKxbQy3En~x}~XpD)=j2pq7XCV(e zHyw2L9NwrT5!{Uw7s?m+@{BaVnQX&N#Z08x<{K&NRCq^}NzOQZU~ zez7mSJpc)h(fi3&&50ec-zkF-dr5NSbpoUxZ6+?8?BTQ z6il4O6lB%2MF!6-UA{4DJmU9FyP99?2aTlk_yZ$rJ)Pq1EUmK>V{d3|w=!*rxwKp6 z&u&NR#F#4CLPGV^*<~7;=mt(Tp#)o+FU21=a`~G%#F0btEM94F^iPx zQ6ec!8Zh-c!t5i)dB!L=+&82>v2%(X4WkO=kKNCP3s_2X^?m4<^?lN-A!C69Ee)nd zxcLb01CUC|*ZbKNiZjP{AwoCAT3FP<4*1Oh%y^^NmSR*%!@$f{gn0Vyke)wGrd^&| zOI~kpwvIl#xAEV(o>06+Mv7vIHIf$0srn#eliVM~E0n^J!$uCIIE9(APbQHTn4niD zX!V)KFs1*w6#o2kR&j$wtTH1(tS=HH{Rw|I&U!c|NmjRn;mN60(Mj@rm~>=6Czn}x zRhE;HF5EPR%Cfm_dHW~V`TP>rjI3fWsn}qwwhVy?e9FarQv#y}KD(*S=b!VW(1pMM zz0&gFKq2}9We;%2enWSACCQ6NnRF^|dYfC()1_}erSL!Y6E87kuA=U0YH5T&rYHH) z@kpMxr-IMcv`s(vxh@#8#$w;h3exMU28xM#MI#QuF@p(e@p@sXIN_Zi z$UaH3cRUa)^BivR2luK;*ce#gkl{<}W7bhDL!6?IuY99vMa_e$(bCsjy&W9FN1%g@Pb;^f&;49{W4tx_0@W zB+_@<(jr;tA~<3Dnp!BkDD^)Lq|qCSx+`=iMvCe28WVX!5JjWHJ-V+L$qkuM!zE17 z!<^Nt6b>kbqCX

z;ih)Wn4i1;eoglHkcb2hLe-<8m|oshMLYLuG`NXpaJ5A_K`7d8)`20>31sx$0Q=bTrpGe8 z+@$E!YK=4~tfv%m^GOX5Z+Y&H#wmSZEISzA{=l5upurD5YWj9#6+AK6XCGp@_<}Ab z6s2CWhP_4=;o3eY8H}U5A`V}iu67%-MzgQa9$UV;!$~RndliaIHahH2Vj7Xj^zl5}wx;Lq`<6TX zn{T?>j!T+2hAa&BzA<=U1m=g@6! zL0`;E1h)&@9<^)#=^Xqu9E1L&Z+?=1&ML?x=AU)4SjJn6{;HQ=46r_`a$Rg*=Y#22 z9P8L#`rllfJ1*Y>nCCw%ti$JLSJ$(PBrU2QEXv87s_LZ=oL4KFexA5d%zWw1PLN0g{{cn#uL`mLBVk`K?y{t9FX?{e8Ql9Qk;E=IuVN8}g&tul_*9`R|tbT6V}XvTk?kSoVO=%EF+z1&O>XG2Tg< z0#CtXPyZCb4^G2Ve6JxvgDO$mj$2mqoQpaA2Ap@EN2hUqD(bqBiXB-5i<*s@Oq0aY zj%d8(jpMzsRi=%qjx$TtTS%1KjNqLeTNW!88g)o=GE~-QUyWj}LrDpC6AUmEuDZ z<4W8!!c(^ss>cV&H>cyBT3`%C2ONoa=j!Yw9G+pQz4xWREr+*~u()`v{+O%o9DFA; z=q^|98dyJ8WN!IoFk+@^v!HoBsUW1y^jF&eFdFI}O{f|ytdn|U((E|LfjT%=<;D?w z=O-NI54u)AsSSCoO=z+QmaBXyRqtRVb@BZPN>XSF2JLD|lBjHYF9}Yuh?`P$iT5fz zm8MuqLC5^+%t9K&5~5GAwRlLYkftn5-`<3yC_wqJZH7QR{IZ3XBSyUcx=_@2i5n5w zQj#*Z5M!W9*QEBC!3)+q+AxWx9hlUQZNA`q6vDLRbswIoCQK9>m*FUEIfx4ABhX$c8yE;x8 zhvFVV`hvCPcn(F|3Km|UWcEyXlzj`yZDVWqX0#mj=r+luA_;3lC04A9~;=kOZ`Js zK%J3?@s;)EEAD>lyr-R+*CEF2->6mohbI4n2tR(B3LQmTesoUei}Ad@3ZcnEUO)a} zhIJ33#5mglRFm~L-=gi!6VY`N3Uhx2sR#EGjGWfjb!5dN{BE-ZRHJ&K$%4ATQK0+Y z_pQJAN$d75p}E`?#EGHE=^gZY;xs^asNzR@{&!$TsYz`bY>MqAsbYpx!gssp9$+>S z{FG|7_?VNwnmNoJ3BL+nb8r_=mVF+3#-S!#l`1#<#!GIsQ$c7C)ouM9qY#aq_`Q=` zikfVwjU0cEOlqY=G5a?|rUS+)z~C$s_oOjHk@}D%PGCRq zaob47nQ<-!M;muH4*NaC9HV$Cf3m#wnG8+*Ep)lda2yTm9WP3~aTp^$GaI6mGxvHH zB%%fXV=HH6C0Q)_kfCl0761F;1!=wXu-Qk*(DP7sZhjL?(M7BaSC^q*5DkJpevkbD zTGqTmk}!I0Rex2-TRGagu->EuVqompG^w$mj}_bPWOsMi&=!UTOL)~B(>KS16~aKl zSGQi<7&XSG=cXW;i=TYw2mTx#d*hfilCO0HT??`F+NV>%7iK)w;ywz>mR3d9t6#A4 zJeJT5(66d^X&kf?MyU=*6UP9^OhG#(2<~JVy7xsc1o&$Y+A=)*h2$i$%GqSG53MU( z_fpQwaSa+;iREO-5MyvS?Dsr2(TwQ%;!l6wT@0qw4!8jg8!n?UAJuI_ZUTH_RvtSW z>eM-&LmZYq{}ptlYHH{#rR$y&Sy9uit3?GbOLyu~M=vv=dADMd$F~CyH z8K|$iZ|oarpHK^ne|L>F@B#z2aAPBMhCAGnePse=ecF#U0*K{Pz?jZn7L~KkjS^2! z8MzM|fk{QKODVWoj`m~lGj(vub@xa4EJrzl7YROmL;WgRk|{{_5|H0 z-vdyoZfPeO1LL?<;C*Ar%KrUL+P}x!eN>*w=F^9DIPCi+K{5mKUcTURBH#?xa#nP? zEH^v&IYyy7MH_c&mxHfN4305 z-fn}va`!>%K(O2O-rPw0BmAV*R)b|8S z9&$^t>u)A2j{#mAbdiR{NZzzza~E>DlQJZ)$DDq9Wu99d)${dqp}*SN7->UW~(Zb~0ERmgvHCIuOGjOjBGEj7STV%n5P#xA>6Fk6)O{$*I9R1z! zGkp^#gazfDHzil&Z4ROZLnEiECj>b$-$_{(CdRJD&i66L8B8~p!1}$%jA!m!TNb;? 
z%t4hy{JaAmPBDk|mM5`>!A;ZimG~fr_0V@fkylG9R4c#{p5%A#QeutmPW04xY~+|V6Jhp1()IY+(F@d zZueqNPNS74H`dld=?#gaA)gzbyYvGk4PC7eR^3}n$=cG#Aw3IA(3nh+%8M;t#J8P= zq*gwvhS6u@i%H@NdNXsRCGZ7xre9OtlvP&ANIa48y6Wn&+FNDfLdCzE{Nk4^VE<{0Ivm?M1e1PWstw#n(r- zC})3wO^;37ViL5HwEh>g{@3pJF_=0>-v^lI1X_MErxWvOXO4;s1@u(v6} z*kWQ9ufl0TZv*}Tm;v5ku*|JMy2noq!yI~Fnd%O=O z06z1>-cUgU)&ZS>Q=!6)JSnsr2V2~3w6)K3b|la^5`lR=3YSEIfXPtl?)JF|+7aF~ z9P3c;Jm3-{m$Y=y5X6}hV0D}No5z|tFK6hjW(t~4*zPS?d3vJ86Sl$yAV({rf}N@c z6IPaBUsXo{EPUi#5=lbK&rW#Pg$YYrFMBSHX14b|Wf71yXsY^VP_V}j%yJ2+nK$pq zvaa&kExWuIq$T}feD%8f31pPEgx5B2L{G zk(`DvkUF4sC*>TPCUsG-;iTzV4vl)3(0?G~QNFWpw{!fGEXe|2dOeo%)NkcB>SYEM zclI16heRy0xn-7=rDP?NfBy{VQ}2hPi8o?lHw*koy8LPv-&v#5)`as3(xByXFxjXW zW7ZRJlw;hK3HoOP*T6V85iD2?`pR;`v+41i-v6xKbjOJ#z_ukIUA=P|HJ>e;>}Hs( zdw!UHP-F*1tcXFf`u|Wj{IvC)F{$D9EQBmLr)}5ED|I)U`hUs?!%`7LwnhCYu^+X$ zzD?B&SE9ZV#Q9f2BHv`>vnh5H*!}aod%}`lO4pH7(czqU*OMQ}E9cVb&LwGa3hKMI`uCAw^ram(Q z$5$L_n)njro{mf$1&sii!i`ZM?Is!Tq}=SDjJ9l7F8hu-+gjmTeO%d7KzKCJBuz$m8f9W5S5w?-E@% z!3)LfE8o^XtGx-i*M8I&_xr5^54ojX?4ms*c8#g%%YtEi=XxYJk0azrg0;ut5b;iv zyQjY4*dI1|faM%=GY5U=fyQsOWOUq5k&S3fbd`4x4J9igC$qy9Ed(QzE2L#W9@q>M zCoy!`e1q?TU^)$kkV^AU5N`f{H|V=Ml6cf~hj5droYpRjOxzL$W+s?ek)BL)cZ<=b z*r#~+QM|$;ryZXr?9P;Z)%2}AEi)&-i$X1yg{bbj!vYseb@wF6b}be+d6{o5_+fUz zyC~eU#FQ|JsvZ^f!`8*8|HL#wCQeka8__lNwj7>fV#V+3hT$0zp;%<`DmSe!X^+w1 zNaD3SVRzxE1+hRy>oi0lo@O+$yV20;TVLUR%+KqSt=G>3{>_TQK3gIazn*lFOFrCZ z*Y26A4E#0Jm?6Iyd{dYiMu5Xf~*i zh2tNox|57ZhXAPP0waxxvZeOGN=CRV4V|}$HR~HxCXTOwfU9-Sw@j^PSNIJgp(s6B3qIIZ*4*7m-lHGst>m%)?;IW4t zPLf7%|5g*zH_uBcpq>r zLkYe%Ci?k%1!q&@wu?U9@*ql^-bTF^eAVu^bNz2T5UP!ijLj7)Q&~6jHb1}5eA4|% z|Nkt9{73qGbp8(%JRH$~BZ0WR)6jZq#?ce`|M}QYi>7)+1uPc~Z41atloR8Qr33n< zeOHMG%z{iS7aO(F1W8|>u$CWiNm!*NPXJrk3S@LL(D*TyfB;|_Hc9ARS`z~!=1X); zw8+$tfa*cUNa4>IXsYQ$&nKh4qi@Y7fq;{R=yPbVfqekIZdnmQQWKN;& z&cOSj=T?|79{~L@gx98UZEf}#cF(s&pyAAJ zuUKg35^Hl}=xZK@CN3Hlxe+vK^h4rzyHsv(l_kDm6|+2`%t5Iioj?mtHwMa;hE%Gh zA>drX?%Z2b6W@tE?B|^l2q#YrqF)Tpu<>b| z|AndQgM3q>c3Z77g+x)nlM_5ss`26|jjl$$=$2dGijjTjIa^?;I6^8dXB2?i0f>&; zc5mIU%g@2YTTYmS;HRfKS313(63BZ$oyoB2IL|fb!h?Q{9={hM6WY(IW^f)eBqYQ* z*5%{_Ty~}huEZybGbI*VA73gxLGkaK=tDQ1;$?;ig66bZU1kgx-&T|nJI=*?SOQgJ z8{A&u-4)w?pbMhs#XVaVq`Hn`1+9p%;c= zbC9SL*AJEIizCzDdd?VP_{Y8gNvg$C_tdc!reDC-#kZ|CjSC%(Pw<5xqkNa4pUG*R z#S3W~{DLnBKgAd>d2S~ntJn;C{-&nNOM=*Tg@{$l;oiN--89p>WDoBjH?@2Anw^0u zeNG#>d;yI(%D!w0OeD@jf-3B+ow8$0?{V}(5@8;&>ildaE6cl88!Md(L)^omYT)@CRwj(j_ zXx=Wa>3?{C8%M!W8EEHJzhdJ(m;LbO#gE$;tgo&9v<>TbO1Mlaqi^1+K}>A}RU-x? 
zE_qdL25r1Ev1Eg!FoT|1%zVDs`2}5deYy-eFM8aQDHQO3CBu_cU#EN zUvJdr@H@kT4yxw-OH+tlwl%r`8cukZa7YFD^>{mz}MwZo%o9d^hq+s%v> zcl79XmJ1Uik_iI2OfQY~-Mvq#P4zStTadi)Htst$!WOg3+j*Dxfhbv0A)7I5{VvdD z^{4a;p@-EL`v!*<<{^&FrO(eja&EkNZB0wV5I#nqx)!-d>g!tk3A^qQj<}-#w=gQuP$3nKYIe*n3s64 zmuyoMq{l`V@g9?2omO1?1UW=JkB+#``-~7mlPO6kkLqF7?u zm#3WxAA{a0`S1q6d`dRgBWQ}j>9?9l7_KGy7HyDvzg>(bn<+aquw9b=;SOdXQ_*$v ztCSCX>YggwHzo*XGjBIUb`yW?dv*V|>nEj_6h~RH?Bh!vLXwmP%&_<(PE0wy^V}UG zy*{=_L}%hplahgI(N`0hJh}vtG2BGXv|cHDYU*;zTR)Uh>^H6!B~h~Mqn}fE?}SCT zv{nps%;FpP^gG|IVWd{Qbi+&{K_QI{@(r_dun0G z)QRQLiBne0XRkk+{kZS}zuoi|n6v;WNX|)vezCgmckn#LG|BQCtYjOwLH9=th@bGh zlj7adsctQOLlwSay`T#Mva{DO6PZ;kOl0D4P$PIB^yCt=q;RF9T*kH_$aH(SP(PGDtLkJiKO=!v%70yH2 zWTJU*R_<)ZUU^!)FRG>R(Zw!}EpAgB_)n5j|6|}0aU#uV3^}*O!so|!NLTg$%UB$V zxvznDcgEQX)>{=p?p>WnS`cLt{SUwW-%GjXMVMW&eKkT8QbNiJw_;y>kEr;hy=+Z9 zkG!YqqO8zxtliPXf}7Pakz^dQVe^SBv$b|K!x?6B)k&rU8MW%Avv= z41)x2Uj3}^2wD>CFp6gjHP8TTB)V@9gnkMJKuWaruxONR9l(%6VF-=YbVf&prhxE< zGbx~_7eDZRpj8f;3u2+L+~u}|L};;lR)QVCID)Os{o5>DkqK@`k@Gx=2bKPvET{dk z2{Baw3L6O3-h8bizz&|K%H12h+CormOiO*|v4B+CrG_lo)qau>QPl)#UEnl4dppDQ zfUe^_2LPw%H_q(cK7*u{MqRANqggp=p&Vkwi(|mdf?7tsPJup>MZm)Bx zl0>>3k6bG4-zALiEgGF_x}ljeK!;@((hylZelpG411>3T9RqPPm8pVHOE0(oeG>P~ z(EvD#Tt2*6Bb$|JKNqr2zGj8NW%KE?@g+po=n-!ct~Zvo@X~Lv5RS)Z+X6QJ23Bpb1vu&ZIC$lvi!KJr>2;arQzVTo>6{ssZc3GN>kWjVKXKt z`@JfguDZ8t6#jq#gMH112 zTKQiR)#`S*=mM4YGiCpYwu3cEo+QDKB%mXkUpQnriPep)Wz)@vl3%B7>@Y8$f4x4u zcYCCI8b85iv5@NeZZHnS%r#4W+vgvo-A7#eoWWG8-*(fM84*)(?HI%>Bk*L1v=sP~ zQ>;u$PHJz&?(XR?4HG#7Pd)QEe)~sRuicizM<*(7yjOp_c3AXhpoq*nK_z zJKY1H$(%x5iJiH&T)=JY=7uevk&b$co9m5?x$5^_Z_-sm6J^U zyY&{UINhRZvlumOO;vM?XO{dY78n8?4D|kZuR?{(LXL!gr8?=@ zh%FYdTl-6w^VYUj9Z?iPbkyEzR`NlW*DPuF>qyV{0fZ06DsvETRb@)mRxa%o zD7=x0orh|i!K=r~gI(c|)-I4rC>4`##Xb*#=fSLXqh99gxJ0{qQkzOe%87L`J)21C z$)oDCr5tcL#m_$B0o0?Fyp)YULRa9K^i%6T3qd8xJTRkfZ5 z9A62@o(u!&?^6m4-#K|S3FTgqIsOi-VW^&FB9O3?Ytt08LCvN>eJWJ)K#1)n_Fp^oI$#ZO$<J)1#EU>*E3H5`brjR|- zVHxeChvM*Kz?d@`7Xc-}ADr!a&gcL}J9P9GS3E$V9IG55kqHEc6XiZ%l=ayF{)5k@>u-aR+m?bjct%Nmh2Ya))5J68HK6M1k28d|xjag{b zN#akU8KNMPP{JBzBFCZuJIHjD#u1m+xn7TRD8DqCpEF9xTwIW=MCAME83&*ab- zhMY@Y?tRlf<~Aial5B*Se+&?JcPSZo==w((z29TRt|Bh zu9N1g`m~LBP!Jb;^n-~$B>#Tm{LuC&u=-)b=&I{#$#8M(`ggAsheLe|Fs~A7Q;Vs& z^q0fnT)%6itM0V1?3O)`r>RFv_J%WWwU_Y2RlS`R&-Bh2CEbJQW6-QSkF~zv-+pG5 z`LjUWmt7&(X^~K7H*cPI9B$jjYp(g%Kd*?}G9wF)DT#62W|Z}S-y`#TZIxH7wM$6x z{`5$-?JC#~NKW|a2O;2W0yZ~vQ}fF-cgL*<=}*blSpF8q5uX+_pER+jmxY)K0!UtG zp2&Ksxt~=s>OSfz*+g&&c51w<3%`CRRL?X?WQTo-jV&cn!vkr5YFENjCIht~%5gu{ z%TBC6v?4yQ7Ttw>$Xqsy{M6amiAX@TJ&bf;&-z(i|6K@N8l;^2FhQ}8{mV;fCx3K;4t!x& zwao_lnmn!$bLr!e(MjY18h+EC|&eBM;q`H=K*q#;|>=o`uu+~YlgYQvoKqhX# zpSh0;3}h@UvSVfY?sX%`C*o5tInKLuR$J=vCno3kJkDEPIvTKnxx{-$Eg{H(d|>#vFoEbdL~LJo@G z>xurvxIQ0BZt{JudG0&HwQ}R3i()7wyM^vyDYi?#}q ze^K?_@oYY9_pMRX47F+}Mr*6qs8w5QRqfilHc_jHQX_~2t(uj#s6AqkD$MQahdCK)xG)S z^u`f}m8yT+`cGjc`3mwku%b``%-EtNtLAMvK?R`e6YL`iFCYC)1JK3aQ(W3f=t^dZ z#%H43w@wtoFi1O|=eopb)a=Y<+1{nNNk$)2LQXa>P8V_i<+qdneVFh-^SuBQs_=U6 zTXAdV`p`v*&cn%?eons5ySz4P?NF`VXACYW%Np0pnb%sHlzC&qM$`GAdV*q?DRL9v zLa0%%ge_y~W&Cxw{{)*a7kz%RjTPR$Eoj?J{v7^s@ zD0>#a35QfL-xs*M6*g&Mp(L95iMK*Y+g$1gyC7~%U#zudfcV+5=hF>l0rAGQs6lhV z;%F9g0l$tc6{r{bwgz{%x4<=ZyaFpuXxBcP=}Wnh^(}r#UD_%Sg^J$1@(Gi$D(3Uk zj16=0ACj??nASb^f|D=86!O5SajPQKgam)L$?3zQEW2Zs#^D=vyl>fpm^yF2_`x0* zw5{O|`p6AI>-p^FphkIm`jbk1WdLTO?R<2OmTaN-zjF&i18FVhHvIdS-xdd}e{qA= zVRJE{ibl8eYlmzA=x=zAIllR;TmAsNlvcJjpgTsIrfkzXnmbF+fPy^pig+DS1w2RG zv7xmIe^Yv1_EEd?Ic_vr44Z@BWjyi(iBX2xUpTtPB++jUtv)vYQ@4YSl{c_s6DQV& zZj6V`953V_QHAv1oUQ(zi{Ub+<=#1vq)lLp81+s8EIC~r+&gy{xq}=djRrxZTGnN_ 
zH&>?FMn|KA#x;`l^FmPbRaGZs5w>nu!J22hccjGGS}jl4nY!*F^|RgW{^u4z0!ENX z;eo@R-z^O!EUs`;bZKA1gs(SkbPOE9@%*&8Y^gBqZSkWhT!jBL>VWvMe*C_enJekU z7lA%W+=iNfPF=RQl^AC)M3KL;2a%N=a;{34`&0iZUCo;@@%nfeUcB>z8Kr3>!Na^& zpWhvp!zMMe+Nyrz@CTf*P!R9bw$wI+U@LjloXSs zwXcuti-o6(<;j{=Z19*EZ1`Rb_O#S@gt{tD?6AJX39f@$Hkj_e^RA?%42S@kh*Uf8x@{AW*jhb zJ`z1sq@o+W%cmjE^<}<&VSA#hfz3mvC4CN^r>iJT4Aooi zpOmzDsF^KA;^}I5n3w5-n)h?`r}22Z<-POcU?W|AqYnI0T#z8VI^r^k|@gAzMrPlDcI4 z5_Z(u=CLz==`W?PA7@RLrk(pp&~jbI6?=x3sDfO!q>q|U_ha;o?n$9PL@ZR;;coX# zSDS|^{GkB&4oCuccHit=OW!9Co@y3 zJuo6G0{U^8_8IoeRakWQpEfWu&SVBQ-ja43f!nRtj45A})iwyYvlO85c|!&Xjg*^n z+Lm<&VKu@8KCMs$L7@DhtphEH=L>^#)sNm4Lne zo83N@v*SjP>s02Y`%0&-woZ-$!|k^Qa+yr3Drjk}ofxdQ>{tKTZPuo_q?2_Qv>kMG zaXX>Pn9rc0om*kXf{iIP%){Nb5o}Y$oM?xj=mUA6@VP7Z2>fj6q@J*fU$dG{ojii^ z<<%#%W9P}(mBzT#xJ+9v4Gpq0JL>M|2G(hc2Io~Nfe-6fTkPQCtUdjQ!EN-A;U5ld zr(6U}%g(dFt`2PT#A(g|uYbLO+CVBu2L@F+T&SS#JrOoYedRvon!Fqvpie5k&J?I` z!3`YdPh;mR6BaZA746n@7>Wx^zGjC`4ijR;yh+)a@6kW824wG^D0-WYt)a`Dm4Qso zQUw01*)96En|sQlcKNp?+1l6%rdhgx*Ga9uw+McDqEIjB7>;V!2o>D_diMcx&7Hfo zw#E9?+3nc0tbCrQnSrnMt@0yyP_JeqS$4VllCD|iVAE*_R&p%fv` zkHQ~VfMeg$-v-m?aiUjG{JEJyGUY^vw&FW;M2UfBZ-iAJ<_RU?Ox}-Z5{YY2OO3$W za+rQHN(oXs{FdE%)J^Ge#KE{q@3899zHej1-z5JD3pu(u0IqbgT%mE_$p~`0w+t3s zaYTO)zNa&7#4B4Qwg=Oo# z%Y?9#fovI<7ekE)+HgICPmIptidU~@kbsC=W1_us;+)apDwu%ZvN5Lcvh#I97NkjF zNa56aNgT1lcru`KY*rTA@uBHA?}c!HV9zQ!*i7|0qszASR(Jw_s2P`E{iM|(_}78? z%qK?hZMhYHDE&by-?FYx`5k%l(Tr73F?kh&eCGrt#Jg>#zIwf2H-F(qQMo)afS}U) z=BSAJ+o`O){=#U~vEs!$wZipZT7@KGv5!J{e&ju>W9|xNaUjEIch!=Hyy#=q1^)L&qzz=y~wIn46F_5o}oweY-t zgtRAnwIKg>7t5PthRYDr+vivI$U}2(NS(a1L~|_>r@ys%MvcywnA4pqp0{}+~JE+xa2CFXuEfK_Be>{$E4hztM8Sm#>=3ZphO@BB8EL ze}FsjAl1^o(~i%jxO{APZRAqd_-@tP(w@~LKj>lQ(%tqncRfjyj%a)^a<<E3P?{|OZ7WfPR*m732Ha!z)Z}}q_I{B z1SHC1GZ2CnTVb^Yq=mbZ+c2e4+r>$dw&-*VZ1VZP-aebO2rVk^qrvMu?U{;bN0yFc zTSI01L`4N2FB-dQi@U&P$)|7J{cV}tMWm0-QrQ#|m6h9U%vRSbyntu(FYR(0!1DU& za9}_Jpr)!KWS5JUiA}HJ?^&E)zr)4MAA9FYu&rmPE&nDHuKx>J+z|`)i)m8zT+F*~ z)|rxla_38KK1p>APCjPO>t%Bg{8fN2t+nK8s#ifF*}7~zX@3TNQ#>LcIxvk#Qu?vo z7hibB$Qu_M-Yx>1#~;5@>ra_M434xlWSReg#m93+Nsn(E27U^6RSmyJ7-J`<5mxe~ zwa0C^Us68lG#lfxn->g;rQ$lk#4?P|esCZ;WUABBhk$!&1ZwC%isBuSfpUp;WVXEb z$`K?B)~haNa6@bBK1vgsgS2!Ng#H$@o75)cHaAZ&~bb$jSy}un{BN&&=&cz{{AAGkr z>fJ|EHbxcOxmjCJjpiXf`=AQG)D62A-`9@}xD^GePdbeK_adULyS z4$1dGCH*HWApOQ7Av?%}=O0f3c=1Lg!D}Ia2T7~wL~v4jC&VE&$^7|cu#>YDYn1E& z%X|{D?-QE)wT|{a!{749zEC16*~rD4?sjL8vQkW{@|Pg2LQ|5O@121C0)dBnT7CY;YPtww>R6(1?8rakuGXZx07yQmWP80)Y2 zQq+By;b`a~dI)_#(cmdN{`54oPY9`8^ZdZPMn%Z}#V(Tl!NIo(vtgn4rkFe-k zfmfKJwS5)J$LtP~p801@rjPg-?(en+uFv8TXCB74&fY{1y3AJFMa8OJP0x0K{RX5iYF7;sct&o1V@U4& z%i#@3Cbzc_E7vYpxbe`|{Z#MuExGL-^NGK&Nk&VaOUqZNTW~2P%X44S869Z#iag}m z`AwVF%=XUFjZ&gFMp(|50Gb$?faO3|jc`AX7d2b1s|6_{%*Kv=B*Vv*0_nF8n$L*^ zkCv|R^*~x{t|!zwA&yuw2)bD^(xDw(M!P1Ex}&1pA8nC-+hN<`Z9YRL1F1-+n#kVY zgj`ItHQjuR%# zdBdY1+`ZDa_0FfUd=`hUlRpY;NXevL_x&3g+={=$)F=wx<%Gky>dxjFr^@xq>D?xI zfHbb9!&-x3>|n++8+gYrQ858xTA-eeps+L3ws9| zPWun}mn0PWSzwXL1g78Sayp=*K%!)3C9>6&- zVaDe0)P!AjrZfVi$LV2D{jm0dH8V5# zq{q;E)Y|ALSUt6rHfjYl1gk~JCnBf?UG8466?$*X{_t8LIWSIHnRW_K2Nr*IfD73l z_$iL^$Q_hAccA*AOT7?08(%V_p1F$nxbl{( zKQXA=xV5;mujKo7m5R@nBRXi9Yjs=+4^xZ$8-e}mQecVoIhNp_S>_}}^?%vD75=uJ z$~bMZV>YDuj}^#k9PNRtA!*x-$+H%+R30~nq_UpwzgEE>j?%O{7`^u=C5`Q}O!m}0 zc}=`n#GefiHyGPX9M9@r+XJHaElYI#F@qT%%=dX zhyd}k(u%`kWNmGC(qiOL0yFzUS7#~q1J+CtSl7HudxG^)9rGs^C=V&^$0&?j<#E+w zslW{8pBl8U=ok$6uViOd?hnoMLRD1NZV~LQgYK_xsVb_%zqpGj@~Y|}eV?p)uRHt2 znJnfi7(gD4CPXYB#bvCgx7%pTJ6G(718ae$e<}_|M6S9Vm&hb+1T+3T8bjJ6=a#k0_LhMXC?I z*TTViEjv;n1ewX}W1NvAu9M?!W6+(h^ADes9y0jD+lZaEPx+SJK{=tnn>&Y!k6%t) 
z+Js$(;P}5dZNpM_9bkXWH(a)KrRQ-@BKGHhtXlS~FQ-Yn(_t}Z1N<+CpV5X@@BZ7S zNj-qDm55yFO?HN8@7n(h#xH?;nGS!v6G&t%nG$m{ z5q6p=2@f}|AfxC$M;=+$rPNX$=b#0!v@1zh4AkkdG7k-LT)T~F@gZ$~r62iYvo)Zp zHF3Ad?uK3i`9>8eS!{(^cqV_PUP);P5--?L8DT5B^MUz}@u{agHuI|7mK0bkANu0wh0MoCOUy9=@EKHT5Vn?+q&)>@Mm4RWS{v~^rTlq z<75hSV?jJgNoga4riT#@l8;)bWRm%XVq==s^d22?%a<_Em&XVw%)Vl3Y|#jC9Z>ve zigemgJUY$eIeE>_yyQ&r0r^_M(UFd+EPx*-(=Qpzm2&rl*3!jU{&NUm%-S<4wLB!8 zmlta4Qgv~n{{}rm{gvquvM%MB)yE#f>D^=%pur8oHoikkU;J?B#B#dcjhNk73<*A^ zYxN5pnCKrN%Ye*CjCUPKg=*Ni0;^-sesw;g2JIys!D6EE_fT{3VN-Jbxd^wuD@{Fd zNJ4nZEw=K}#3ukBD;^LnsBOl4Gi=N-b7rHh)}%}apaFo-ir>F0@G2HEA4vjD&*Y~fmDpM8>TCdlA?7Pk zQ^bTIs4mqvad6>&hQ;{{FJIK^L+8uC}p_&m0&p>ynRl${nB& z=fiAFAgYr%rXAv=`};Ir#wQ(howE`S#JadI&R-=K`afh%*piQncS3e<(Gtf0s@Bd< z3#TsJt|oOD`v!I%nqE>S|7`f~QZ&g@jJK|k6g;|D5$fmG!m8f=F@fk!kAd|10-jUD z*$Ab-V^ZVXJ+-Z;n_S4#>1f0^CBxLVYdUYk3LXnpyiBS>!<;QqA7}LFZZ(146I$EV ztJghHO<|WDUf_^{L~hD$=0;<_5um8dG)N2ks>m~PazDoP?Sk%+9qnzgdhnMDBCGk(m` z2}}SghFR*L)GKQjfQ&@o-$NBEMW2DM5Fi$@Z*`&G9X86_K_Sdo)~hMk7Z<-h*OIlK z)Z11M)2bAlvJjXNrVCFWdn}1>7SKb&a_Daaz%m)p?+|q8LC>~+oJsE^wZ5}BkJ^dI zytId%k~L2h$2NR^IviWCCu2P)6V_Cn1F<({I;?ovtkYpc12-^AE^j5xd~wFxz-RpB zte@zo*;`Gh^{?2-sH1Hu+kF)AGSboHgh&;Al0}%CK64Ylj5h#y%?=7Xyp|_80kzEs z_cgb#6=vun%H&*&Ww~be87;Ju!haSA+85unHBLQqDh6{)YF2xa7Q~F-7%z2wUVlhs zP<~t%IjgdJy;JjwTjin)WXME+t=4GJy!^Ru=||Av@LGev(a_3_wOkLZ9P&q1)lJp) z;9m4!8w)t-Nd~(+Yj99+gwuN-ynJ*!7v_q8{wsngX$YN6`{Aj@)+@?3$4}8MoGxqj ztHO!<{*d^;(nrUQs-=}~bfdIQ1h*hvCDM;jDtk@8XZD`+M_1ZmYEEpVNcJ7wpm_XE z7Z+a=4Te|%*@=o(hD07ju~K2jl<(g#@>y=~r(2Coa0)Be)tT*Z^rMS`X0z&Ym?;CTJ*UA8+)e`zKA->s z9`Rm9dAgu+e!4Q>;c1(5+6VT6{IQZ^8UkBs`FfElG?G!5M9+bT`5UHeRTmZw#Mh(_ zw~|mxZT$#%m)K-0Z`qk{NF5KrE+o;v6v!L(9C^sp9YU^F4+;gO2dHP%aV2fn)jD16 zai;l>;oyJ1N3G*3*nT#z@e)-}bQ;~<)@EYwHWOKhxwqTNy|O`#@Z0G0XejhY-(!nN z%iEchi=F&I7 zRRYe6ZqW-5`kb$ZG2xVugNw)1`;|JDOTF#dMamuUajSL6$}_mrZfLrDw*MmOBr zNibNV{C6JpnpEpTvCi7qL`nu1ZvZ^%#MRc3PIjwqER4AjaQ9}S@^G()fvUHY2Ifgi zpQP^#qdb-uAdBjnb3D=X+!1zs&8EI5Moyf9lO1^1-3O7Lxm@>~kC? 
zrHiWp;P51YB*HK`P`9H{jJ?J}+*?#;&frfmu5fj?mEgw(mTkLL;IDVOq<;{1rK{M= zW0bCk_Ta_8aJbB5PposFMmjuu?T@-g8Fa$Ha<(3{QE(t1cz(PwU@k9}JelH$IPYAV z@vrevK5*DI{>CM7jWqWU|1#{85;nUe1vN#yI5XWpJk0$s&QK1gXwv48i=L{->N(fW zPQX<}7#1%EnssVQN<TeJDpLm@kNwFnc7Q z;_P+}SyOR7al%WhVB{gX8+;Y@N#8)v2gG>eM=}9@cxki2puyrhxMv$DvwI!&$tvrL z*S9}6+3!zHcwj98rPR~I?DBsh7SgP=Yzxfe)_BU{Ja^Uzt)+i@oh{Co^Spk1Vu@up zFYIgDRY#R5f79Wb30D(H8RK*IB-GFR>~MCIbG6@)J$|ih*hv}cf|Jc=?jF3Vyz1}5 z51HDy#7^|saGATKxCbXfommsZ^60S>@vezs@AGvLu-(?ML;FRxwT-gL{LVDn>Dqeh zkZ^@#n=U+hx=J3Mcz!r3Q~Sca{b2NH;Eg7v!_ifIE#c%hPb3WUti*A0&8Xk%{Fd=D zmt?2d#2P!8FG}Bptt<2Iu9`A5#OyDaKn~PxZC4dxo112ct_2MBe;Cbkx-78lF|GBt zM|;70*Uv>m%igSEd;}ic;cIYu#rh#f&Jo)IR*u@w2^Cg*@IKodKDM~PG+=q${%13C zC|mVY`5uni9KFMKa;@-&A9FE^XIv{Fj0$3$KV&&;UEW(&ad9Yq5mynOW!`-ZJ@%~B z31%e($gQ`wn=O?f!N$kg3%=c;15WEB@Do)AP1Wve(KD|0 zZo$Mm6LMyG2SVYQH^^?uc+w!h9VS}L&TAEO5PPnV9)!^Ua4 zcq^!;`gkL@tt-ax%hQWu+bQhP>$rgf?5GQd`SNmz$kXmIM*2^^at!}N8b0P-oNE4W z1^;^=eL))|JFWn_l=-n-(hP`;_QScBeQXN$lWxRyfnUFrKhm;;Ij+K=X+qtCg>WDK z+#fU_pyeM-4~XMA=P#rYNy~`0!o!0#Er6$hMCg6jV&qo4_7w9uKXquMti?g`KzSHN z4iwio@&?*h4^_eDD1*sX67Oma3V_YYMceYtOY4@uZsBx30c|^I39XIPv@3=UaqXH4 zKo0w#M3QCkGUGG){I&T+=KCZv=GC+s7 zmoohk%*}E0q!H9C&Y>XsEu9PW2Y3Z+V}4shY2fp+p4{MB1RsNpvfU$M6qgn%qC@lhCf6Z-p)&JacS{XUK zDmANB)Yp|tYnzJFriA!(CozVVtkO97Y}BrUSfk%%dt$#n1Y%WdVR^ppW|CSvL7FH`UhsDs zRwy&rS^BZWk!1^h70yt31Tg`K;dbW-Kn_JZne;H*(VFC}?8j?1it%Aq?LzLa2WW_*_o{D8B5_QZsTGynTl@0iF74(4DP7ikp_;x)rTv9VrA_^v!Bf$LCR##51!$|NPXJvy6qGmiXUio zHt$&Yq)DwPyk$y1_!$VQ92Q+|eMnvM;O8Lb*n>AZd$Jzq!6o-o<}MNkDv0Wj6Xm^| z5Gk~0uDRVwPO$gG`2;L@gCDoR%D^`2%Fzu#ECHQ%UAG^_-# zlAT*Z0qZQ;xi>cj$7|%Xo-)k48IV?~E{wTD_JdqjPl5vnArTBo7cSdRA^z>S`kEKE z4!P1kdUU>jwf;5CaeSv%KEh&>bg4EY%ce(;j`;|0LFZVSQ&N#=!XokOSl}K=E!kr% zn>|Q`{#{UEVuO6c_`Uxj75~A{VK@1A#zQ-0r>gC^UVk!3^{zQ{w|y}bA2?rE(BkhV zgi8B>zB?%G%1I#4m}Sr_;{QnJ3HzlX3WTToQ!6`*xe8@)DMG0=bD0QT-SC;+gastJEV<(wNvGtNerwjXBq z@q?g<1e?{D09SHlZix(BE$RmYkUfyz7}p}|-p~927AD6){bEZb@VW>xg0Fp3+Cv`2 z7>UT~a0>-1!UNuCe1f~(O(=+A?UJ$B99|rQ4EW73Gj~sLu$q_~Hx4Q6U0r3+jgqz4 z`6)uid-R*uiBE&a`mZyxNqn4tGAW+B*IE6y8oN1miHssY`SJG3T+_sgDca(%SZj6F zopG_hDn1g?Cl!bsv9;34PA}QE{rO1g=;84oo!}JJH4gRx4WqKKQ=Rz8 z)fOGH(i=9O{jzMKL~UM>3nWi%5OVlO>NIj3@Azdx+~66qi+@T&cn0+@lY6FfDo|uZ zbOoua4jfOlvyfUY+4=c4gvW_IktJklSLm-&!n^f`h~v1C$lGlPZHwD%5)C8nop>uo zT2m4{N_w%q^ynonU_(a125~WsuW^YWsp8KYI|{~#(3R;e>~vn zFWI}Le`(`iaC}0x8xF7)nZ-Ekg#77By@$2VihLPn$mJXn*wSJRQ7;uV ze$B|#TvV#h1=fze*ZN7>rXagVp0zlsZ$=R5To}6UP92`xK4;a9q+jxEPqIg^Er}t% z@(~mkQX36jh!dFLG3dGL)aeehJjV9_>A^ zP{N@fyYK?l!*vgP&ak_#D82K|Zk#7;i($s@6-1Q_Y zelkS*@Y!?p8b_=e`&E|qgD#))o2|vMb@?&=t;7=K@i$+DMSAv8cU;YG$qnJ5#x<86 zIU`Bc-ic&mM45tMmj?9i6G`Rj^=^_b2A3OdTP`EhPZ6d-zBi)u8WF?8bzI!EmOf{# zdF4K8oXR)1@Z4AYyU5Du^l^z{z^+C(5+(e$hp`1oG@8G}-}z_5^kzxwvkQ}!U8d9q zzK*hz@IOD*qF%aLTu(&W=1Y~?k4qgOZiyq}1#i8YwrBBzdn)gh>TT^D0B_hf7vF3P z*ZB2Yv0Yt}A=tE!QLU$jn5?K-`AZd2rPistW7YaiI{s7t!acA3U&M-fRc5Ku6YO0olx8? z+q8_Z{O@AzKY(JH2B7COvx73luH&PWmV(DA2|EvrXIt|J8Ua7}DboE0m4RI7`x=Uf zr+c!yDG%aWuIHo*2G8D0x~uIvZ&c|Fq=^EHAyZdH zjY+bX@iY@4-U9^3IF_Ogsp2Eh1dNN3Jr)1RuMEiUucBTNo?4vO3qljI1Am<1+BAY? 
z=YTeWAAr@{{ky8JB{CD&y^4WWB4ir4KNY7*Gs{(Xvaj0*NcZn(763`dX`wg*2W0A6 zom%K=FZUsv4irMnItMK^c0;T{VOQy*Aw7^377~N!NTP(s8sVO_XlCQg-4AfJh?(KU zv~IT4Wvxv#LlLQSk-!@ethR96dE_5umeRSWlHxMxYyOuyi)oTg8K!uF1-Y$&L?!j2 zqwf0|bs%58~`GQIr;b(tL}({!AyeO|_1;V_cnR2V#7;(C$^^8G%PN_59{ z;nyxI!=R%cmp90hBP64)PhPWdJduc>)~@X1#c;ne;8vNLrJ>mrdq_m1Vx+ zvWu*`MOwy)V(#P7-;3DigjNNUZ2>$B%cs)hG{Jb?a%7p0m@xa_6xK6RRbOLIX4vCx ztr<|lbA+`4)&7;E4ft=7Z2N!`HKqn7>G zRz{tL^fKkjR(=BGBBOydB=RY+j^gX{3SNonAcPD5+N0w8^R-^DHhdX||2XY{5k%{J zF_A|mmb{r(a*^~uIhHt*@O#u~-}SH1}?dzvlRiO4QaIDF6G&U)PM)P@}qG0Yt&c&koqB|GvE9lQp@V%x;|Q^ zLeaNbv)gQ0*2^OZZpXT1!;S6h&R_}`$qA5qL)n1R8f&F5O69%E>Tehpr!yv4x2s`+(* zZ|3+{E;PL(I5L#e#y7p5E&bQ8T+H8+jUAcD-VTZ=A~_zk{JP>eYDM$@H{PxcZ)~(# zlE{*?zQxWIqbe4&+qy8?H*hebES1O6Ci;7OV(j@k?ZunsU8i$$svu6h^Q$)@=DM#c zQlGbmGm1FDIYW;>2Ht7*?E&t8m+wqxy?Pw`$D?5OM8OQhDatpOpXs6o{#mN9wuu-> zL57;qVWcnlvQWt`dHWL<*4|50dU~POW&Ll3M!_~UcQhdh_1y8;ox{F!A_Xg<+kQyn zG`pVcBoa+HKrMIk`S{_`tx~XN-s8_DN>f!A|J~^RTlX=4l?)#?pyV77ApN4>AUTgc zQskVt$c`rEsN{uO0fFWOb=NUW@#U1K(X-^ zovshC81P0<+g0)j#|Ol7ZC4;e$E#b{TmP#10>p~>$2!6*`O}lGtD^aRwC@7_0qv4K zzz|PMuEB>C{C92y!mns?*HbF$ach{p`N%=>iISd{85)Ha1aiuhKVetD$;>B^q!ph8 z;a4(e)et|)w-T=46Yx7LD5VQIi7h&iCnTK$Dwg`}DQ>BcG{|HYHEP~e^fUPaOp2$b zxW%nfa<*X3NWSIfcGqP?<4Hud#lgVH<#|B>+Dnc?&?(JZJ)@Pm}wg-{xJU&xYQBzVBlCdgMDfgEA>A=(S8S~Cp z_?*2Zvr7-%+K*{4Ka6oG(1Yt`XL@JNlm&HSs^x}krF9?QQA#(hnIux!Mczil;T*Pg z(R)IwP7GfPVH*j5OnqYS*|f8V!$*ysvXAnlTB)`9&C5rueXQ)FnBDtXlHSGYMF~Iu z>EG5Q0p9V4n4?|ct~pQH>A7i=IXqI{9ZM34`DaT*iS@f#q1h~vO9Vx9Ae*u{kbh@! z;H@H?y_whiWA?b2w4j?t#mVyt_s7>cd-9E~(qwgs+b~qH%olSuF1~U(z`k`{KsP-b zbvRb76RXtS!pV2Y^u7n9!N=F|c*Tx17|MBS##j3nQ@=B!Z5WiG_VjD)Jn8pkDspku zIo))=l4^4bH`}^c^z+{v73a8#)p^Tv8{6<+JJfb}{I_`};34KO=X`fFME1k~X{QNa zSU_n#wd4%>>{(HVd#JygzX@MHpEF;iIGKM-j~-m}2}zrtd7kDhg?c+<#4Fgv9F)A# zw+&(Q7@{)!oz4Y4T7x+oVc|fdo0Vh@l|(IS&G%AM8OY;>ekxS^7?ZOip;-ZwKo*i* zV*0-Dvk7mI-j(%GRTgrVDyUOmvm6kr(l@-c!W@07xF?IxI&F2Rdl)I$^|O;+$N6EP ztWA*AJKivje zh&0_P@R4(ulLPd-o@cl6w(nVKEA*H`s(fR)qW1vD4nv7+lrCO$$jLta%(d-o{UgBk zQ2g2k+SEsIj!=Ji%yzDRUgh7P{x~gamyy`5=YZSZy%ROX4*9E!>VgbO;V#`w7hDCK z(?f@7ZI`4jz{Hi!2$;IO?M!^pTZpF4P1 zUi65ADRHoQOeiP=v8U!cJO=YDyI(aoJ9k3Y_0_0szrA7Ih1A;~uWnH+dx%Dc(+RW#wvJQxvOeI!%`ao8|fy=*U!TjV#W4sF+e$5U`Tg5n+4+W4dqz0#n{|FP|un zIpWEjoM^DoN-_z2CAMj9l&?|^zA_hlAuF7k95k+?2G5QTtK_ueO5E(4P9x%fCB-@6 zRk!_DFc0}78i4R+Mtmqg<=mNr;@&iD|TFVPP3-}L`Y)Br6 zB5J*ZT1`uCcM5;1V$2%VZWM0Q@lVcS*O!NYUR%R@AlcN z9#z+Qha0ep3!DHtaLWpuYGCM$BBt)3;o67Bk`;?Z7wqQot`(XCkYtZRw1{p;a*Q$<_{aCLaSV? 
z=!*ifqjxmFd-rnRHQGG%pV-a0ar3Q-~NB#R*}I4BHd&f=eTXy$%ZF)&4}H zd{=;V9KXA(2QQDeDr6((MI_Vw7)wYw_?gc#PD%Z=ws}y-`+1aqGIP?LD)^5#_H#ui zwQQlCbsoLS4?~KjZy-ft|aWj;sBt-{$AJ;+(P|>&YClo-f30 zCmqmu=hG)qVPMOZ*3gt4dvW1&X1?-RAq&fcS(#9|ROqRx7U&bRQi@%(>aO9YZkTgn zit?DyfWQM5k7SlL3ha}>`hT=;GOY-!oLj@Eq`YSmVysE{4pVUvxuaIAH`3yWFX97x z!Ff*TXh}lX#F``axnk83G^THGV7z*(%(3{9RT*&JTrp$5?9p9mt3DAd9?0-luB=v@ zL70kpU|41?YF8?-K2qOi_iD&z9Kd<2*e>&)`5WrhNZ~2g(_tQ*CRjYM`jNGULtr_F zg+txbTxG3loISfwDok ze})b^9ZRXdNgkD+WNhhv$}13!dYDiH&|B*VsoqAsOjU5WzV_u@6rVMWz{&N^Jv zmP;EVsMIj7Wq%`>E0o%~P++Pm9`<4n*A7%fjed(kUJ{3O#Eu=V&k-{q1Rh(8Q)RqL zGwR@O8ch!_2MmuxiETu*yvu*UZ;@Nkb8!kG7j_49m~4%1M<0(BoBtSd6>*2L+k zNvtu3?~;6M_n{Ny-h(Le7HD!=-y2^bEK=$>?a>@ATLhTQfumP6gw4CU3z-JIdh-+V z0uQ6i&?VlYpCA`?QM&!w>~=R!`Sv$I2+`Su7`N{SMH5>tWb&jpJ=q>KZ2$Wtmxn24 zRZ>;vt*^PvCOe3(jbx0~)o-TmHJUm26*duMJb!h?Q&|50?ns|elR~K%>Trid(w0N= znzv=Y|5jj6NOT-=9&>(ZRSz+(#PjeSEu3`JMG)5ARN~EaR6nQ1=!sJxCz$dEWn3l6 zI0D~I!qR@}spA$ikEZNCmXiyJ0NS9oII-`Np+2DfhSUd|JwS#nXCngV6Tqw0-rT)bpn38ltBI+!UrVi>G5xv}?Nv7JG;GLuJx1Z_ zK7SpGJZmf@4U{bLk>A^t!-E7XL+wf8n{JuHYd6t4)E2Y~*0)nG9L+k&5BrspTxjLB zwHnGYs!z5Vm1xH6>T)k74S~Kf0@~Vxv&z748dM*rB4Ayrb+5Omams!aZ^IW-QIblL zmS$Z7hp%$t({7yCrd%AWC=JU_q*UEW%8b2WieC?eMon)dW*_NZ zqz^?h3ryIbiMH(~>cT=?g*Pun*WD7;JCr@bdxBywj;%2rKe+2SJk+lf#-bavH_hOJ z)|TvEX@4YDdz$OOx##>C2|9ww8P1eVnCM&f^7VADkM(=nz6fU6q3Z~TM1xqLz9|Sp7xo7C44sssj+Iu3Cb^j zJ%Wha8_48XSt^TCrE%5G?*;rgYWGQCULE=NsQ3fxJ$H7?l#Mlc>&(wktc6)%E{6?C z%$-%r+ti4kPXP8RGxA#?!P&9N^9LWQJ^QJoCAh2eBs96&=R7v2^B2-qBT@>_9x-CN zwwR;Yl1MMaAiVs0Q>1@ai4c1IRXb9`I!2#cqFFMp5jXzo9$)VFHc7wJ6~;pU@0Bw_ zhi@{vP>Ea4k&1T73+O+hlp{j6V8QH^33K(=9=%8mfyS30=9$L5tHfwwQb+)DGq2Y7 zh5fP1w|PK}HwW^kUU_e~N<-~1yfZH`+e)^3tw9T9_&lUTdl?d9MH3{n$!Du`8b`809Tk0J7@Er&*;+i@P& zQMBv=_%}5@?~J)vIX*QDVExSh-wX}%* zE_*v~H*_*VoZIvs9>kG!9gk)M9KII^F`SY2eWzfG?yLBn!(CJ7tN$sgJw$I;{WEi4 zBvtx<^d8Z`&#<#LZf>Y~8wf=hVCW zrZ1wTDUhVKp#NK56^UmU71{J^oGkA+nITV&shenYiCF^KSB>3jm85H%6=u zEoEg7o7vFkr7HyW)qapX6P*1m;=Mf62&VU3HK4BmKWgs@p`sBuPRuFujML$o?YXP z^8t-9y>@lf?1s42NYR7o-6%0X4QI?KuT7bg3U7LLeA?;A#+%iI7kaqQ5V(XlkjLHZ z^gnDUUWC2==K>^4$DZ;M*5MAkWOHqK6F8P}%C*clmdwWY`OkrPP4nZb?_CuCWNeU2 z{H7*`85~wmM4-Y$vIW*lZ{s%k(E6dA-){j8&HksMPdAh>l82OCBf&HUb{Qrkl>+BuHppaN%#n2pzah?2;}j;&ep ztp5&?-!uXd02wO=ASYYS0E*IwNE)+ScgRmaOqzsFe;k=OXC!Xx1a#~jltOHIaQ&&xL|w+9#AttU*vm>*L&s2bi| zMeYoQ?mc>L`$M<;{>W#GU40el=IruW5?PO1L1#x_R_z=4~?nCXx%1=LCsKW*4o7Gcr;HSzSt$>}J}S2G7`= zNHyb7^*%qv6xz3Nz0k?L`dx+5TKWyFWD&(TdWW3wG@8Z6jwX(vjE(4STm_fN;bLM6 z-m0RPo^_uRcHcf+B-|Z5`D;ZHruvAHamjD7E`M55kF1m+EyDdJucEDXzWk(Ch?5%F zO57&xc2ZMUnPQ{J@S8PZ?UJ+_%y^Ewz9reqE5hk7_7!#^AT$$^Gn@12RZd(t4wPvnSe@@v9LL9zC*+yz_8gIl;KwUNsPqZ>n-}+J>TSLi zcfx*)8V8ru#Y_aofpZi%@xIjG3i|4r15ZLntsj13_YeN`fviXeDuUV5x_`Sr7QnX% zYs^TxefQwf)hUFMa5~S!iF0sQu5?9mDC--02?&^#n+YMqxN03c=@MMi|0j=@<$x+KGI_^`pfl>X*V; zInd-Xm?ybkFd}h4$zLTd^}eRs6HO}84|yNm(ykrUkzP23oA@E~U=3j|mm-3?fxaPa z$s0rR1;nmQ8jUKsp}IaD!O~2BV+AIwEH<1k?}x9 zk3ONKO@)BAhO&98Q_4v~rhHiPLcY-8Ve|Xo+?gc#2NG1`U!o(nu57ocwrkjtw~>kf ztG<}`1WeCFSOro(U&S~i*Y&HDCv*QU`IMC4%M>W`C~I?0g&}2pi61n`iK#v3-WB-< zR&J?m-xaIqOT{Anp?u|eMSF6}C^-*NYoG8m9h5u{Xjdw2GLm=($y=$ZDiR4AEV$0r z#r+7{d0jif>5Wt{m#mR7N{*mNsOXI$gwGofx&|Yk#rpe(}XS_f9A@jY0X!rbp;fKW4_^SsU-_pBd|4QC0 zO7u^6yccc=Z+%K7x*Y)%UzE5LT{q4F154ef|B_6i+83r}Dj-%K50@s7}1zg(i zyKAZb#prp$OHqe7AK&XE2Midx-=3tW@y3)Dx3lh-2`e9V1%cve#RPHsCfYaa1^k_) zRJAK#P7oWA%0(w^QTSsNb!e({xn-FDC}eeOJe7OK1|_br3TH+70Gn6NK58xQtbMgO+X*^`u8071ZQ_JYh)5F#S}Cl+LOkh-v}-#l@C6n zS^r@ks5PwFk37?)jF}#&vhwsDvFyL_;u5F_x5Fgj$#UlQ{1F($N#hdv*Y5IM%&nCe z+#Ja{!!?&vAg>Rt42S)fPh#u(@2;#5F8Rk903bo)GdbHrlA*79|3ppp9g 
z&WT{PQCXstqb0?6Mjc5Y1`<~{kqAk@dxBU>UP;Z9fx4a z*w6~xHyL%>^m0Hd#7^Hdz~a;4QV+1wS$0j|Z14Go6rz9KD7|oyf6-Tx2!r4qE!KOco`<}Ee&ZzI zk|xhd?RYQ0J=aR>Vg9(q{jD2xaE0M|h6&s3;Qjw@r6th(YFhZrklASr*nLLOPPQQL z=8a}!2Mvj%hFvOYNqy*U%Uu&3YQ%7>xLEIH$g~>c5#3DxK~F^u>SHq()IN~n9&Mv& zN1v@+8~Rrs$l`>nK=gT(4j0md$2(FDd)FvWcs3T;5TeV4kGnK6E-5TW)B2kH=f3>! zt;9{qa)N9p3eH|ah>`DIb!8mZkUEUfg}5GPC?`WVlx(jWWYbJHg##?v>N1UZRZ`g% z5Sgq*6YUU6UBV#Wj?;h3MXd2_eF(2TE#Rj|4RmvvWtvgmE-Do`!dM1G%hg3H46;PxVxr zPUbXt#5Kl}t6sSwCRtMDi@H zW%w9r5)5&$l;n}F{pwP*q&rp$K!M$!|l>c;Y8oFQn>2qgHI=o%PrZkYTE zQ2G(dcorxv`Ax>EWW&-xpTS6&=(OjHI3W7>m@&S?e27R@u*0XerWeZ)A;Q(}n1Nd}wIkKfTKKce!!F>*Oy+6YBQ%@Zqw)p{o3ecTR)lWaJ~ z^V|?o*U2b-SssTk9Y{PUzx{HaEt~#hw5U`GH7K$laBHml5*jHUP}(dF<>UT%LIDq& zsN^xf98jxUh=jb*8gLN zC%|6`uCLvmH4@r>g$OQYrkJL@+2zF|nIbui>%;ZXuH}pLjo+xv4|vnkF=X2pBFT)& z&B+v35&eoFOIPICrq=noglA2bD5i3_s3IMPy_HOBE~Tl zt(1>QDx!T%t<=P-OcYEsPb|@+eWRiMKhN-l%RMA*ZOCw9I0n9idwUgWJNhY%Yi@p7 zU#oMp*K>_~Klq(S{#Dax&1RXE%asf0&$n4aWVg+9n;{~sX5iZn=%ri7=^)2N;?Cn0 z*Sc$IKDC-~Joi;yLalNEZqjC2q6;5c2FEmQXcnN73!DANsk^5jY6@*!p2u&B)@V^P zdXDeHP5i0&ePy~F%ev>{5UDN{0{+*$gY+D$e)CtYo+F%dm>Z4gr;zJhdJ1#h2+JDK z9rk*EW@m{DUdqY{U;n08ieyF_9WaV;#3!2U{6-Z4k$eI|q{UaT;3TAgX=o5Ro>Owr z#nPm8)J?W?`xya7T5kB^@u=#iaQ@I*!w!`{-dNY8M?VN7+}c^m*}t}+J~pG^($tKL zP=J0rF+p(9w0o71MY?ZdoUCJT#*c{ojHzkJa_ePw`>AKalL(|!9$!-)WFdJ%aJwO$ z1H1`dob2@CvmW6X`JV4~o+1WodO^C*y=gK?Hh7r3sK=gWPj1qlo4-OC2)BsK{Y;b! z%c7Xy%;bIeR!7l72XGETs81XOuKjX*NSB<9%-3tAJI}4EO}q0i5KAYU1cFzSH-B!2 zi_M7?>1Rd8K~idD+P-f;9EM=ujgdYK)PNXTZz2+0fZzjbZ%<<3RKx_U;N{)&^71E` zl6h<~;UY_bFu`C4=sG%|iTPQ(?eNhlVxjxR=-=x2mHCZu(SQj{&C^GVBcK2D+gnUe zWxO^Mc%|q0M3NOTKBH-;qxi>q5bU4PnDOu3%0E+`L2NkDi2fjBb9?%`j86~83pMmy+rGm2*D_l zPRdubW8RRA;|Kg^`Arnp!pBO5N_dK?-K3jDZ4hG@&XGzWr|~CD*}IkEbl1NdP)!`l z*pYHKbfA4|uey{T*VBNXQHv=T`%&oybL6NN>wP-T+n)Ujd`E|`=vu8woI0jH%&-NX z_}PY=WV9D0ti8lyu_zNxD!0{~8TcvjCl_W#tnoq+ReCu`RM~F=j~bR*^i*X@${ws` zT|L1(iGuuCzK6`6Yxx*0_9?ZTN_z}-`8dOYx;RsM5{OPdOdyWFV!4yfeCSky8uPK4 z8B_^;E`KF%MJgO^38Uf)&G!0Jk~fQ<{w9K_cOh%(!ox3jH%s+>-{>qcf9qd@lS(Z6 zN}RyMczvx=UR%&#?4nmCG6f&I1t7IHAw5bCZ)*och?j=UZ6rEYEg&OkpFM5b@vJ;{ zlB&IcmVjdp$P?tNuFv@juBZhZW3aYiitZ+t$edQyabXeE#$}}7<)&?25&dks*6-$A z4*y~D&3y>GkpZjS6ONjL_T@ldB{m_Vl9d3!d%#aZk_lSJtBA|%;CVB|V+ zfLY?;e3st(4>Ag@FWpdR>%#42v^W}pg3o?@c^E`m0snd4ta*Fiau+2s20WF7*}{rX>*GEg#re+Git9x0kL|BXel0LZSY`I*tVx824=vu|57 z^J&_T8b)izkUj@915cbVt7SQCXhdevcbq3R_d=lHgnR_ zxUfyLb$x-_S>+CPd!qW7dmMhj(sPr+Br6%uE=tYn&9<{;YHWI$(!p_FW?Bf#*97}5 zPAj+8Buaf=W?#~cyG}!Gb#C|b=G=_h(LoH?%-NQTr}#V*KzCA2l8*vnq|{MGXj|ON zB1CRq<2MT4bUf4X|03!y@7_x{$j!d zdSeoX#~!0o1kB{>^}Tho)Kz>^?)ld&M0}@OR zZh@5j$A;f?Ekcj-3R((c)VMv${7m~YBC7mids{$^P^<8ixO$(PgLTxZvE4!G=GMu8(M$QbyE+D2+oeU%91sT61fFgS&&!>oFr z&O*g?vu>`s%+5e-AVx(Sf=EWtp%(6JDtYOMW=}m4i5=ZiLkQjaC<8?_en$AM0=&*7 z1D^Cg?GJyX_oBqAR?ZY3(EYNuwMxAdBP;kPN=@d2+nLo) zoj>t1sTjJWe&(KbdhT_Dc&-+#{~C&jQesamu>=U>-lv1kn<@Tl7kIMrkAlpv$D;d$ z>FO=S?DhRWZi{C;7{^AQ;Evtb;FA^&S2$UpHf59NC-9>0==kYv{UaNeFacM)6mH64 zi4v;@l_?i^E9F`A8ym4AKEJQb47j|}D!}jiFGvUlasjxEMBSGbY#}K@IGQlq#xnW- z#G2`W4jQIBCs67PAtx1jIJzNvxJT*?wYA1rI4JPR_CO0Pw&DC z^(We3>`zX8xen$u@8*i@Qk-xUjJ-;DIX)!n}R{G}&Yjy72Pck7fc!9msR%YfY zEw$`!az)G&xB(ZL<0tYTNKKgZiJ>?qpdrujMwnE^I|<7*z@KoF=3f>+LmnBUiI)fG zw!|>p#jtGjm&)23S8-FECNZmT_x$`#N}G@ZLF53UtJ{hd5e;aL`Zh3wcOpeZ7 z_^BZ1A~;Uc7P3zBy2}kxrR3nq)2kIy9xmoYr3}y zsuv`p33)m`mj@Len4GpET(k;x`D@<`{NjlxN+wVoTY6=sB4W>t7BF*7EMrlzQd*e& z{`Y#Ze?@HA=qE$PI_1K0MZEPfiJLvb-^-LvQ@tTd;xf(v85|Wv4km$j@MHVR9ruGF z#>n-DXJY5zf*04^9YWQu4|ksMJQ-d<__M88ONQ39(3>61f=`Q&V3Ay8`q#?>;K93p z&wN24I4(84wGeA)Mlma_^JGdOaOR@L-ycl;Cb~6-xnB>1m6NrUU;dEQjibYZ)g~t8 
[base85-encoded git binary patch payload omitted; no human-readable content]
zON&W`CVECiil3S(R*rZttmX;`jDXx`3-`IrhLIHk%eVijPjx26s*6iiWk;Y8dcn(= zo^GoV7WY}>gHG1!m`BC`$_VTs1_3?qYG~HA~7f5!xZ**%~%Gy_7c2) zKH~%S&b)u9YH_hhwc1|Y@x5qm2&9jCbYF4z8 z2GA7yMbjS#bPC#sG{xY{Lyn7p%^|AwFVN=x-T zXIqcq*4W`w*)j|HX$-yd^hMSxfTk6ppUss^J40%+&I$Tc%HohdVpr;1l$?Ulv-g&L zViynwmxl84AFBfBg=vYHn4tuIvVXrX&Odzt#ji06i~E)yi;F64tkgGyPa~PNcWV#u zL23aI$>Le{s!py4EGEEcnpKz+ zpj9Z2(Qw~vB?5G4A?IDVT|_M6YsUDb)kWIYJa9-^*{SjIYS9y!#&y}%vD0Lfm`>`l z3^r$%i8`vOG!UKaSIgJdX+sIN^N3$@FLeGMcnAFH-5^Y?dQ95kZ{CHqbv`W?@FV#I zey3V7vPE5e86QAxqZ*7z6dEy=N&mwsy8Z=RbY%yIPuWJPlZ-Q$dXH#%Tv3T7`i1o2 zYb53yboNv9Dqqb=UrkGqzxgYrLBT{=);b(_C0!Qu8ZB2{=Gmya!zo1Vpx}BsV&b3@dI2% zdutXqc4=>LWj?k$RkB{|y*u<}-wjdeBJ~37G2OhmtRD z@jq{tdUsD{aN@obhTaWEdwJm7pAsCV=m;-a$96mT+C@^-;HDQ|_B5@P9-5PNrVH+& z2KWte+;{%?Lsc5i&FEcDU9wuS@L-G3&!EA6K$J00{qEMz_n;yVeRPS=dNZHj15~Lv zZ6wJIh@e3sX<&`;=$qSPF8M=?i!+q5V(nScELrA{Say9_e|aW@P8Cn}q-N+BZ6VHT zh}mq-6VmDaNU=Ygedc~*wa~MK`H@LlaygZ}PNh1>*-RGyQLkhi8sC-(Ue$_I z`gd!L1|I3elau?>7~coB6Sy8H2~XP4RmXbV{+jn3v^PjEPb|YrALMLaI^$g1kJa;F z(HpTh2?^r1^VkPPmsVmX4wn&4kF}yD2HxoHjgJjn+vf?&P<{<)<1;Lx^uj(EZ-d?N zXfGzWst8M~GO1pr28{R@?^Eq&{?XTP4yMBY7>K>(>kx6jK7WMiD7rSsy~9K>O4&b0 zD>RYyw{evC>+U+PKK7dL(RE&%)=M#Sk#kM*`H9qpWS}*6C5CO+ZvR{l* z{C&C+_et&tDX~u#?dNIhomml9Z3gVGOo~)?s`<1SjbO8QFGQ-&XvWBCZE%O>Bs{yErDc+J@{9iZmk%meB{H*d zq$$bYNsXh*tDdhnzwZ=yn%mXCXN$}LaZ4G!_+5fm$26G0GOnXTs=cd*24&OE$5^a2 zNYQA@o_0IcsC>y(r20xO={NrlC^GaRub<8?38&8eT@~psR|x&$-<428MK_IkV#HaC}kB~&-x8Z;izZNhqBA2df9v}#JEbK5A3u$`S7 z+Qa=?G+^l4!Pe6^e;toVBIaCmaHSanII@^O(|MiWo)j$2i+wq~JN2 z3`T#6*&}%1(r)3Rzt(#_@iuesyvc%vy=!Bi1lNzdeg=d^6*NA_UEK-OpD+C{ta`g@ zX^@lug;&eIKNkF)wCxw`3c~xWS8(6QzX%AHIm*YLY{~X(uJ0d^pHJ|Zqb~$b>vEJw zqztNR8${H1F?}9xu5^C0#1Fp{ z1cQ|nS#J`z;NwGe#j*Qv_D#aJ0g~Um*Anar;JrFp)san~YvET+UGDCVoBFb9UNhN;o!judn&X zY}*_{)HumBrb~F~Id#~}Ti#^Sq_h697I*j&;4 z!pHVo6SHJR?;H#_?iaHam9jzGNB;z1o(^WW|A50$GVUu>@s)|0bRrXqBkJ`r@4F^beEIG*$+1?|Nvs)OM85RPOOFhfb?`8=f&+ z?Pkj6o~h<-|2T5!#P?y;`eM9Gf`L1mWT!+B)3SWU)WW0cP$y|uYIg2b>EcY&mzO64 z@`I&vA;2<7b&PnMj?YGkV#cbp!;BQk@88Y)J(Q289(HooGdYLRy7)1jk!=Knv%y90 z&RkX#nX949?#<%vgDQn{U*OBX=OaFnrg>Ok0ejGQ-oTHRjC-$Ziaj#h<{z7&e zMK_DWLW55#!3jZtV_4qO;cw*C0sUPS?f8fXnH>WD@>~QdX$bXRF>Y=Gwr6Mg{3nIb z2Wub51$&;VsZFmv`NTCy^Et-h1~ie}6QlEquQ!{Nd(a#9sCOapMK8zxpg=TIeJx18 zgVgquC_(BMqLze_5h2s3uh%$Gv?;j*+6dBUd-ddvJ=YQ{Te@{~3y!%-5-15k@wg4W zQTTGueb+k>zUR@V-axLhkLc%OVS2f&$y-6*<7_}wIK&n3#I=p(4Hod6Yc|0t06tDux7SITW?iWA`Hmh57R z)e08-z&MOGn!QbjZsrLMZskwmX%LBQm9CmCS}qA9G=BQq)rbg;unTEeu?a_{g!6O1 z@MC*ly2RZXyN|Tk8!_orbZnH1YLN;c+58)JY6+-}Y4+Pw+9P19-UrNE+w}A}6!&_i zZE&Gx2}}p$8othbxm;5J7&OpJ*!CczSHy*h)7EwXFk~QXw{ryleH*K}&IlbrXa+lm za$e5Xx7eXas8=^EiDYnScc-b1By}2}QGc?$m%ONj%N=e~^`SmhruD7FCFeHR1t*xN z$?3E#OmCC{n3CgVo3t|`?k!q>?{Q;MS_LD?^Eoc$hdgj7^vS6vKuS)-&K2Ks}rgaR?syH-VlVQxEB62ZjQBsu?4@v z0K^lsWAUQFqQ)H25wS8c`c(AmwA4MOA-DYzw(^P{r~3+>jq@1;uj2eqQ-2|&OA{WY znaFO0`Z5sj1UZfx^R<)pl7?UxZTS}Pm{)>^GaZ8aU5TWJY$cmxerR%W<#oZH5P90x z*D+Cg)V7?`C!#n`FuQJzs4nO>^0Qwos69*5xYu-qz|Jc;QDG9$cINK>^sv*^9LIfq zfeHC_2U)l*>1R-gjqKPiYVHlXKy=2F?Ddren~n7=7QRpFWHLFmvFZ`-e~VUQAHny^ z#L!DGHPnrfjnBpNRr4(!7WyBjh15vz=Q6t*nYqF@4_*`|yxa(M{7p|UFo0dX3#Y2g z^C8uoRm%HiEahIxDAaT>GH!K*>2&uJQZwp5MhJ_D{M+73UaLLZjVz6yeEePi0u>=$ zNI+dOz9T~LUF5fY5H{^2#nct)-f$Murq3necqvruLAyW1lV%QBQVoKVXJK3j%M!*X05$O^ zFTj>DdM6+-ZO&8w6E3wB=#X)jALoWP*u;p7PUTH)@1AM)T5Tk0K$NHr6YX8jcr65_*UCQ+A^sVk=XF|J4x)XUJ5H=9h855N;z*(xunB6X1IAtZ}C zR{sRDOQ_tWnT`pCRjL!%5f!DfIvcVQ+L`Rrk*5N6ZG|%W{q_-=u<*{!M6Pwmt zjkTEZ_bV2%?@<3BU-v?8w(!*Kwk58FQ|8qd11rbex(6jiVxsHS73AuQCL~ohwz!np zgPJxgr5`gXJOK5Tn?l+Dd;rQ8aBktBoHDje_>FUqFdelgN5MHTuyMi>x 
zzN&NZ)eLJLzfO>xI3aXL?8;}t#vT4Rs}+>bJqd6MrFG8z4j>o8*2;~iqk{|vURhp5 zA7~!vv;XR{W0TsM(Z||5N}P=l=VVv@j}|~(>Z9dGm`F@*5Yc6OErU;^X4Ix-a}e>V zYiW#>^o+W;ABD?lNe3bf%S3OsU@zcvZkZ_`)n&PxlQGh+ld{A)YFJ-hi8m2#oW@??_2b*e z|KhCvnN~C-seGy!dj22m>b4qhGgYpiWai@sow`2H&z5r<>$7DKy$^;iZz0?iy#~Jo zg@s8uqIj8dV?9YP%6FbHma=@))DzeuE&m{&~)? zXA9Kq_lV>;DALx=bRGaOg^FvK0#wd|b=f$7I{iSLIgAk|?+}?%dQIB=xTlEGI4^-U z{>l(gROsf?{Hc{~Aw3Lb2fpZDwl`M9<-Qi4nM8UQ%yz~xvY$Z=}OHtARZ;~YJ z6P&=;UHxL!hWrKz;EwhUry)iX5Dx=tij~5~*dqRA zS&5x*-|Ym4i}$IIJ_JarXU!U)zFkaxXJ56?ig7t)+8|CGz`-W5i3cGYa(gg({kS%d z4)N-XSy8(`5yaxb2Op6^O-$m!0rb(kjA6gbMgNEU*VOn()^{siatm?!7Y+O0{OSW5 z)Aoz-0L(cbb->ZWsv`5H&ccyF`YEj4`_FqIJaDrBc(%|cwscZ>2ImwFRHrTfL<2o| zk4H%RriteTg<0dK2(7ee<=SnNF_NSuS{eQKTQ4c_Cne@XyZpy_-PjQgB&R8D`3{Gg z3d>x>tGXu%Uj_&`%wkQ2h)jtPLK=R-Amfyd999AUNG`Gluo6v2C@<>CYcBDW(3qK2 zl`nEy%H}DVF-pLnrm}Od%O6CroySIv={Tu@5_KnTpT?}oGG)@BcX$<_*&4CaMz@dy zt7H6ORl~g`?G~h34h%oNp9@pd#>T%VqqC5O`Ag`TRVlwPe<*ebPLvWK<;(DA?30K~ znjJ@&rMSjhV9I<=YqJeY%G^cFTRv2HmgUJDzJIsJUS%)0rZ3l|!iR$oNtW#~U@UV` z2xsclUTV*Fu~sVmzNyflB@R90ZlzY~ouDL<)lB18ILeCgtjC6b{9VnReHCwQTpo1Y zBc5cANnG}EYRBlK*uPT9mDI@pBuH^7?jtc{uR3a0cc|5dH4=aBcd1;7w-$W=M$D!Q zjo%V)|J%d`51@cc)cLJ1Wn-gp*)Us@Db*t6T;t_KOBJ(m?{1yTevMJ1n+1=T7`Q{U zk8x!7y*ewA?7}QL*TPs}gd%y!#@#xFr18L>gvto0 z_m^-`zIam%=X^FaKr86i`-7|wJ+LsOXQ+g%?X>=si9E!qZ(W9SJif~A<2XMu?LFkV zu;e~}(p5c)W@$6BgIH#qCaN`VQTQwNHTh4){@^I@3;Z*Ss`}TbSEliI7ul};=%Pmp>&^2+H)LzyPUoW}4^OZbIJQoS+E}zwlqJuw7vE3+qGtPj zvMVO9^AZVPTIj)ckx~QPqi%}^5km`ez{x3Q$V`so-sDMO6L9vOr842*O7|?OKoe#x zm|qKZ^e&oO1itD*7l&+4xqeWjd$ld-1aRs6Oi4o|f>QBHJnJ`Z;fxt~FV%&+s@dV* z&8FBtqLYj@BrEa?_OIJ8m8?%*?;Ebv2MpinN?J8KSn{RnJw)9)JWs@QxgpgN1< z?t;=vUIO#a(PHa=GfNq=Ol~Pt6*Q5kVzt*#|Jvt{?;|C_vjKvtm)ht#R+TN{TJ|tg z_Vg^f+I8{b;M-WeJTurxu#4N;c%M#zg#Tu3yse`+06$s$C_X@6NzyLEYtqW=vtnEe z_8k6XM97L4!TDSKdah^y80SC*nDSg{YhIS)Avex4$nTzHq(MMA#@ZNydGwO z^7V5NBCRJ~wOFz@&wW`8{y`!^G^6=Q{U)l{tq*72aM-C5`*0`oHk&L!!_iqQslq zZ*2rGo88TQ{BEWhjr9q=e8N6J#{vr6YuSV=oQcFVb!}dW?{;SaQFmL^yHyS%MxP|E z*ZLv6*7D+&{K{KtMiP_MLRl5-Z?I2cY&Jl;y+5S{L4rBa2-On01kTeK$5f=|=T@3B zgglJ-MNt4*_^ZU6Ek9eXKgj_Ib2V1F-|we+kDn+jhP~ohw|J@BpLR z0Dlr>I&7`Fkqe$u`*d%_UZdMKCE|d1HtxZ%tU8wf%U${xkCI*;Td}2WTZX8la$Tgr z)npN7XBG$@)Msv!lrB#Qb%YT!?ckm{>2qGXmqj)j%0w<|?rh&6)|#Dk`>&}{44E;3 zj69RtxJb|6Ks85MM@&y^nsL#9Dc*LXQH`L})v zaN+~pqv`(v27p#7Zs0viCp3a|J)AJwL3Q~@Rh{KZuH)lkzV~8kCzSk3+|UWTpsdjk zbz2iAz*oQK{ zj#T?ViRMPErr$%P@4(~TF2GlSv6y)YLCE{A{P7DSIll@80QvA3Mwd2g*&HM1>Uh%z zFwdMNUN#x9%0c^YB|qK+s0-C_94XkTOfA^Dz#$<$Beu-UhZJ5V{TcEbMqNS&Ym?1^ z_u~4afg^b|9`6P&R}I6wv=-_;2Y}I*0e^Wgla(Zn6qie}oBf|sZe+h6%3Qy3QJ4O= zI-pI{dOh063o$Di)?A>U*Q^=5-aPJhzs-x=3NHMU{V756hg}bpfVr8gGH-KM^~gK?fd^9TW1;9gxmjnQ4|n? 
zjTqp_jZiuiK}s5>BqXGS(H&CKJ=jR4q!bWPKo}hg0wbg*>1fyp=@J-ycAxvcfB$pN z>pa_o?a{8+_4;0)cz^9Z$@25Aq}_}US-ZH;%|kdoQWG(b@s`hCcgnAtUhZGMl(Wnb z#t2Vn`y3EqGJVVPmb0#qg{%GXli47xy%_shyb|Ju?dZ^^{<)`IY37F9q+8F6dCTc% zpjE26ppcIQb(wpH9Fji=l_lO3UFpENh8CqWsJMb3YKSi)1?sBsmIuRWoN^$`Qel;+ zMB-$${i20L!>*&6(Z}R+m&9NUmBE40m}oCdpdXkE`y#2De{zJ=%oclQ_~qmH@rus0 z4P@H*=P%i&_zN=+AOb)u^*H&8N*u!6!6$hyGEChxfyUA-cW#&rpQdKBo}U@{1kNg& zoSNopn;`6Zt}ZiKxM{l?qxC6w|Kc#*HO>T?gwk!3j9JiT#X)VW3(+@k;Y>2>t0|As zN_nylZ?>|J%G5NKG#!G_0Bfl2T8*dmZx;(wibJ%BkYYjY{SK%^CiB)|DG%b2ahkxW4Wia7!B{32F&jV0)HEh`ok2TXqLr!s^xLVm^2X+vKmw-gPYP z$NYV=#nXVT@YRM3#!URyDIfLtZ~9i0yT60DTW98a2DY!53&kxhQoK73^41f0KUw5S zw%tT7;K%lI%bay3zKPea`{3U1;5{&~z(0SG_{j%RVl{BUxB~8igY2oCgKmlTvU&3vcyx9=& z9F;lF)1nJC^>o%8)|AfzHewK+tf}~4${{VWB7zDXzxYBF-RN#}QULq=s$Yc>`Uezy zbr4{*Q#A*t8nCukb8GaL^5c;?YlIi@U1CK)o{t6j{CtvuTkd zn6OsywIIoUtALNKY(6SwCxj;ENQG^&s)F&=$Ogp@HIi*X8R`$G{^O zvZe})3lvk>f=^C7Wq97zs~vAjeUZVN?5@2q{<`6`o~HmG6t8d5yocxVUThz6Iea-e z<<9H!fe+1+VQ38OsUR+szfY;Qzpq#pWOnW*w=BcIbWr`W=ZL*2#EKHNV$2F zP^AW26Ngc{`&0#5b7ys6a|4{L1};o9j7ZYD@_s|!jj(8%`T~o?Y|Kg+ zswR^waxQYi>FRKpsQ^G4d1*P=!K13Svn zK~5pHfdQHv?72zzT9?|KrRCF}KfBYV_M7ZFWNX~l_TUjkB;jO=1bhh*&AHRDzEl%` zv(n4}#&qdr2Vb#Ae9wFI>2x5AMC&2|{rw1VU)I^{>K3!fhrjy85AcT7y7KfW)d9Z# zgQ|Fjwp@-rCWpubeML@Bgr6DCyH!GShH(es8AQ$K3VfTU^(^v>L#(AMU8W?q``*P; zWT!!{2v1e`eHmnI^Y|hy1`r(we)TZmb%Me-8mR9rog?maT6TvFudacB@}gE}%iBAI z%XMD=kx)VhO*&42;8gawHoF1`{7V+n^t7vH>J|-#pUjA3BOS%7Nn2CQ@f_)dl-aOf zwo&yJ{Yn_ojnA_e-u)Y7XFB?I;Mw+S$&TJCB7x6kBnSXrM#Zd@8Yjx~FcMP`ccIA< z-}eklnED&DgsVh*7S$Wcgo7WghH-BH&1vn#=7`I`?gq#Grq2(Mp z>ASoTq2;^w%a^!Ir|DKDuNxo4$YlrZ4>)hah>G;+Z=;PK^95COWGnlCxn#g4LuHWd z-x+avH^PaJXMZkbpWkR+%V7LsXR~m}){5|Gp>wY&W2?j)Lm?qyQ-ii68sUJ~H|rYy zakO?xIX!-4B*qHPXo>-5ie|1I3I$&-?Iwgyd1 zdj6h0Mw`wbr(n;LGe9HpVTe~`DbX@DlpvFscPe^NS716^JxZSQRy%d|RE@fWOtpDY z?*qnE;74^y*l(M7+hjX+xh0_p07)k9Hdoy~rPRi~*1EJ`O1rEhTiR;-pPnPa)WIoU zd7ft1y+j!KlGE46d6e&aF%I8{jtO;^C3i)CQ_6uT*HPS$zw-_l&!jHjFG^xwFW57| zeFA@?OrT5QI60(3eP$~vyGlt@ceUZT4pzv9K&1Waa_?!fo+e}}+d<{}fsjJ}6vTjP z73kw+FFas2B?=yhpa=bn?R9LG)igiQ?T_3uu>;{p?|hxV%x{p!zO6m2*)8Az|*N!kA~Lm?0t93 z@)H2g?Hw{AZZ22?N1cXUm6p%3baoiUoe?(y2p|s?PA_>^19Mm7IyYCN`LEt(cEK-d z{*OW3IlWL8!Q!fD2sw+{+0921ORZOe5P!DIIW1}Hz-n41x(irC_q=wXB(u9>J>;2^ zSyEZOy;Z_&V~^Y79lDI0q6mg{&JQCz^#dV`4p!YAOgZpzWW&)*q^M=kjsxA9xKIFT zteiuVipl|#94G^D;ArnsiR{#Vq50l>^?jF5UQgt>aWr?acP2L9 z&3$G4vl4fh(R#3GhyRZ0w2o0xS58Wz?I19BC9~J`$m0!(FWF&PJDK(jM>c;&V8CQgCjR z)!`CFT>XnU>0QTc+A9m3%gYIcCesjYpQz_#av`goUt+H|PGs(4WcP06r_T??aslu= zh#L4bVR8!3ey8hc;x9U9IO|xUR>)Cnh+{sJ`Q|#SC%r8t($H&V?ZDhYw;yl!<>7u0RbT;hGU zTQ?pqjK$h+_Ce(-OR5U&q#~n54J{|b9+Y=ONliC(%<#-1jm7}BC`UFxzFp3XYV@qp zG#^NGXl@zb$;RWNS}cB~nXFu7mlI#=tnQbyuzPpDohJ$QAP@b#N`%_QL`FSm#r%l3 z*!D)iSV^PiK$coehn{9SbBRs%ev`3;cvd#=eKGn+YX~YE>muZ=RnJ#F7F!nbdiY;G z8wY(V5`DWg5DkK4++Ca|$F<`paxdgJua9%vUhG5ZPKu>l#RJbOv|Qr9uk+?y)Wj9r ztZiFAK1P>SF?hR8(p((mE_U=2T7Ph6CC%yk{*1TNpNNOcc2AVU62BV6BKM?@`T&SP zYCD%U8>)G;nn;A~IJY*vX-j_A&5Jvs=cL)tQvv3-7MQDRW)xW3PD8?sYCSPoi*10Z zs-!LCV5jHH*-nCU<%Q!+$nS`eq~Mh;rjDvWLjhz)@ZsT&%if?%mC)Jh7}weS*&~Dh zXbmnmNYg6=MG_^CU)*rITnKkZ;Pa10)LgXaVq7B?oL)%$_s#ZiXH2G^Jm_M>GD$e} zq+j&vYYSfZa(&GIXoppwT%1%^m(q%?Kqx7ymdRtc_L)p`vZs7YTdGwPVBs?94d2IS zVQ(kX;8h^so~Qn|hSeP+msHT4N-4!nfG`;q>}MrYw)I376nWUD$^tDVXGVUF^Wfd0 zTwgCp9I&3$Jiz>o-__R_nC9dLd4*d?h8;PL+)1lXZb2@Rvzm6p2UL%sF0yJeP#7rU z9n+`H_gjf~lu02ADdivqKy9D0A~Z`D!J-=Glw^N6AN}`GPVs^8p>a`F=E9%bQQy4| z*_jhO)E1gUZFOAPb%CPqP`#|(0S_qk#-xv5OP|{Mj~GTtno(!!QFw6`OW36gE|uk{ zT!D1qWE9^w8|$7ggc8$iOecs35<(!MXA_mc7a#N!N#mN6N#rSc`eMS5Q+9pto|y%^ 
z=t?JF^g6?KWbvKd$=~4~T%vjtJ5{Q?>-?S^riPXErZP`n(!orseucUkfHW!Iz1y4&xnh!YNwwVzv$jE2BU+!R$IQuQg6;Hk=x3omvM-0@(d0||QoSP&|bn--U= zI{X^ijrP0?iW3WD95D?B1?m~|23{x0llUAvc(-S`vn+GuIOZk^rBe4#Ah#`9ePQgv zFza!<(7TkD{5XfegPKdY*^VN_zZK=)D&=M_*H*iJh>_%+yPT1q8+wU-Kl(qfupgn(KZv6%tb1V6bBeKz zR$431{9KpOGNnrOjuKO{wm_w_|@)%Qvn`PkoC*piIBZo zPf^BK1?RMC31z{cl2hi(?x!cqf1YMxS_=6R0^K0A{lN4%*-5^Et{|0Up1`Rue67}R zyM>ZG(cuoEi_wPTZ?GK08NTsklZD2bGV6SMSMhva4=k)*WoWn61)L+91FaZOX&xQK zxK@SMr|hw{fW+|tBxB&K$UbrVK|hwrL4}o+Y8iY0{`HUA%N?ep>q_%fIPHwfjB*$7 zM$PHczB%auP1~X^>W!n0+3~0|E?qj%8aBUzbsZ|tOpe*N43={5$)B&@OqAR!kZ1nY z(H6L9*=1EjGm~gK`+nlT*%w>V^ok3gZ@c_GG+_%E>Y%`H>yABg5CJWwBZK<%iQ|7v zh<}wAq{*+(6S^Pa2E;MQ6Tdlzl8{Zhy=s`H9$Jl57yjVNOkK37h%yU7&NZZ0y%!Ne5TvpSR8M^}iHy zQ(Jj{bsC|E6eMK{SwDX=L5$pun*!$f(|)#~jbx3gqwg|2fR5Zek}QE{{5>TdYXVr<3n0kf}4DQV4sdI|ex zr>xCmft2>sKLnKyKL1Q8xxL!^AJr^nxXku#COwIFIN6Jq%WyvR5kraa%-__9i{XO) zoD)FWluD}qW&!ZMK1+4cWGyp`pT5Z`#xfza?uIHp#M|8Qo(mWX@@gLuazN1RDw}mK zt|o%kPF=~GqHi6h-~2fILQ`dd)N;B1nr)+Z_rfQf>GKSuqLhyxE=0lyvIw4Mpx#@AH21Df}lQlO%PWC=|py ziraO`=XhHDO~`){^c=z~C)oaOpg!n6Di6`Ns_xp#ET2`X^i83$U5)2~mkjhO#~4zh z8fb*%?dJTos??0u3wja)Y7-TI_4%zk&)0iu34V&g*8G?E<}D|cna&@03RuOTif6w_ zg3h)S@}A6d2mW3V((vT{gZn)>!$a3RO2v<7W!v*8M3`6=yyt+%&jO6D>Qk>E^l4l;zX4?$W^&D3^3V z=j~84$mk&+a>`}LFbc$1QG~SltF@iyW!0a%oV)Tw)+8wOc_fuSTd{+E7_?RtWeBx2 zt$oqLa{qnyYjl`3RcljgyUH{cjwheY^hcQ20E40MXQJT1)SvMePT5{a=9XWVOSi?Q z-=Q>zUrze-K<9R+438id4EVURv7bZOQcr+AK>f|cjzlYp4l6nJ%iFqXz)&#VOSuU9 zQ#ONX{A}ueSwp5)plhja@ji=!Nq5mS3qBZgVm=eQ^Gg*2Uq(21J0;A%;+mi(E*;FJCH$Gqs z(Dr8Dk2G6EZA`-;J?myy;>tbKhr)wDQ@MY2^Aq_IkLRodT%#KZLN}wWwY}#{3EHbXArr@m&1g){G=7WXY5G@lHnouDC=%|=6UsTZ(5hqi`*+Sx zQo(p|gQel^kV!V-_*Ko|6&QBY@V{g5zuRKO`*&d&x;y9|BHl{q67FU_*ooIUKfFp@ zWj!i@h$x^ry8-Xig>1MWG@S7snu`ntbZdz$g3>a-4|lMte<6&(OL7-$-j)_~b+gH0 zuLieC+2*G0ZIxP+=T%i<2Kqev^lLdiJ{(a*O?6O)y5pc%-G((4w3w|1=Mb>vQ8a9^ zOl$`0QUogB6JrdBtQG$5A7;w&isg+VND`%Z!3sE98;-}uLk@r21 zno%u>UM?=$qf7S}iW1h<3{T1YNXySaLdkO=#pHZ35T|h58$e>8VvG|s_5JgNs@T#l zNr`BNy!adhMBJz6o@os6sHT zV;&0Ez>L_*6X#7EiK-7C3$@Q4v>@LKkg7XfXzhbS`gU{7hrtr#1G;_MDLEZT8kdDV zY30{oKJ87DP~ckkBqSr6NxEbpl?QJ!YJ#lU1?Ue0JtHYijl^TlIbRBO+If!N|@Z)^ma3Z{5qSy@9^60i|3NtOn?z(hd$R+Tx-ne9wlL^y5n3}b820ifjc?8~6L$76yN#sC7m1*Nn-bdR8t#V)O9mSxh{wIKVf4=70KWC=0^1fU z^WGiNX$=L6x_$}%B`nvRz!s&AlirH>SVlP3y?7eJ%Zb&RMPae zDWmhirttPmToroR8XQthd8<3llA1k4BJoLigTjpRX03OEWB*F-ApJL8o!Y4+MCLhNknnVDN$u&TJ2v^R3fRm zA|*+JZh?KyKh^V@o&_7COTb@d?{t*ChCK5AL0D)v%Y(ssSiCi7{H{g_{oa-F%-}$4 z!oncx_?wBSi$lh<;6TES-Fwr!>K#=&>;X>abLFe@eYDfg|M+75LI1lp6HD{P{+o5> z)e)S*{Y7_G>tL`aR;rSy5Pn$-COQjyk#B{O3SCrAM*nxdmRJ*?hIkCCFm|n8VHj~= zPyD*`L(ko3-iKmdE?f@zE*cK}+qKfKGIqvlgsBpglR`hBjs6bEm zd%rot_}22R>7t|~rfjV`t;x$PEezt!qAK4kTG=~o%kWGogwb9)kSXf5*f=VkL%C8K z)*nd-MHO8JfF`wGy){VyY`{YfO|LycOYe6OY||z1cuBIOS5_p8n2pdF`cT@NHC_xovqxvxQISmslS`!*hIR`D!q0~s(Xqls1~59Jy! 
zfoDHrXO|;Lb6#oQTV|iG@1MfOM3Z7xQTe8JnpIU@LNop5qqg?)B%?*m4x~4Z>{%u& zRT%0!yEi)wGkhWK;s`^#yOE}-Wi6k%NeU`&SG})trsS`tvpx=4X9bjoDhGlj$Q%A5 zdaE`g*v8_nPiom)!jEMlh}p!1%;D?sJ+U2Z`H5W~lkmJ~Q`l-CmtXv#Ms_YL0w1FB7d0zmQ`GCL42Ij5k+55?}A^ZWst#VTs}jR1rQCs!yLEN5PVPW@S|VXKG}N_m+8w-&UOF95I%5L)sY)m zVUdJ=sns5f77U+Ua%hlyNIK#w;Dt`YO@f{Z={N=Ancbq+LscLcD7sfccp4;+qYzER zMq_$0xE85Y^Co5bKD@ncjQOR7R7mB)axB%C5UKeG;>-I7=*+y7b^&2Ie>Xz_CeBgA zkjEtL_wV}sPbalFl8_wV{hiuRHt_8Y42WNwzGioFbe;ixKeo++)B}2^@~qFx@>|N@ zhx!Xb>w1aZk+W?(c?gcNabL*_lMzGL_W02N5d4dK4FV*d8=ahA-OM zEMfU9^~G+jpGrZZgc%BcgXBCL!lvh*35otbg04?B4UwUwLY_rbu4-+|Q%^8CR!V&_ zGwPnScEFQwFt!o6QM=(}`1Ae{`i=BM^uixR@NwpMCGt+Um&9@PoCwG#Ia-5W zz#k`X!)>A-jsO-jvtl;;loL<#$(KtsoPx=8Dx)B@Nghf~>J~Q8RPg;P7y_WQE?e|~ zmf<7fkMfaS_XjS4l%UhFOl3s~wC?6)Vy$u`T{OV=XH1b?OY<2ZK5||0rZxCyf`f7~ zG=#!r?9+vBq*I&34EX7qpiAsDPLj?q<8iyj=u8vuZdb*bV$q|C2K2`QN)}xz3*NZDDdQc zU|xT;HswQ5vcR3Sj8pU0{!Q&iu@o1`G%2@j;~%|J?QZV^(Q_?mQ=OCNEElmm@t?mV zVEEHI)VxJYUE7m?CV+o64(pvsR|c}^^h>mel!7L2JBBO7Rli!li+K>gC^#SKzOjEt zKEAE@cOU-O$zSwip*@g%_Ck#Cpr3aBuVF77PwRvB*v3xoGhulIu_v&YKi}cbW|4GP zQ(jXhJ==lsliPQ*;`03!LZp~3!ZH8oV4Cn&SxBN0kOdz`e+{w-PS#a#vgVS@BfQN- ze2Au5Z(SucpBpU4IjYiBnlxzCTlJ708SYWAr&jh0W1zcwcb2js$Vsz{SJ_Woo^d1s z8)%KuArE8koF+9S?v@-2s*VHz*m>~tYv#;eL zZ|p2_Y8%=9nmH7qFs{6XvxDy^SJssB&KZqU-Z=5Y_N=4|tuvhpLK9&JcNZr-`L@5i ztn^z%|Bm&u!wqri1EHM7ysY#|_z{z^XPsv|JAf+gV;WxE6LFNL1%Z|&1SEX`q<_s9 z`nm_re)JW`>scl_(p}Kuteo%^U9cIZJ|tNc0Q;hi|6{?%Um60-%pOAuY7!T3UgoO3 zHo?YDG})Gm7iN2kg`yL?<8r^8O)UG&#o@)Ao9by5m?^g&7)`eMp|4KeT*RPY%>;zk z_YCWroMT($gnY;gP&pZ9**?8uWPEI{qaJ@7+QI;d0Z&YcWT!O2yLv8M7no-0%|2I0A$bg?xw zo*W-@cewHV*|m>ryZmyl7Q2p+k)p5?Wv^~*5G>R`qI&JKG~2_ZvoU$%-rgro?sAuZ zjN9J4Fbf=K@H~^19sd2B@oXTQPASDyXMZCl$|^K;>9eOpKhqHx`{cy=|CUgz@DPuy z_n$of2Jlk-Ss+fQM#SkfL&mzKfe(hoY@gYlC-u)se+U!)w&aeJ($7cXA(Lrsc=(xZVga3MoEX?{=iZh&tfbZmQh`<*n&p2b{FOb}Eg{p|x z%HL>LIWP$I3c81mlwvHR_NxxG>Gy9#h{$jV0T9qDC*8~X@q@+E$Fl%ZJ1HJ(y(=MW z*W|(5B#w(u@>w}kBHnX7KcqZy^i;|^{c1+a%}Gg16jOV-6Zh_*?u;-itg)r0JfU`s z8&G}+fgW0Rf0kJxnM5{D*hetznEV))YvhChi91+tOBYL90f6)_TaBzL#^PUbR>1ay zb%{A*vR6v%@7#;rA8ux{D=Js1uhiz8Z=8iSoW85v0nz@=aOh>aKd(q_JzjtiTpNSRwB*LBz!opSQ!L~*CwK3&(>sQfN$%2pCj`#n>YHmu{MNc4k$ zP0;t4{1Cf^_FthMwqR?(`R%N?h>8gXoR~_d*>+B#Sh~ApU`knC3Auj? 
zqXj9+suuoH%)jrCdCbFc$YVeKgH_Vrm@rk_BEP5mv(o1i0PzC>oyX13N{%V49g z(JU=xf~u3z=AflpMg`_q$dA#q#&_ipWRplZN2)olQ~H0I4|JiKs0vpZ@{}m~p2&MT zyWP_7huFD~MZvrS`eD`yk-o<8aKUfnr>|8_h-uxQi9zmb4bitF)M5L92i@Hiyg2~& zlj%{|?0y%@&73Yq*6~uLKIR|mb>TkpM^2MBY<*PCz z?p-ts)ZuQlR(gHk@lzu*Xl;>vS-o$Z&(1NSJa_vxjjivtip*OC6;p?lN_^50>^5jK z7D{ru($$fT*#EUwmmmbbOQ=0xoZ;wf;GUyxq0Jc1#j>>9KQwG|ygk&CRJk=3;BW{N z-_U1s&NAjlgkt7rv0X7}KO8@*#kZOYrp{5gH8=cq#!RpJ#8AMHT3Pq76x-iJr6;$_Lt^E(Si>B`z^*p+9U6w5+6EGL;ddcV|+so>k;0ERfrf z^N!ivXx$h9Zq+~SBc=`ur14fW*w6KA@0vusUndwPG_88F5ZE9s^o=3Mb8vlaIX+c4 z3$Q$)od%Z}_{Lh_-4dB2!+Z3;>IQjd`W%SY^1S56r5CYvppQ*)F#mUmMdvzcdU*(k z#p}zTH(s8mM9wh?hn7cMS)RLxf-!m3^KY-EPN4rUD~l?8o<6+B{q#*v=z$6f<4qS`7T`%L)}dOWqWB3QdVp=AQ~9 zkB+W}M*zxBSPF< z%_(zm8|C}J+azE;Yy@I8rBc}nbQjQN8|&;o>+0GL6%XbVz9`qceoy(+JaDpCiwjk6 z%JM68`DJNu;f0rdBkQn7WHf8%sZIE`wX7Z`2jPME7UJ7XJ+M`8Ag_N5RKMKgP%`U-xso{eWcd}!ksM01!H$bAvfH9L6G`=lP<}K|xDU z_PDomgrPqswUVlTD-te_+?YG_e#v`ORR42ST~^7>Pz$_!TPO2L6wsa?&p;MdVmYBl z!hLUsc>V5=IeJbp6A_HV;_OFl50I8mtDxKccVxMhe(kR-?ODaipAY;#KoW*fn;{_l zt}8`6!6aZNS28&t!~6P^C4XzG?tAu3llK(KgwXRBmpNzWHLF@{Nv0=(Q+>j#qmu^c zRgmA_YMlwQ%D|YdKCaX8k^Q*6P)JerN!+=f+7_`09>S%#sK0O3rIR;$T+Ix1V(nw( z-H$%mp3?Mq=wpMf;W27^S6P;OI=~p6AV%2vs`Z^gd@oJ2BgmYw?PZV+T`VC*c;L}; ziM{-I5BmyZnHso@UO9N`nGI2R6>q1RTVGjSuf$WPIeRB(*4x`=D7d^%h+|<89(`%I z$~W2d*otmZk1eJ2&hdDH+BI>_!)a`MKRRlPlcb%lP;6Amuz0nN5N+dxtq zmma1T50a|kP+wld@VG{`^Dei%Cg%hlqGODfRo2;@AOPj!$m*J0{5P!KUB~@kFQa_S zS?>AvK_z6B%(`>B{F~y`OsuV7WZw8^_Z-z}vUI0$`>mc&x5EaRuZV{w^NXeCbsE@L z9qUID8-1E40XfC3HS9fa;wdIcv(ctYD>8)mNBT0wTlZb`inSE_MqkypS?MPf>N)s~ z7o2|8iW0Hy#M$jEK(S${dp$9}hji*Z_Rw42^Nl}7&wt{L4%AJVQ51W-Pq=H&z8>0N zmyrj{Uqsw=Q#M)WTH#kix_vb%UVK6hFQeUP8q>vXrPS%HFl?)|r9Bh@jma_w(uB9vOX5s=#ZkZVVr$3TxJOM@wbMdfkdu9AN=m(nJuG7uJ`dN=Tv zXskakL7|&-g`EGL!kChBB$x6l-u4(4KV-DZb(8OU4m@e{Vl6hxUIS5u7ltoJB>X%e zw`_wHc_5K-7Jz2B;ZpU0@iL)l+yB}!^iM708nQ@*!--N6F!`Ac0fIj%;;7D8Aam5V zQH+W)2-+TdHq)$D9!0NA4G|K2ZS3&GjGqjMxOQ0nhFm|nN}VXjUUCSfVDd2dJ2H_h z(NCqW)2&@S=Y~_nCO6Mp8b9srv>V$KPd~LxOf`G)G*B^}?2+<$8S7}`wKY;bkm7bo zsgO&l8WElg$KS<N4JRymNEsBN<+pWQLUzd zT`S1{W&w;jO8G7(KjlXm=7~OR&1-0U+mi^u8NaSdwgKMELUJ7E%%`+<8{XoV4y#Ku z$Mp;P-vf86yK{+I=>o;KXYx%Dzv8qAnkJhsWI;k$QN>Bj++Ir;g@n*3{Ws+IlUFkJ zGPJ2%w1><_^>(5i+jd3YMOD9Kd*?w}#(X;jTzygQ!I}bNdx%fKNP+q-z11X9?hj(r z-D$6Wnu+w-IEb?&nFEh1H(lnSzBNnjaU~-P9B8 zn_b7tFVsT+4zl3 z6ypw}Y$SLj?$VI~LU&8{8{IE!jKke~^BIknP=A9&dM+xG&71p0TyK!dYY_>8@9TF< zPWo{9^yhF>7c-7hFCA5$c1<@i+R^#r{JmQxK@R6;UhUq<14fz z`e~wOkb3=ED_;n$Ju@ZP2pOF@c|>6PS%|bt{|>D1XU+)#<4 zfuO7U1phZsd|eW#VORg!Gm}X#00;A_(FnRO&EF&AdtFq%lYXT;%j0?eAkoof9{mG?3XH#^f5T4VnYu<)O;X*=arEuS>V6fHn*{$kaXx=z$?t9OIRapf451*=e zu_N%8t?8idm2omZ{B_28-Ms#$YdZLB(dEmxPz$fIMU)-PG6+|`(}k^*6%{zOp8vjr z{>7;iVudxQnF9-7oq}(jUp&6(%12(5$U0|VX*pB<7dspCBQ)m6f0_*;1U$kbKy`pk z8V_Wj=d;4 zGrp!lD}TjtO=N5azFz%=$V??JNK;CHdce{HT~fWbVWexwWN%KMl(SxKlazX{l{kJI zlIm>Q;h3?(=xa;R>wuqSa`{k?6kl>jE-~CF##b|A_GdqTQ)Wvb&>wD6z;FyKBy~hX zK*E5uK2AjiftTb@#vEaG=8Aml(hxoQ@2ex!%3QkCxZn2bT9hok0ZZ(9ERJzeLW&Ps zn#qh(pN9fkdyR^C1>zcJptoxz!-`a_OgdDr;Cp>d za-I42Ddi9NKk~sUB6MHd6D4UlTW?!!-%CvK`KZ|aV43z)KqB^O8Yw_a1xAN|Wj)dfrnTb}6+f@1l;9@3<@K z;sTgE>s24OB@atGTOW1zslExb`KV%g$Qo0n1gKf7maHk4OR7E2K}lFNJ;&#X+Q|#K zM%hk&Z2L^RvKhbi=j~%;O1`*vT43*fHTFR%?QoXv(AAV;a)XyX_j>-kExmq{R~={4 zEzTqBhF*WXoz)NVT|FVM=|+?m!fbc6+suP6YmfM#dm3SC5D_4?@2$yQCwTtu=DS=z zN4EiF-BGsDN|tZG#Etyr@C zybZ4J2HARgST$?SzHVepPQ|#TPVi>opSRA zIb!bin02#qE_8D!A`M}^xtj}{feY2mQ_tfk+W(oHWge5ZdjDqd?4yx?ys*r#eK{p@ z1#X^pYw7y>wxrv{vWep;Q@Vx_vP^z3*Sr`0OJSzxSkvl7y#Kkjh?uM9VcSW16G>Qp 
z7cWB+L->%A5A?c~{Kq?OFJ!vd4J78#)Y|RkzAn1_Q&AORB`%@SqiRwmc9adke>aWziX`m;`+_H0LKX9j>PB6DnS1aw(ZR)^Y3hPws$1M88s_mmJ;_XDhdQW-}zB+?A;I%kyyz?F&xy0 z+QtPS6C`J4Jw?Ey_E(6EvD}_u+Q;o(Et2e_?+ASaFxg@JnU&M_k{uMUZv2UrSuN1w zguCB6gJI|!-Bo*;4RWJ8G;JU}SC&!R+KeIbgnGA}@H6L_S<|X}8+*;PHX!jHB6GM{ zShRWmpwC!a8eOweu2J4SlmNT^d*5Hf-?>oochH05E@U1?RLHv?u}pDO z$6}S=6%V$0d%dL(=?)UnA%A>4A9{72V3~h~#|F6SAeqL%XWLDtcSIeee=JK1^>{U@ zQh}L`+Exl!KNwX|{zt>Dfx1VniQX^c_|7 zeT@CO!UNi3NotN7eok;orMi5JVM;jph^5@~{t@k{HnG3n;dr*5F#+XX{ip|va6fZ( z{VhdONXtc1ukz~^QAiVcfhDPsvMR{PE2C_6A>K@xhkqU;CI?L6j7X^jnY{aWb-K+a zhK;yEJ0D#1Al0G`$+@Xt8vQp~mWW3(eW%;%bK^MW;}niP;+sGtvI4=q{saxdrJ%^& zDMd}$rWHJ*qGCi%IZ{X2w(O18y)kY+hf-dO*{t}u*L6(cX3@Eq{2ObtDLFScmeZ~^ z!17;_lOX=^Xe#cr$#` z47g@0^s)*lQKGXeAwe2_3IkTyd*T>E-Zob%c)QwJP{`=E9Yd<07 zB%>$%RG{l+-*CQ}gHnk7tr&3nxaY21pcTExI|j`aVc+XepUjr2&on3I1#D_QPWs!= z{K`Kyt)4+vOPVw{i8x0{W;Ad)9qLa$7c*X8^VHX!v%TY%)t3;{nPJepnoA}54Yi2? zn-m4Gz^nK9EAQ&7NNlUPDV?VNoZS;v)PZOrG`Bl3ijzY4o}P`l1EamuM{R;UUoBb6 zet%5}KAu7)rE(#8l6}9Qt_dd{v+bsr6bb)&Jo`@rqC#PzV7iTty>2J5nO^Q^pcDnY-ho_=26lNpbv#L^7u_7XY{Jm0PA#j(|w< zTFGeERas4L>SL6G14R;~V6xdvjTrBwAG0CBTgil9Nqh|BULc-D^#sZ@!>qb^P3vOq zt)s@Z?wMmCV>LM;u0b`yqVR9INVMpXm>0W9M&m7NG{W!ELanGb+OBR}3|%e3^3V4P zr0J??ohbW0{X~zl79@1w#EB*0Jpl<6yz^F~@9_~l$2Tdjet;Vnfub0`emBSgO)WR? z!ai6MLfBkhNVrpeF^_FU2kn_LHQe@kvN@!4SP{CnS)JpX@Ct&nLf1Pj-blm4SUg>d zn9T`f6716fD?WQ;v!a>PBQe+n=|Paguf2;!TcHLnyC2+Bj4WS!{nN8864oQi*Fv1& zXH%x$GNB+kgou^@CMVisZ&zy1HA{6N@_JcUZ%5YU(Qx&-A>h*AZtnAe*CrdR`{wV$ zi-wDWmv{b^;w;9gS(iuI7z>=coR92WPQ9%QIl=zMFzx?&LHX_fXNg8?(y=S{7S7@y6p^{Z@FF8FHCm3V=;XX3% zoa?P(CMPd)6vL_hY6mik98}lEyWTUB)!B?z2#shwOIrs-t)LdsivTRKT4h*ZJR5KTHw#;8^c4 zJNjadZwIV_@S*D<@IJBHT#Lp&AC)Jvsm=yj1#=`y0HX*7az^#%S!>8zxIUMl?F(su zy}i5iYWO2}yNArcd_O2efK~J9qm`4$;>kn2>MU0pZG_p`63%o>5fAZ@vr^ac_5Ik) zZwJ@3bI7-GblYbldsz=ve1}@P=f9=aNyp=G_(a8>bIe#()_)SN+vU1oQ(?Jc`cWo0Ue?xLNaBla(o@k8}$CP>BY|zYqo68MtHAWv1=JY-V;_F^P(uLIH$i zQL~+=+7UaS?I!n*0mIY%QFFeWlgx2~H$~E&ce3j+`Kb?crz?F)y1qC2Rr`AU_|LZ_ zOXVZ8u0@noc~1T?sTv1AizE>X-HmU;jzj#}Xyrr91mXs|RySa-`GeqRXVm3c&i7O8 zUFoxV;xP*XR4on)G*7t0Aklvp8?htjn=J!k$r*swb&TKryecef1rO(3x0lb2ooDF7u0cCEJmPA9>8_O0lQ|gyVojbosq5?%td=OB2x;>1`cuK^zsD5tY%_ zy|xV5yL0mW)&l}nSiJmGJjNKj&j5*^Ggsj_SGr`psbP6;jE=;{?r6MD&54cEJU5rP znOMqx&(3xpaRE3Bk(`YzcNQG1f3b}azMq^1yApIRhj!@b0@Y{xt?pAlLM8X^ ztmN&jkrgerkJR7nyn7q^g~DK(DsBJl;Mwd=p->UdY|-mk_C@fltVc3xD+Kzu4Ft;h zKC_By!Sv?*ebg-wtRoU}1b>Oi4R#^8uqisvn)hg0eN&#>M)Ye)Y!ZN)O%T$!0eEu) z`PF3hBdfecjJ$S7vPp`y_E0uhG_>&_4F9PI#q3@ZR$NkvQ10IPwm4u2`zKPWdh9zo zzT@lgjzmG6KF0m@eM?SKu9ac8%z(IMQq^m&&r-Q?_#drVYoDYMY-x77g+*;c-He)) zL%P&Ui*8uHlWd}*BXL5FAk{(33~r^RE6I|h(ZtD$YUVJiIEro|Ras-!jqlFb__#$; zE&}28_liYILJO4yE)JRx+iC9N-pSq*tyM)l6J_?x;lo`nebW(*>4H<1cgYdzXeMX9 zk`rlkfW9^4owc(n2YY8{sxhl_K_Y5WoLRcRiBOT`Pi2o9LymQrY5mmA?KzQ;?^PM!oYk* z`0zk%uqMlN8(T&o6ezblUNY+w8d|F*ebK518Hu`GO;{{<<+pDIt=AF!I}Sik==LI` z*h_nJ!knhvrJNkvMc;GtI`>y3O0n+0^FRJGT-$mO4ilh|Is1f)3*p)NZsA`k{y*_K z*AKi!N!a3Pw`31rjgRG{*);f8keo-8^R# z%O$DSqapoA`=baU0m|8vRaq4Qj;a&k2GpeG%5iLMwbb`1KX(dO$eZGfFw6k~pJ<>Q zXoLl~Ih%m{+Z&`o!fdKvv0Hy8t!%lrVq&8syt!QbwMbi1HZhTIaa+|w2;eI{D45kq z-MN<+bSqGwm0`rB{dv99^&LP3llhBK-wmI>OMV;Vkg_}Agz8DJ)|7K=;(ud0^yjqm z5MK0Ek$vb6NxjojiJ(-*rdL702(Y}z`14J~v&|&A?J3r{ct-!fk207f}rcHpmJjIj%EgRF$8K*zI_MxA12P`jv>3!xk@%iHWVJq)I zO$pLg$mg0dbkbm@Pc;7viFrL5szNoc-A8C9OQhyp_*q z_)bj7mh5IiT+*HliYKg%f^O` zg?`jQ;jv2dc)ujq4Iq2Nac!^`^dqI1>zfU+l|GDz82uFzMJ)z(nW7jqo9ROt)O(Lr zUEuCqfPG!3PA*dfliVg0X!d&o3pV?$&WaYjx4^qnHW)U0W@NJdSmu{LJt$T=+X7 zya|ao)??Q1bJ~UTDAm@loPlr2%k~(3EgQelSEEA`i!?UdArJ z`dYU>Iq~vf&*V$Vqjg=^WY+!Q!gwi>5(E3^tARAn<8P-$RZFJXbN6f4j2xM5G}hLN 
z%Q0kd%Gk(FX1MNLw2m}Se81cXou54lyO{iruX!cnzx#{o?4)9c$B$8l+2Fkp{YLMk z6NmwqYJ6%AjQdhvx8CcMm9=(~A@oLDtKi9F!N$c|FvSeEUOH27nkPom27BZAR{g&- zISO48wk+Yz)55&vgN4Aecbxel#5G$~X%N+@jd<<9o0b2w9}G1l@!0$>ZW%OhMRj`I z)`I^kW&4A(?PZ$I$=VP3tMg+F4-{BGceq3NR{JBaC@I+J?_SMHMoaMaxdyapv)xaY z^*x=^eS*Uz50XUGd#o>f8JN;sU35kS+z0z89PxmFc&;`L^yBCQRsqKD zt8j9Di_11FB`1_q+Dm*$FNu0Z5||~EX(*=j22uzd*j8p?rvAlhy7en26qpFmmGJ~d zKh{!qUIk`>q-cIbS%PR%a{Lm&xoP>_;HDNHTa~f7QMj^w-5d^XcCpT`=u)u)7;4ox zc6tbL&njZ6H^`sb425ZP)BrQwy!gr9vqaEKfK*bpSE)=Xl|Qh4eAq%$4hY2yd;ezX znD^M5l};Ek|Lfd;`3BEXNc>F7YqZ@TZwqi~$Y%b?OQd^T8MfzS-m~u9U%~H?w3TV} ziLA2PF-wG=FjWRez2JXfx>k7cN*HDd;fKUS9(Z#S$x4T0%q*^3{Ws`t-QwRHw{CE0$2fhEC^!#(gqpE)EsDC{xW7-?~_0>)?qLD$glU?5W5K% zmuO^U_pdCWLfYbdVtBwX^QEkj?AUe0(@B`)m&kw5^rk%^fA9AWX2FtxRy3j}v%!X* zb?-BYE!SSXapY&0Zo@o%Jd`CCN4`Re1B*0bbqXJWGTCXg;bew&sl9MQw=IrOq)AFQ z+7VrzD2%(1HF{pDl&+}d%f57({U#wVAiHGXE;k`ZGt89f={$F9hECkbiYkc)1{x)9 z>x}-0)v4ffydO*3e{8X>lnrqbVAlE2b7cE_2uOz67m$#E1|k{F9l$5=CNV#tR!Q_n zm(xv8%K(0Cm7R0}0SgT)8oYYIPkm1C20V$II2*I~03mQr1Yt^Ve3{a_o)K(thbtqs zX_$)$_M^#ctcU;RrZ&hMFu!MHJZrV~TkVlVGu#sYWaW4`hkhd!<<~2axxC(N_c)PU ztw(T@PG`mh%nfyZF>IH>qw19d>$6#*C6Y*Mc*r6lC9R)c&@0;Bqnv^y$-nd%2IAlC z?&Y>(PKhw-A{LB!u9-ATFus2kb#*esl5PiG5%YOzKEm$)PQ)=V29ewk!^0#5SQ}^B zS04vn{|AOqA$6^E;rvQ!44X?X_{ zQKTrXET&K)MO@Y_xlcz7dc!WEjd&|7uO$~UanLaSmVP8P=e z>M*2zQr2pIFwuqhxKsBs1FW&UdZc0(K!Ve?QztOXr?^h-uQKs6`-if5jSQU14(cRY z1bOqf{`Ea7*CXW{d5?6l}!CB>`uEE|M6$4S}^<*tm@yXX(i z`6vz*{9&&ApBxDahyT>uiY!;bo&VMXcn*G++TJ%#_sOvE;U0?LX(4JV2bvGp{d=DO zkL}nB7|#60Gj6ir$T$*u(U=v?Plc-8H+CbGUSaPrnA5PIVS910^>ZMnLLUznU(9<| zE{E7hwzP3sz5E}a-z!DjS9t<{iu{g|e_9Ew<~RbD0c4{diwcz|yK7bh;Zn9g*N_`f z{}bYi_i>9PUh%K_U_q~YA9HN$?|%>lDl*anIb9~p2IQ~hcFxjeU4)p8^_@*cBM2Vx z&cXL00nL$}c$29#c~-Z4VQ0>mY;bLu9s52P#@w*bc@v#X&O|O5vL2hF)vT_JV1_?u z1S4z`rPLlwcgIJ)o)S;2uCs1e!wCP%&AtQN@KJ}^+3^s8x=(}Gq!h-$whVE}*l)K{ z1=YI9L8Z7unqm@VfX^W_#CyTXdc8Pk!k3&M;3s8juE2kEU`%8z{lOF8;AT-ER<$OA zny-(E98z_wWk1oOz>C4(z*st?cRJ+BJOViW_+;&h_-vTOm8Nu%0uNqaDh@@ymKagd z0Vxl7(WR9!UgPe;Pz3vGjDWfW^hT|BaDUVr{yUoj#P)d;ezkxFg7|dp#^mg-Pn2Xu z3$-#MQ;^YPVI8O^6^n1g$4f%|i<^VCu>`9t>jE)y1$AEp*Q*+_sSk=qbCI!Y;Uz=C z;6?$H07_&54BIY=au{LIWeuA0xEc3bi^+^ZKS6gz!lxrlU^^Qj8yb6|?v5~fOKb|` zO(EzrP_WwBiICfD_!yaGumE8-D{BnxF z{pmyJDx8_U462{hmGffF>ROC&M3o<$+|iTKd$HTbQoj>@Kj@8P{TBlhOnUkH5lMvr z#VR@zTI}BH&;lBly?r*4LmPH6uaCPEw(BI~tL#ZJ^A$?>QVa-m%<6yhHur zQPsf-iO{1{S?ois=ANjSD2=H~aaRGu;uI|&KiZQ%>X*S7dvo8qza<-XRLSdOfKJUu zN-1l7?~Q`bbf3*32~i3X?EQQzOy1NY=j(#($l^HmI5D`c0U@>aGzY!3l|0s9bY33z z0k{+Ab>Yqr<&$7oi_3|V!6bW&1|suI{GZWlp~p8GbyZqb^Oa_~c4)h5GE!o*)mr&1 zqsa&~BFPPG^Ad&d$F-uV=6dIk+lH5yyFXMrR!LySyg|1LiPI$y*52rNcx@;j?7>F9 z_$aAEaE`~oO>`&IsT$bh%{m(utUqtdhV2To z=1UGQW<-SqCohr~uBKM)x=7zq%rbvo$ioo+_$s9caYV-a?P2laOqpyKmyfmd=S1mM zdA@?Zn~iVc|0IlD1ZyX~e=&7rNCkPoVzqWpg2;5)dX)|jnjFi2VJHTGf5!lT0br!|DITk~wvk-&=GUOcb4H zIWA>MTGT+T-nZOcH6t3%!b<;P@J$FdVH*zJTW8|T9Agg}s$ZKhXCz<1OMV)Vgm#Xi z_19$swk@P??%nb6F_oxEEW4$E`YlPgCV|R7;LEe#l78NpZ-QpgY^;Q?UHf3qafy*W#H%ly+_oY1%m1p# zF8`^>Lx4)lP`mt~wP=yhu?8uZHiq6*Q$);ud#>02axp!))%M`lOgZT6x&X^kp?r3$ zCKiA1KJKjktt}(qanV7&%&vW<#uA7}M%%+)LV8o;o>l`nI2|qeR%BRii-04ZlTNKD z;W#@&EFB&(bk?R%u^YuH%SZ#K7?YTzrYLA*6}mGmTA7W!)YCI9Mkl` zbyPTHkQM4K;79hnolna7+~q30>>sYsjDq;h{XiQdpN9D|jcaJ$$G#h@3xk`Z6luXk zWIl*r?>t@UQ57=}C$(tGt0)CHO=3u3`t67;>$}LU+CDA&uRjdUl(vfi$ltsT9oT>i zWo>35QQF4RkRz}yvKVSdMN>x-0@vCyeXgLLYlc#d0UKOUc`o|{@f_*H)S~wZRdOF+ z(xO+R`yMB=i175hgi3nEoBUI5^sIKTX;HfT@XLD6;jtoHM|glSr{;=ulrWgl58yUl zF~ne&kgPE=7o6|qa61jZsFjr~?N@*(M zqd7QNuoAA(T*%KV(rSX}7#NFH1xC3T>AdA>saem6H=0-exCiY0ksajYaP#BlODl^R 
zaYqK!E1_n+mh6x9C)QTdnfCVI$V&_9?^*vbogi(|2-+z7BarN|L<)FAB0kA#pYYeB%ABsK_ zcy4O_%E9o41R>-8S%#Cpen{bQ;)1~Q_JwSL4@~S~`xZCZFZMDjRN8g;d9SaJkgMHi z4oL zT%=W_#Z)fAL}}Zp7OwI1F%7T__y<4gp#sCGsPNf}5{efz>1PrRH*JgJ8-9+4;f%dX z8|0D$8T1Eb3|$9W(oDL6nNXh33=9KScs4g+Wc&pt1ZOH;bevh8_)JAn;w9XoHXa!? zx5mSn%{ar&)Sqw}qTQ00fx&#Ub!yC$9o+F&$QTAx?(Ob^($CFIQR@dl zUb=3rRd%LpLpGog|Ei}0X2@}5f!6WY`nc8?tz&(Aq5eHjTaciI<=V*6ePB`0P;@Xo ztlJ+jgylJcJbxk?8k^XNOPl7)SnV~ECl++E`Mpsajys7pOOof?9wa4uY(eyc1?AF7 zukR&Gi#iS$vR+-(I(7dMmlFxrq>rI=?W_jf|2G)Nx(~A35N)Fz$~D~n|MagscfvQ` zcqB=lcZ%RpSBc5JwT{#lHrQweOGfsKKd|KqvoME%VGH>=wo?dIe9WEv!9 zMu*qsl|JK%^0pa=0u^LJ4u zYutxOCrWrdHY%W2Jsb{U^x^csP=?l!FXh8{u9I~F1Gyl#rqQ?h3}K`Vld6m1)-W#G zKLM;vrPLnGlkYDWryVhE>X?VDUE@}v5??7iDe@L5&8X1H0(r#_g<*Ab5I0#WI# zJIm!XeN>(>ZM>ixwgX#fDzi8BQsk3C#k4`c#YN|a4UD6*JnG@Cv1sRS!q&PR46ay8 z&7Hq95fnt`ryGv|Q6(a@dmN?Tlb#$txWzE~l0^`qyo=*=Q+(C+Pcp?QDa(|zV9KBE z!Ga|~Kx@?NAL|b4FUUEAX`Kc_TW{K3H0YUkhc@pTsty+5d)kwS=&gLJSXYt;w64Us zL#>i}yb>1iFOU@IXDu=L+&`4~n|qZKRE2SpP*cDNl!PCfP54-ZW!#>%t_iwh3DcNU?zf~Z8TQ<4hXuLOsLU<=j zKR*Xi#Z2Czy^!Gs3G$NLf4k}TLPaELc6_Um=c8Wfh!UNdfKdbNj)+t?q^hKy zfKS`>PZkMEid)&&Isfbl-D&Q^#!(E*>oRgWTL$;r-mvO-8ptpGl_Me0O8i!9+hMu4F*;3cuoRVzq=|flam5)| z(Y^i%=v5>65Zc>yjn4f|BC5i;O*gWFT*iqT!ZEQpe!_=c&J-8a)ZVf_z;-a4?O>&y;Xcc!a3bXselWVzSof;0*CCC z$mHE&Gtu|CAHjTbLksQ!@WP~cmx0Sn4Sj5vBapIiYnAhDpHn0?JellO3wPlV?;Oqs zHjW-v%Ndfvix6c|BkLwOJ?ZeF=ru6KZ01WaY$Op@fHw~UrJ%QH@_WCXFm0C67u>9iYQKKTYl~yWg`?$e>Haf zbn=AE)@2R55x?kt33$;hnVfc{-@JO~t~X`isgHlvhOVTL^cMr8`u6*;L;|Pbe*#w2|FgDMQ`1Hu{Q|`!e zXD*b)Y>Bt7$b`2|KT-L~vk-&K4_-Y!ZQ5Zv>QoX;lrcTL@_ZLGGnfSLuU+kyKi`r)~H5=9I*0{(2rle^pE^gBw_4GTn`Tcwqd)j%d%3yK7 zxbV%(S?`T6glJDo7O}$@EUprUqRhJLB$2O`Rp>l{Q5-PfMKsBl9|PSdj`6stlr436 zm6t{bHByVzJ*F&uMUTD40nd&g)my>iS)nxPNnMb1n{w6L z_u}hio|j?2uC^Wd6s?5XfUMi^eaTz}B zQ7mDGtbprA@!=Rkg-z$TIvtpa4~*2kZn4?*54fy$G4elY|KW77gVU7148*XOmjMv% zWJ&PgWi236I52fGB1PlwrfTPlkFPptm)t|KcEB5cukd!KzO(RX&YiW+fz%wsbo)x+ z_KGuNyN`bSl{?)p*7Q00OMwc-cT+=61pAWicgWcld!6Mrk(od$fmBC2uI(5V+c zN_VBdB|9ZKk|!zjS*w7p80DkL^3~chZUZAneAqTfX<^IOm@mc0_s`j!Zgi9s@osMv zLViV$NR>50H%(X4R|{7`wAzuz=aS{XTgHW4_V|t^lV-C zHNMS7^OyotQ)9M|1F0-sYIg(UrKFP6?wbu!dN?%&!!=j z%l@la*X_})+3wGrhk?(MbnHt)8dK0KUVE!KsKML&Tb_Z4e2vQzN@!tTE?zxYSQ6xr z?50g*!O-&kL+H0O899EC$`zK%tEV_Fs z6_>-sD=`wg0UmfcLD`5Ay2gy*Cn3I;cmmM_C&dh}0q3_Adx39jcMS&p;u zI3{rA)f-KqBrI|ymj&`-f}P5^u!hjzs7X}NSgq~$x!m#x&rtDg%L%#-Nw)-L>K7)3 z-~9e7QAgo=EVa82*~MN9&Blq$w*P@lbr$wwzM?JSuv^)`t``bd=U>_W2VC75KoLk1 zy0=jkw^AO2C;(vu5~x=;ninoSg8wM9$LD!Ainj*OI}kQt{Jb(HK*uN{cv}sfugXJH zw}ZKmZvwTVqXBr1r5II`iy3!B%V9n5^XoEa%a(H;^8wMP zS%zzV1e?)(=(DM+r5k<8P@;O<#O3!bOs2p$E2Z*^8a4jc2s_HN4G;gST584kbCpLk zzGajs)%i{ZdTvay7ufWm7fMn-@S^7fZJL+Ni(97gvJ-U$)zfcd&&Xm~VqHdWdznSq zCcT4eV6u31ipt@`8>`p&VD?+3HTrdJKg7_qvMKei4kUlcxvTcjj&uA$u0dUg`fQ*} z&`y3I#1J>Ge~t(+Mo7a=K7J`;9}tP2yA>ZAoRpo6dJ(1GK}}?w@M~$_OgfU@OO;6% zh~RJ)i0AMVpjh$B!^8k=eDwT^M@mrR?0OF_RgmXB$9pA8B+?a2+B7T94@`gxrmq`5 z7u$Mrb}A)E*R*$39c<0lG{12#ptf7v0QK-cn02l{$&4?W4CY4hKwW1YV9?p#iN?3h zP`NaK4}t~czinZ7A%LRxvDX4wq!6KNQ^Hq)1ei~|8e_KdNNThc`9sjP5sW$X<;{*t z?~%rl_FrNfO!d}y+gj<-V!cAd6|ILU3HohMOa+UR`8k0m z>PiBUQbt0&bM$$k^E39-^lVdQQ%6*jf(z9qG&bX;rafzGN>@h(cAU8o4dSc%Qwk;I zD)aQhwB^L-_diQ^&kep=Mq8LO)_w2Zstw!NL+g|?N~9~lbu|00IqRkZiIKZGT`ww> zXhcqx8>owy56`If~~b zyJqt~=A~O3xQ&Uzncd=!5P)GE%!|FhtSfpy3yd;(rj-Nh9i+re+lsm)+K2)27j!cL zIu!h*`W?NWffqnhU3+g)+DWO!I6^r-d9ecw4go$-e0f|@oWlQ`At_fR7C-D zMUyHY!WxtPy1zzQdJk7vKg_!BB;8l~Y|XMS&(XO@ z@p%V#c$i)BdJ?Ae@8|k*b+REx@R>O$EmKpI`BcNA72e(Svp=X|Zx)=~EiRwU;)Yq1 zwyRgH@*R{dD|S%ScyoeKGR|JU<<-sAJUHMQAZM}1dxdA{fy_(DpVBS_$wli;rw8>1 
zu9R0lm(%!_4zry$=k~~gb!J|8TT6c-49FNv3=9lO2+e)_a7iiF`Y?I5i%1^|`Qb#m zFkq*2?a0)5ou`O*uH{h~V$?sOl1?QIT1^ds0zsR2=AI#*1Ug3*EfVzxisjy98iz)m zdGFq$ag-csql?ZZPC8O1=)Ce-;CYQ9keS8hZ{O$~Z{6zC(5yV%v0$@7z#xQU#VK~= zLCHP^pLwL$AaDv2CaD8mo{zyZPdNdGCYrix-f_kw2_2oNOSP0vL3tCmxiEANXIZdW?|ib6?e; zchYDvDJ2P$KK_gYB7v!iCU=(NZ>Xp>^#UpSY?_O=K)#%$JKKI~tJk34Z)VM`SoMY3 zll4ok+uxEIVW+e!&g?k}?JC>!5!XL@fTItIiBjKGoTXbX>A^$olp6lId*i(;Qi1t* zui-7=W4N0?O7(6?Eql20=wb{hFPSDLm7E*86UtQ^^evIU-Z}!#^F~n(qClq2|AHf7 zt3eG%H&v3Win(pJqiOgRouM*s&cI4jyn53se%7(A``)#ok=N#Ws|R)-t{p)mg$Jbe zoB=X}!)=X9O|-u_-VO8Net*zpdkRRNHPm?&DW#3H5}cuTR2vG*&Dsc#f4qshq8$u`>;_AoI?A=aysb;1@{ryc07)Dp5F_OlDdd^9!V zN>7axYK0@LW5c6wH{_Crk_e5s8Yw=a=P*$D)A#1+#n+$3sBJOoAni&MyodJ!S&uu# zITXcOSN^=WSRmBV@b1WD^pO$!o9**AZ3NoZre8(~=AP^Pq$?p4DQW2Sif#FuXMq0s zncQ~~=tQq#^Q-P^^6l0qm#3-J6sK2}WL+`)VOPTq|8-;;Zvo7`cg6_?&4Nix%b$A& z8Cs>NvRkcV&3xN$l49_W^_P8|@>!`9a*b`DjeB%Q+lrq=G*~5Y$IyuZd*K4zGv2I$ zDN-Y@DlEhfy@CMS!~=kf#=*yNalhZ0Cgy@Xjt^OY`apAg!F!C%QkYI$O(1Tr-|#J` zRjo*@f@aGjwfizzyaOS?qUJJe)^(o!bR z1)%;{6TI4UF`QoC24(weZ61$gHfNYS>VKilj9?9nY0>)OkY>Dkf0IG;jO=zs9`+GW zy4izr?9?oxV`cQygBfhD6#>X>q;V-*hb#3w;X=Jd=Qq23H8tFW_@DyICkOAo|7$q5 zHZ75rX~o3w-tgWu7Q$d(AOGwXSUrujzk4yM`PF^yGrX*z)WDZvGz%}^FsoSAsr{jV zlU@DV+mz~8RJc4_=i$3(cpb$`MnYU zTH=Es*_qu%I*EiS1xFv+1&3s(_JF+zIutjD&n!4h6WUwrnZ~iebC}jlgtSsg=6BRF z6Rb5iqjgd@YEju~?6sO#+4}0(|JDMm@-#lQON%|CE=Ling0m{h>^NwneM7DAFLUFlglUYNYTJiN<%l9<8XMFN8qLV^Snr@O%J{6TK60f%Am=swG(drb7qX z4bA#2aa3GS6>jLXzolzCB^_-(eE+NSSa%s$W*oLh_H_a}{qG{77!>|3WCNEiRQRMM zElidGxq4sSNY+(2ed7G(Ul+!IF@7ikG%??=cAzx%v)(@W2Xh*j zLAryeQ~cFi@4LGE1}jQi+CO`^5S%He9*XC)onAXYl1PIKv!nmqeqZto&#S~j7B_ar zsW_6!%Pq=HQblfRnsoi4+jCW&>Gefak5!)O^M9_=8+WyR_ym~%MfE=Dqzdo*0e-Sx z9sssndTX!x6OsU&^y^+7&2v+#m_~2)DTZ*UB=&2+urKMRKyV8an_0J~jRoRL`o4qs zQWkfuAz$G^N)O{sigMt~^%v(TrU|+;ryT|;O?UPvkTi3og z=jgZGyknI|G%fJgd(P&epMQYuXhba z>TZEdEjfjr8i@T#`rB8e2J8U&9lw_Le(i3du$y}}Wuw~}4PU+O_h9eK_vU4##SOr- zkz}V>DRLXCvl<%V59Q#WaV^IomYb4}N^SXJ<3Ftv4eII4WloRuKYu zb57g9X;X{AT6;?X<#1adz6fMR#2Qrv6(&-)p3mQkB6NrBZa3h(| z6NVQik0!1gq|-6MeA$IiSoKC$)p4sZI|xwO&~^&%zMI|9>z}-ndjBfW2>T+*42_+z znh_V2GHKzXUs5wT3z@e~4bsccMm)Uc26n40?^!rk{PnCBmdFNQ2>_0vc~UOkErdDt zxUBD|i6S9(o(b3%SN4a2lkuo-&w`7lFQGCokNX;M<7=2~;9Mh2r|Zh;3*vt?358O( z5Fmm0xU|n0Cmng2O#WZ+S_I0@msUyxa5GJa!iqk(G@9MEieVT?wPiJ?*ya1C3pu| z>!MGgMFKg@fY4~ft>C3`GI@S@9oy$%FdFhcmRAkunbjI+i07^*=&(F?6d3;un>+=gr?TxKeKts0h8MdA4#<86}$SQFdOqg2({KgD<0~!&sN*=h>Vt8}+UX-{SSi_D_gyTKM?8>lr*Zosq z3lk}4riQ^Zr^gG8bcxoUF`T1&m-@gnxLxb3yQ?Uj{(|A#XD2T_JUD4ycf6<+itU4N zYHE+NqWe7YR>GaSyReYzSnED+K4=Gp&pqmZ@?377x8KaWx-Iv*t_-4ov+)m(MuZ%o zq_dI-=oj_MG<>fI%%jwvo@<%yVRapA3%09&IP9_eAxP(rcCAm_N!9v~3((aSAD&02v8z z@?HQ^);md^>~R8InY-N905E&0p)hk2fBaj1)Qs)o?bzOk6S5LMd%xv4I2EVvK_c!3 z_>>Y1@Fo#OlPjg)7<8H&G_vQSWk)_`v>R=Ye#bxl>$5TOXEwG<9syBTQ+aZeeiW3- zTv@GF2vHX3YuHp8Z!IY4vpj;l7ofo2(G(_?iK;mK?&%~H%#U-N!FyA;bX3ajJZPZ3FxeGx z+7;tTdfUf)8|1eLe;$LBFo;u5$|{mpGwuY1TY=>Gsb2T0bG%;iV4F%fu772}>A-N1 zd-@>E$296KQ|g05z&UfX9e;ux1H(P!ENkos&#EB5{lvG9bZ$e;S6{Q+K`Q?cry4Hy z^8eie(IyT|hSQwiPu5n1? 
zjxpz`fJZ{*@<5RttFi5QFzMf&vk3WI{jFoOhZ2`-#JPi=WTT0<7sVZ&V z93P&N+hD?0=fD6j)&-xAubU-T(d+aeF#qe*R-gPYuD?D$CB*onKqDb5VTkCiprSU< zNl*;a-o}r!@`m?EnWC2qru)AFjz&G_{$3@!-TuEhC1RA}_hUj%Hr6nqL56o2-XpW0 z&nqaTGjt@%KGVvgw|4*8SA@mP;JCd|lijYiu&}|f7B!WJnOQP%%vIZ82w1d_t02cA z?Z^NR#@(Bi&>&^i`bV5lG^;cJ^be;tJu^aJ#lTxuG3hqwyOa!p18u4q?5JN{@jwwIFqm60=|lA+sqZOTcz`6_9`>iXc7D@$oyVnlF+Gc zMijJrLQNt*9e&&V>&T^Ehigx}?pr9Sj9>rSlv`-qm389G3bRV!>Qf2lzRStM zpYf?>(}^Vk13T8_{#&k-v(D@6`=7FH{QSQ65VPv3jmuxGnXryq{=g3Db=LL{^|L6@0EXo`4?LgE0(5!Nt&3s!`5vP(GSryqldeE1<* zGjXd|{Cft6!|fR#(iT;Us1l1lmA4>abKz?2&3f?j+~*7o`!;2TUmF_aw$i1}op3F#Ec*V|wCz0J2xu`?c0iF8uFuxrJXB1MZEg@ly{EdLU=~ zR^4jkdHfeYN2j3auQzO$x@vFG9p&hNp(AAAe0PxZ&>i`RA`P-?fgL$L5uZREGr#CA z%FxxRb2{mqw%IuI>IYV2epRM4O7=4Me&NCgH(n`g=Xs8BhDjKI677%}iB*mI0z)R0 zBVeF^WUnyVemZ9Adv4iP6?Kb8_w!6?t{sei*6;6vKJiXBtV`}nI+6>W_vCXSe~hrW z-1CW0Fq}P>V2^CWr>aGGk?X9EzxB&dksy7XLl%NCWj0#y88%ZbE2eIf*7oXav#&VV0{fzt%9G_THj;rXDimM zd6OTUiWcXoX;ct==;~h?P5O3NX=rk%Y+NNvMcu z0t-(+kLEk&#vzOCwIBLrGWYvq%5mDAqFyEOPl=Q!=7C{}7lu5dRLkk8K8plJdEt!M zk63joTk};65=Fe>=P=GMoz8-^@V-Uo0#Uz5#PlS2jMYhV4~?SERZ%#9u!v6&H>0c0 zHQnv)r*mPE4E_+7&_06~Pi<_K ziFnCS7^Q^^MJ;u0Izrl>hH=7qgL`6f?U-UaU!EI>Zq^WpY4I$G`-R( zy(KaU`q=q*{{+>WK5KQ<-p|M=@_}TW@Y@RXKJ?3IIB?~Uij|03;`hEhNr0YIHA?6Z z%k!H25^l2z`t}&ZBeUM z5w%0?tyYKDEMluo5?g4i_KFy_V~60E=Q+>$o^yWx=jE@sbAPV;y584-K)ks{z+0jA z;~;irB5ou|2IZyj;Q73=)0^2%^(Qd~i{n#`#EbiHW@za8mX%Txr>VfMeL}lhdr)8e z?F-$)`@xo+sXM$FY8xO-*3T7{=&url78J2x+&4G3tia?Gw->ulYm$3GyPOzuRR#Tv za2qe~oZ78y;_#|K4#DSTL8#^L?v$F6VkFd*x6M@&viL`5-%i5+Z;4mhj;gb zDtwF2V-%7I@)ZAoa4GI6c+@r;^*_P^Y24aoYApQJm5L{g*jCS_cm-6u)p>W8ydMTQ z_3%BgDQURZgj9OQM_tL_vD_F_5q4{j=26UBp#|ReUc9};f!Wbs-|;&7-}Omf&Gl|j zCxHE=41aCF>OmKP$E!e1&%g9`!zOSBZBs2u#lO6MA;3kz2dXDvt}~8-4!W@)NngPl zz0k1289TU{RtsYkk%)M)Ma^>R#oI98nc4k1k~cy%CD~TIEJq^W1SKqL%Nq{JUN(bm zE4Hh>@iqQlt065{-VQ}i0DkqVlM9B)MmlLf2diKKBC$lD&c|n@w+rrBCY9HcS`zTd z;sDek#H311XSM6SC)3{!#>ZBoAKfY<)+3UAI=lQOs?FYyMvW)^*%nwMm0P-`nqlm% zNJb~HhEnrHa>JCK;qAwXZ#BtkdM1*)v;&bXM^sST1v!3sRfkLp5t_#=S%U}-@-0$u zsK1e6W;qK(Cj{q&g^+1=i9#U=-b7@CYA#^r)R*^r{P;X^+#kfJ?R(B{z@=_TwfMMB z=c?CyF7RV=Z@2OzpzBa{69Se@%Iwy9JbIaqVY)mdz8e1wU&U~weYxAr2RcazAPKo? zG(Z-Z@fjXwyIz(7@b2yBec6@5Wp{NbC;{qqIP!{r`FYd zg~d0to8f|mDh?BtyTjU8MVY{&eXx4gxhm0BH4Vw~4K~=Nr1=ox(x7U%I*x@q4~AVU zs8w9N9kkf>*mLR?H(WJ%i5;7N_6{;D3uZHM{`kMG+?(Z89F6{-aRt&c7k_oTo4(}X;pL4#zpTXRS zD_RMUT?M|Sx%BRnj9orTp^7i``Fzus#CO1tu+lPbN+eq=&`ywhIWewj&m6P1HvedH z8MyyS$})VDf@sgpx-xXWXNRbUyJ43>d%5vbOtSrslRPSKV*;t;knsY}CXZh*Z@rT1 zN#5zbhEr78@!5*;gfk2XrVIgSP?LEhv9t0?z2nFZudDk&YoD0!Ztvcg5cqOa_Se*( zNY?%FEn#_3gI#s+s>|Un?^z5uFB02O#8Iz2=Mw#vp&k=sm+8;2Gpe1({YG+83?Yjm+BFd^4ywac;^GsEs|V!*V&&q)YtnGYIBh83KoFL9B?Ea3f|zqNWT zR4xz^vipS6o049PN_ubP`-v#fzOzyS!Bf?yb-?p*W4)$FaHqR1F3>o5WtTi($-fJp zw--P#HmT;zm&G`KdFn@WDp{CSeY(t{@Hw7!%iObZJM=WbUfwU@_Lp(DDPBgo24c5t zZ;|aX)NCX3;nRG92Kx}Gls_FvYI9HgcB-W)g|BodbI8G9-)Ni>*LFZMP1mHNhe|$0 zCBDdY=}$Cp+uCaXA$#xwg#zZwO_GYrpvSJ_PCJ)6j;$Sh{KmCQ;cpJN+oa)Yv_Zx? 
zOx{he7>mC30q<-^P!==j_SiXR?U|!g5xY2~DM*x~KSrY4+7xjeR`%Z;Nu%`7B|c~N z-wWzOHoSx6haue(ny~_B`Bhd=xlB=2|B_Z4e;Fi5ocjpCg*OMJE(Ku{ zqq<^+2md@;=YVwhd^ly=M>(oLL&Eaj1-`C>=Yn?$v^f)>Lylg6A7%iZbScWr(jjVlnQ0`;F2(`Fj?S>zUQ)k$R6>@`*x|D=79 z>M;(~yjcdWizIJ;4)_AoapM_X4BvJ30$fZ;(`+rZl9vudX4$T)fD}N zb6D-`8?P7`51nLr0&}cRWs5d7`Zum+h(_{yCKHIG!0UFYsH>kvp)Ri5i|#Du4D}K3 zrdHv-7KJK^Jn@7zcohTJ3jL_wCbA$tv^Um0VaJ!xqQissvyu!Yi;!)KmVebO=mbRc z%50J0!x>c}Z5DeA2;XLZ^#;L(12(C-4^?s*MoxECazXH2t@nJT`&4?+K|wh4ipt~7 zTihS$dyv(s5&<}Er4Noi!3h&@T*>oR&}#9Ksg}3YYip7X`1%(MFiecx@}jy`G48t$ zE?nsj>}OUIq^iC=542Etx;%^}3lY~M^HCn(LhA$aC~l~!&$G4(4VF3g_{EMdV4{1J zpiU6flliH84bZ(x&>!tFwKfO>32Hicgu7Ac*3S1J1buX-D})kK#pC_YH*C0%#W!Mn zNRVl4WZ=z`olB=D|43HO^Gfo~;l-{!J9YY@MQL-~5 z&9V19KLsKK$nQATjIMu}27vmmB0V6=$J5&OOz~z}BVbFvV#R0avlVyiZpxFNLGb0| z-}QPr;IB}fDh4mR=sfdVbX`Rno__5`2_jlJMgk$*x1t$tr7x{2e~LX`rpKBE_bCL; zY2Ikc1fkJXSbpD_x`C*OZMFvi_FQ5wIjmVt?=(-Crn^G;)nsLaKhfCzL?#1z2HAV* zmzyB&$J-CSqKmLjCw)B&ymO|^rrTJbM)og3qpRnV&I3)UMX_~unuGk?a_&{K+{cw= zo~?~+P-_Lv^V9O&n+e!;AWQCxon(7>-|{4yTpizxfafhYU|%nVQV%Yh&BCnl_q*`+{E(_4#QYs5&DQfoFS<@2N9rkx*iA1Viga7ueFihbv`UpbYcIRv~P zl4K`m`{tVhUUfrtHwzAL=Rm6sb7$Zo&K4{85We(RafA4}*DN!GF)hO0@`S`D=>{+J zdVjrqUcYE2RiT8GjlQ9nle_LXKjJgG!oAy9=7#KeDre3hmuR96Y#^Lcn zEt7jo8^2tM>nr)+r+6Fpl>%%|GeOCD`ED4RL6FhZQjOQ^6B{oZ6rqtIx0BZFEQw*Q z(a8s?BsSfn+vzhV1iYbp9xxTSH)1%Zd${$+D0k=9ym9`sqxqWZVYzu+^a{!^DPTlU z|9zEv*IMX}@t|=wkSa2M@7)PId-I}1BHAxId`9}^Ma|DF!o3|y|NSwL;UOVfDz-a8 z@$Q`O`YPl!Dt~RhMTE|}_TxfLjg6}Rnil?piHs|WNa7Uo@2{i3Chu;I@iFSL^F`lB z8C5${X5iS8#l8Jx=epc!>-^=5C*0F{MSF2!jW_H6F9FQ9nAd}_oY^xyPuOleJX1i9 zsW!^enM*fh!LqcTP9MUx!S8`&$=e#6HEoP(Fa>2DPjlE)IJvgnG0%EZ9SmPy9>Ku} z-D^9mJvOuXegb}&&f1l0)&se=Cs7ICDT}!0#r$X$Xv?SQJvkXy$ghJK_`NeFWcKJa zAMIAHL{F<;2}DBqL2{ID6!30cBFs@tZNz!2UURLpgA}aOHghQB(>(BY5K^(H{K;VF z&U#$wqs0jv2gFVZ+~FHkgEBF{VtqxUisbnWP+GT^Z%Udg<7}2`NvVN3F4;xj&{yd> zGPEjT!m_b?%`7WYW@YH;rLF_J{wx`y$1alFsfNpsA`~Orn|RxF5IkYc974aguu^c4 z#dWY<4`w4l(cG#J()HjRf7>{VVg9Smi&Yn*-l2Si#9bEZCr=~%hxx>ZuC}Y6^0j$u z-J@L~#TsB8#Z4RptT5Wgs;;QmxS!!v111N=exqDJ54wks=CnxKW4Y4e+@NH+FsM?= z7vuft{mH2FfwO5H1;2ypSna0i3WDmYjy~30$dD7?VGB=OrJ#=++!$88De8HlZa)Qv59^ulo zXpnQ}4SMt&I=hm)SJ;i|g~^)Ue2TVmMHHuE<=VECFp)s4A{=rWN^dD~a_iCjrVei3 zXWu2clcRI6#tnbv!hCY*1Z0pv4%`OrVXwggy<^voTNnnJN9T&CDiJH#RQw6uE@x`y z>j0I3RZ_jc>TRc$sW5=e!TBjL*#DQ~sRmlo5k;pN5at4zU z>$IM|673B?W7x8@$Y-7dKpEGAq8l(|YOgCzNuW?R z%{RQ5q(C<1t5!GW4)ktius^7ksJvwWe-=M4@Gbqa+CqQrqFj>VT2re`qn4!Z0-u)b zr7PnJ2_I2Zdgmd;sP%Q5pZ39&j`X}ADJm1*VB6Or?N@f+Blm~Hv|g$;IBW2O=<7b zutQ>WG_aBwdDS2qtt95S#fNc@Eh~{bY&G$~-TA6lAKjVfYX$t0E%ud^Xev*4SHm9Q-=I+Mc%Il7m5XS{9S71@@U4Llp{$H?3WdK~r%!r3l&4^^Jm% zhoIH9FWByai&Zmdhym?6T`&IQPPT95HL!)lts|m9_)Ty|DO|ohh$vG z1F=k$*zjp(IElQu+A5|$vr7G(_6%)+Nj$dpocgGVIm?qm&&1UD*$GdN{QoB`7ILcY zxx)%S)ST(zUAW!OD+2B1iCu?ElGdXv3JEdU4)fuWh4Tl*2~C$^s% zyM3QSX(o&GXs%Ehfkl7Pb!t8ZAA~N5LBuSf? 
zQSf*UJlmqvLNX5V1X9~3wN$JQen;kE5Labj2l6KE7P$|Rj61HI37OtfDNKuhd#S;) zv~iDBm$Nj-^1i+OoT()ysS+CTG>7gc+hf#(~4wpbo;G93+M@`je|-*I%N)|IkVOH8KriNK3w!scpO(EAP= zO-DOt*F#779CtNMm0=S#DH`hDK&^&v9=Akx*4W#+wIuohCwKs+jpVsVVz~pFuNqeg zn%VG1rn*WyZ{pN9wL43kLq8_a6B1-pe6#4GjY_2EwPc+hPidENLkIf^A};e;j{)*j#xu4wPLJ8Vi$$xi zj|rK+mtP(xw&FnJisRk`h75da5vn&O@t)Yz7IcFIgaG3M-L)cuf(!UIv;2h~gR!w% zK01N9bt@*%j)Mxw?!*f`n{R}S*PVdGMoVbNb-V;VbG_4%De23ov8RQFw+bN)lsNLr z(I(i6>$43ZtIf*GAEA(gBezy)P$dQ+!%L%>zWVeuOw;bA>URia%DKxwpD4cOY{qzn zai;h0VBPo$*{6KVf?4^BUcD1~1gE5$zC-?kBNxtS*SoNU_@W}&7-=_XP@N(@9sl^u z%f^0H9nHfn%@sg5f2>4^swZ$?9{x~TQm&HQA&$v*`VmxIEB}%fp(X>aI^mx8x;G|U zB>g%O9mpBg-^NPviLN-Bs$VRfx0O%KW9tTWr+^{%8me?WS;Yxqh|VbY;#0KKF;je> z+hI=#>8*q7QLPMn$hPFNccSCpYrb5tQpyp1zka1G-=%&MM5*5Ogz&4z$rK8f>|@#A z-1Cc7Zr!PAqsL{M9zD-_F3JCLX@2Dn7dq>+ml>q|$8_K6SV zRrXa?*&F6(nYvFEPq}%BOSey+* zRNiTeUl05^V9vLP*qD$qlP|qVQm93o4Ax8ne9aZ7JO0mP1<3+VX1+Tk$DwcE#BnMK zUfANru<3AXmBP?QY^)eIZzssvGUWWw^UUfPb>8iiJ{xzYSLRSiqjc2jXpGyRnH96M z=xx^LP&)`)(3k^c@e`uV)Sm!v0UE)^us68x^IIRmwdUFn$$@)GueH5!Z@99xTEhJ` z(=Mhqb+ms_Qp4GXZfI|la$<7Hh2*vY0kgvvZj9N8$HsT<)sVkx(`SE*Ci;B7%RHaT#%7fYQ1AmS*?)%de@WuXWJ4>oWUpG}vh9~?`R|LjM+*#}SOtZ)t z2q^Y7ujw`eH1)n_r9Ge&aw}ZBIa%&zf6!F$y#&q~UXm*%;L3|AbUBFjgxUH;)s6ud|U$Qk0E;k`0aK_F&_m0s3aCP zgg0!ua$Bz={r9spmiT5C4bU)clA4a@Ix)ZERsK>5YyVW<`C{(^D3pOaxJ?L(C^cyIYViv1oaWT5K zZK4Lxek>C61vu$`Ax>K;JQQ|=jQhIijjov;>tFS45@v-@4bF&tpR=Kv!=?5TaK)VU zW4H{%hZj}7H{P?vF4%|?;=j}AqvI!p$ZC3DJ(7{4@nB&*$=!L7xpSW;vci7#4+UFS zRXedmXer=wxej4rLs}+UWawPGs(8;Y?PULl2Wp@|mN^sX6SP_Zw?{XYc>e5SvcLR$ zrZOu?Gz`(r;NbXzj*FKH+Oj~roqus^wt3Wgw*HL?k({OXW@XJMEKtVDWSeAc)rWDT zf+31weC!lRH^`s30;DC@PR3=$)0|hA)|oFUOIpl6rU{t`(d~J<0!Z+>^|QAOv*f&t!=UMo`g?FZNIq8?Yx|Lk zlj#XJYe?#hhN{P&`SE~2Ew7WD(?qN;YS)VkBBoUH`kBZFz%}JAjurEgC)*$0vA1ME zao%4@AjVk7T52XG7Fw9+f*lBKG?mm`m7WK23JA|K<8qio&P}CCEemjGF5StLVW2&o z%^h%GmR)Wfj*&Bo3U8sNx2@1=`#IG9`&mVr8h=wZs9*vX+(@RBff?8z5(7Ld$&h>X zQ71=UR#iq-KB+O$y?<*#$RLgHG)Jm?&a%UZ%Sn7Qop1lz#b|t+vKd~(Eb-|vuYO4AosyfDXAo%@h@UgyK&4 zvch@caBqISlOLJA%O6jUr9>mxtm z`CeL0jkc>&YbNbe=@)u)d8$oZFDO@Ylp*`n%cA%K_r6uPPK3NFkg7iAxsdh17p=Iy z$TbZlU)iCXU00-bo!DFj#2y^P_qCPIPb-SYaB|g;za`N4sQ=4-yZAUc{T{h4?vfk2 zS!N6#PiXh1$!J6KDL5JdF-;6ebl6L+^*z>RZ$O2s+5*!yfwubgry*w@pD_LB(Jf;| zO5unv+_jF!k_FXpP6E9*H2P!Ywb6{@E$B)E@pt>JV;(pmTH~6xJ%`3fZN+d=6YrC| z^Hu6cE}8VND6g|v*!H`hqNShbv zvA6sx-1q8dI)^eRVuFOCmb8Mh44MqMSNJGi&0%yXf>yHv#<(&yN@<`btgO~mTAbXM zQFW`liDQcfr^{ll5@B_J>weyu_o-YBx2@ucSN4Vw$3zNZkK4D}(WD?4d&B#pq;qO>aq6%H7qDro72CX&QHhpJ1M2&lfP*7s0RPd%~EkbW@L?p z!-NIjvWFsVu-cw!{iR-wzz=u05kWrjiBwOj*Q;;nVeeM*fog!6&{uWN*9Q-Ab~%r1HE66)>v~wRFKYG?G|eIfgsp*t+FXL1oDm$I$_u% zIgIHyG|$Uo1G~No4+W`H;GBm_31LH|vaEtHf` zp2aHx*s?Ak+{Zl_&$sC2!7DlC_j-#PD$2gkjCmihr_}W2h$_o42wC(zTDB-#Y*9@| zqq@&$Cfj@J<@KEuQ`eGX|0vhmJ>ts&VlVshTdx57-q+hfe{vmFP%INpbRV>=`rARK zo5>fAiN(~bi~FaNgsruHH?{(?GL?b485Kp|{9>?{sdWCBT~_sH1|H_U!K$zn-1k{isLtt^Z-Z zaJO_{8n|5lA7V@()qz$>t_ON2B3HZ;4&4!x@t%=~8%_u9dQx`}6Y=e+d8xai7 zF131k|KB(shIhyv&$GFMF7v{#d#v~JWa~$1(V;|V|t|pvvaPqn$flf7h?E0Vmy*UsX#Q>s9cl_0VJ6yf(wvh53V)s z?V#Mivjpmu;O>(#dgdx;O}=;4{KnUk90KdTr5}(e9KRJ1A(#{61gmE#m-GdCC+|YX z>Ag6q^R(VM9Ey6Q~CD&nvlhIPlDX3o*`bcUn3^90G~hcBNqYcW5YEEGlG)g+hs`N5xBb530{M;8jo zr~1jz@9G1l2xT`k3xTdAyb5FY*3K$?&GsoBbzRZo%4A~+BXY&KS$n!18F{ZkI^17w z68uD31A2M(C0%$tYCPV?=Sm3Q`{dD}5y#Ok2>K;lOm-cc01eV)zUv)6NJ8L~HQlo(;JaNDGyb!|FxQK?L3#Skdf^ULg32?V zg69J80&`bsZmKo=mERgSX=JK!WmO9eE0i+ZwD4AQos$uSS8*U z;d!N3wEf{mEMEo>>$Ti~vvu8Xz&||G_?|aYP%AIiq39)Q9Vn>hAD~yEig9 zuCk?LPy)*vmgEz*@*%*JMN?$Xvz^Gy9m#g4>c7g7*1He4*)uhG?N^WUc_Z3)r0fp@ z+NfkPph4Rq_n_#G-`H6(`VVXc7Y9XHpUPk->h5>rDr{e(5@T;K{TR#SJ!`j-E&t~h 
zq0ADxzkO;=9Qw+A$00&bBURI{8SI-BpqnX48p@hIrWh3kE)F^84Nd0H!mM^Qp7BId zf0dlPcZ>)s7VDLsV{}iA%COWi{yVc3DpIuGsZM$ec82^35kr&*)_#Gc5g!x9O_=;* zTu{!~dBfm$Iz#HF%1)kd<2g~K>JOaJs-@UDJS zqdX3{*Bn=bs`UV5md25Dj|~n+pN!+*p}`|{Zp<$DcC!g|fnmI#E^|VkR9tr!POU0T z$^vz~CnEoYK7UWjvxd!bm=B$q30qvWr?WJ_sy`_12e28qjRU@tqK$i&P;7AAALvQ8 zC%uEGlt*xC*3aQf3Y4p#fg}|+D)DWlCgd@g{FrTO*TexRA~LUzULhwgx<}hM(u@D|`RS%N?@p#+;ve#(i_XF~1PjbWDXdoOo%C z`ye75UJ!4pctxe_(pL8gwYxnja(r%?&$*NBe0(P};HRS(vzPP5TK3AoHX?guAI>Dx zJ~vLMVt1ZF;y4|xfQR+t%o7|rt-ak@L4^GN-8LJ`{C0T8*eY)Pm_yOTtFrYtK0JT_ zv;TOs>g`|h2bz}|5(wP;VBZmi^VIn*n)baw{KyY2oo*)0_(U#}IDHA&_lx)`_mKW$@t`{?FyA)Kb(&MU2?C835C{>%$s0 zBxw<$8ull$wheosqXz5%Kwy^qDt9sI4$^ni-qR>;1tVohO}~j(G_X@w<7_qFt;)R7 z9^rqvMSSv%70A4}85|~E$+wsEHB+jxuvzCJ3(G{rg61}_-qsu#1oUZ5kOs2=2}!l5 zS`aSjR>tKEeMZj3zh;z$ipd;?CaP4HOEnfO)H*n|=V8KQiAN#N$Rk`ey%;akIKNt~ zd-6?1V@~w5P?Af(qfu9I0x3$Ya@D6ycjqG&g%b9a!h{t!a?GxaY(mrL8NRSQ5qC|) zmv5CCyOr`w+%?Nt88J&Pv`CbHc6%Ff-`#hW@vM`TI{xJ!E?O9Ja4b)(^y#Hd0n9mX$r}?^obNH?uRiDpH!ph0(aOSnzU4P2oMUp!+b&j+ap#m6YVMZfn42u4I9!xPL7EO*&4D(ol47YB(g<1F(6{UY(#iI*5yOQUk6NVao`4;oMK{|oc+gSZ)c};sioF#imn3@R0kXl9? zA-;ZLU^SSs*lA(orhX8tAm9P(#*E9Gb6M`vo4DoXD=Vi}!XL(_Yz^BC1Wj`++nQ_f zdm2nj?y9GM{d;mQakq_RHZfACAxnFX<18Fd?@k(Q+7czr*XYfVh@q-~zNed4$i=}e zm;H(K>x9qq=Q@KN?dMkO2AMIs^h*4iacs=1K^r6oZ{y@wGDec0&|F2}Pv6Z)qQWH; zV%!W;a0-+uWB^!Yn(pGwtbD$^D);T6&1_DPuj!kj^%L#iQqWsV1I)<8DY33X{Rp6- z1N|c{r594KL6s@b0h&I>#X*`+{yy-`RYA?2X8OhKqx!H0I|H-G0P~v@Z1Imd-Wa6? zqRQpJY7bL+!)gym?eV3(*vlCr@0+^GhwWZ8{x8G*CCh$xJ(iOl%hc8X#X4F^_LjAB zRgiaLK)sqQH6Y1`4bJhQvB#1_r5W8g$2!2CGrs&7^78W?nMNXV!VLt0gGK|<_A}TZ zyrNzD>)!$50%sNL-_yGW0|7MOE7cwlk}yn)F{ouZ3ylza7dr9E$gwVc%P$80eSt%& z7Qq?2TGk@%DehGU;yc0q%~~znHGB5#m>C!S7PKJZ!KH9|`Pl{=E^|e_4U%v!*^jZbupon#$TDjhe+OPyyC5aLu)EkE3p6yYi|Y_{}?Coa4AVFvrn^1z9f#%#A068zdB+ z`QeC@N|R`yRBI)aHLpr*NXhQGe7ujEy75lT&xJ123Gt$XCkziyB@Y6;?FaKe0e3AL z4RZalx}F~-3_MQAGNQJ0_%Qp~5n%hL!!rh^`I)&eo3jn}lJ&EZqm;6%v7?#}a33bi@LgLs zLUNQYMLv?(S}SOs32&Zo&(=P~?b@S6ZAS$Ny-Y&6x+bt5klXuWiy??-nv+J0r|^_< zPtLEM@gZQN_~&!q=3zEP=;#KO7Y~mt;J3EniCcmW?NnWSuR{)su|Ou{7CFJ9-I4M> z=*Jh@gd7_g2>0i}=~#_Zo*6IqlJ)Bw6))1|XK1vR_)bFUA@i7-$eT2Zx28X*QE{GZzKF)N0XlcwZtoA8`CyNlD)NHzUPn*uF<}}>m21~_ zYNkEuHtW_t0dI55(t%aI3f3rPNEDF+#~x+F3D0{~h>=if&BlF&O5PfyTn#g=-{>BK zLqk&&ti?-9#g5a)wrCYw+;k`uHV=edER@`Y(}H~?OnpUju5Il)wrM>@YS~f=VPLD# zA2ID(3V)n?R)?;MPq-xnN2vloC67U0ob>bEg=H<+R}2UB{+S0M$FZC##1%@H!@8j1AOoKHt@}bV zH49$~ZlMr8HSdd8jfhjuNu)po^RI%R<*%Jv(>u0nPC{zgLv0 zYSrl<+>5%&4X^=iOyk$qCN*wJp_%71`di9e%J~c!mwpf>S@?cp_*#u;1BhuRzp?bvlgaWo3h@d9h0O?Z<=iwB=64cUdwM%2OHAxejJk0u zBQnO@Xy!|oAy-|o1U@Ga1%-6S@$Ni({4yy1DymGLgMkc?1p8$Lc0@j%Yd}2Y5L@Nd z> z{B4&v@&L*KJ(Ae`!0Q^mmg9FYxtzRqA9cfbH2Qj1S-G$H<*kq$`+Xq2NP1%wtvS!s`|cA zmwI6Ed;dy;c{*#a`*L=6`z}Q#LC7FgD4@U=uIGPsNmfge<9f(IDP)f|Cl9w`{yo-e z&r*qwqqXOF`CdTz*P)<%a}wG6hSrs)9|_5M35hrxFD|I5{QK{f_ABh0GV@-wgRAkp zW!*V`^E6eXQGw?Pu5uO zuK0vs0k(4MAND6H&G7Qy1J`*5s-OFA$oYCaE=5&Q_FKa3Vx)9!^kn(}p)rmy__t6M zr+tR>*N;D~WYc6gs&8nOsA#`9J$bd$-{^dfRDon)U0;9X2fg%0aEFVHv7iI*)%XHQoMdh-Vmji7 z#Y~`wS}t&ClETH8*Dbz*gTP>H8_}X5xRZX?WBhqM?3&l_#sL`#_KmKnV-l`QmNPU( zIi0$e!`(1}#?p!c)cX>CSKDk!CNo0EcXr)q9;wpn>R&bX^(MapS%&Z+h;O8Fle3sA zL%AisJ+5rp&dPVgy%07vLg503Zbpv!fjJ5f>${`hGS^xCKLcFD|jh~SkD$(Q!>T>J{1 zA8wGLi*4YhEJ{gzE8}M>(IZFVz!58!=D`{Y!3%TP>YJk4NZ?6lP0~*9McaKFs&_lL z;WX&dl2;8J-FNHfyq!t*^c&p!ikNbwZRo&x8@o7Fn!Q%kctGC`7c|BI)Qw&ZCP@Sb z1AaA(JOy@To-aU_%?Rk9b7ERB%iu|+FT*8E-lnir^WoX`d`GASILV%pMILmyGAa9T z?RsT3Uwc{YH=R4m5}KzMHU3Ly>hq`jv)>Gk0DgCzV_6=+6J`%vW$d~lPJS1uGVSMI z_Q*IJ8N|iMdoDl&AIx3q`~Eol7sS5hZ&)XD*Tu0r^lrv>y?+tAu59B!v)6!;amAZv-QX%mPwiZK670u22 
zHA4-V{fH0iMDb}8&o{-k`2+#s={K3I9L2)8S?waTl3}oiNY$Rpk|c31mg$)BT7hW5 zWlXoRKk)Oj^baGV6cB7ZNX;NZ?I!Srv_-N6?h*JVCOnSfhxd0>tYG0 z&JBx4iwuuP2O+a?V6^0_CnDJj;)M{ju)>ZG$}L-oFobeHp$g7;QW-!I8ka>-^1i;E z>dm10Dp676g5Ns*3@6BeFV~~R8Y?kv&WdHqaeMhFeI{P}hIQV8ZN)#!=hXr*F`1;t zGp34how6EmBlNz`mP@CP1umaMoR_51ThCmG1|EEgy`+4yt;*tJ@VmD1YFeX!n6v=P zhG&}4$Ip>ae1oMv@lCEq+8C$8%Wc*Afpg0saf(dxm_5e&9LlEhmQl}Gv{gdgW~tl; z>u|pMek438BrV7fl4svm65@8J0I{e!$aje?LoXCr4g|42`#p7^eiNsRmT>l|9LHJi z{?b|mb8Iy=C5R}DO@NlIFKk2~yO#uS@KHR^SN9fJb>x1sXjIzo9;fKlaLU0%>V$kG z&pFIBp}i)z|2lv2ZvoMIc(BMrQ{=b3Sb}=|U&vB{B<&_9Rh6XXj%frwRi1Z~)?}FAwM)$V+3o+8kNn76CClS6S?z+A zU~P3yDzSMt=?uXSo=2Ax#;pV0T9zf;W7yqWWViy}YPjA}lBrlLWpZ>BMcV{~Fry}T zKQuQd$5Fj-kV!F)2~tgNO9%ktrerqFV#;eHo~V0GAI2dCB=5h|XEsoJ(`lTr#{Nvh zk?IMl#>pGo3(KZDdXpI0wpCpx{W0toqqLibIB-@%-@%8^4R}KJ36}dj5>iP4gKT^g z#f`FJhn)erRLHV->ypM=`8+JGQ9K$jK2md;V(m57S1$pbUF3`$p74X?AAG`qq+W0# zuNNy_ua<+rGLzwJdMcvMfIA7!4cnyhx>M_nuNT=tQyPKD6`4v z5F7?H*9!VJM=*r``X$ZLmU}IT(_|=$Z z7vItOTSw-+&ijwukUm_-WJ0+f07@UW41{~J?+1G2_XIFk_p^lHHL!252c+WG=5j)a zayDl&1cA;PQs~O&=^JGZifY6F6oPawHy^I7Q@(mLvl1f(oTt&GALsn5g(-8hC|-14 z0T5jB#W!b8wUMx5WRq*n#ga@vO zUriiMukg0NIw05DmsVMB44RfiG*q8TlyCmQ=r%7G=Z&WOYZbZ-Y)EI%QtyI}FQZ$P zuO8QwZll`o=F$o=WjkM|xCPb#JaT&KQJSH33sGEbYEj(qz3!;~oy%71i@#$p>b}?0 z`4p=UjGYw=$5Nz~ypOwE{E&)`r%#FcaWAM~=4|rJDi;|-R5cT!jy_j0$j{wBYu({$ z3*0&`Kb)O!t@o{#4Hl&Jp-pPCx4ZIpC%~JD1(}RCw3)27j~JvlKOUA&$eyJ)VS$Ki znlTu%2L5axgnnyvCBs)hm$KneEG(o=yMLluiY8E6=wtJDi-rB%W7u(WLCn4Q$&#}_ zo*rMH4xh!}%wCnWr}93pyT(Pwa?1;mEKxJi$Am@|1fQhhKg$ls;5S%M^={Kk*#?3A z)@Kb1<8Bi^x^H5=+uO^eH{nv3W}rZ{EOhqs^!|TR@*@mvDOALQX35xNn|*}UX}up+ zcfJKA!oX^`)gOIT7Ct{%l=>=zzSlsYVv!w>Axs24ZF5u-ogIy+cGN4DA>D%_#DiQB zR||4rTATdxyk^xgUpe4ye2_6TOo{0=$T2+X1a{fez?8UKgJKM-PKoy~SRV+mwp3$7 zRT^iy+D8D}Sos(-@iz%?gRBlkYaAHoo-5EXBNFuP|0-2oCjf6uo1Kv|_9nZD7!eZZ zztGs&p-XDsTwK7qIzTf{_=rb%F%!;oe#PA_Dm)Oji@bu}NttxjV$SKzaR06%BgRYL zp4zTM)M(Ub`B{*6*yE&CB|B&Gj5#Gc1Qm_1i;-2WLT5gN?A&$cm>b(Ix=kC!$% zUokRqn`^5tUK49_aWB{P&ZH8b2UlywtaM~+7GI6(Dj+K8Z5BWa--ObS2(0#tp*c){ z;E?;xg*-^(q~;5R&*W#KkHu^Dg~H0aI*6-?_lo5X#v91D`HtP_B)z|>PfnfBwrBeac@R&^@SUMk_*_b-iNST3jo#V#(<_uV3ZGD`a!6Y0<<+(`O6HuJo8&qJ|8Lp~mn_cR1$Q0Flfek^e7zfKZArll2K9WwbN!>aCoYL;rG&Np}|H8-CABSQbz$C1qRmaHMW@UALT z=v}(JcO%v`n8R_Bv>b9aO=^p9k6#uuLAB=( zyfRcjnfu}wH2ZnibOb-n>EO&q9tOQ=5hfGwiH|FHGtIAm0ct#6{9!+X7n}+w^$SFZ zc-?wNM~M?*{9SW5oVPsrB!Q^@90=KGqXbGXGlH1!win%45;x?@-+DX74HkUD(6m0n ziWKZ3L^Rcb9Nf6#okKO1gSVY=!o2BFr7(>GKmk};+ti_u6X|H`+fe*PHNf&C;()US zuGl;5Mp zMh-;v_qj6tOq3wMm~v<|+${sTQyhHPH(z#p*XY}Q@fVI!qV-V|a$KiwgW~#Wd{0b% zqas050*S@8tfrDB$1layI4@3Z^LZKp+ovUiXMaqvTmZ>m8Swbml`?X};Edka_N!{c zHIBhT`9J-R`-GinC$f%s1_3h(Kki-E4m=k!yVr)c38cPa*A4A|CiagjHdGs>^eZohOi+cm0`?<^C) z5?@XpWd3$Nfozoiq}#Ay(GgJOV95?+Th>n5CQ%TpC^4>+CVnJd!hQhY_u=n%t%)sg zuZH?IhUoK!(AUjE!{6bLrK>ya=EQoMd$P=3pVOZqUV5CwO}#nlZ4)P73y%cvD8U7x zCJhu#tPgxFiKB#qxZ{@d+%=o~2Z<@f(UhDezqU)EX5#m=t^rk+XcDK?`s}z}?!S+H zN8)77r1koAiU#t+J#*NHUT&ue^8k3=cM~QRvJ{B;?CgZ!LV9NhXU%Jybv7)q$KX$5 zi2*9*Y;me$zS?+3UgqSXVf@E4$Ft>Mv3Zt3o6ROB1#$F!nM>_>)WG!wJ30HZn>5w( zaC=zT@8zHJp2_ui^SWu&>TgY`1b1SAixIB9qYHq(=1moog9=v&GCnG^6~R@#Am*nvwUY&{=!S#p6$N)R!<5DjT`Cy z@lO8ZkiwxUfXR+6kVmlgcHjA{K;k|~-q2ET<&S2VIHLHb$!))v4UJl~f=A(@(A94& z=MI6cd^I-3uUwTA~<1=zQ0E)Te-iguXp*pa0^`!OyDSZ z1Y6?_)qNk2`02;0e0X6uF7no)A^(@te%~26eOG?kqPNeQYO?Um>i^^FtHYWM)bl_xfbErk1do!7V!^Z`vy!??sjwPW)T-e$^JK!9#7Z z$fMXQezDOa09mYCaqDaoi6K20J2Wo&J~jRCpZ?ESzOAY>yK`C|=WTwl)cA1U%JR5d z##V$OLRMv`X?GxI!lf1Ld~)1_dzf2G>np5!4>B&*yEIrh53|=&;d~@)<3M*_N&X=5 zHJ^wIHR@?eh>YqsSALWaZI0I$kWu0e{OJN`WnutDhkIWKu1)P`qfal=307$?7OC(qepjqyR{>_G^l&EQ^rsUg$zD|vAC%Wxs}t=k*l~@ 
zRayO!02a1W@v<5#w(;96Io&#o-cb2Lv2ZrAVoPWgZiKN8yfVfCvXl~qG;yEDyTy`CW?+p&n>x!6Q|9`i>B$JkF{kM$ zfjHR-1Mww3}(A+H!MfUR-tfM|WzS zml^gGYP9v=AR6~yD@SIf9j0JD1b{ZJB+`#*7!;%#V5&3C(zrOa=Lg-JyCM6#OYt$S zXUmG{#(@Cnq~GO7T$866>(20(-WzAHSA>XijF+_*7Z(8+b{ug_ee&y3+e?krsDHB> z@p2?qt!r|`U`K@(t!BSa(RDZ&0pB2%!?(u>BW!^7$>|9>4tdWk{Z;U+3am33gWWmj zzyCBacP^2cpOw++dSWMfr@=QtxfQaV9%nKaY?ldESY4n401sC@Dg!ovyD)ZVJ}sq# z$LKTBc7FwaMa%URv749Xi+KFyK;ijp4A5y?=-7YNK(RGp&={>S6~T5<|7fzKc0O-s z%jwye@{1vZ36)t=-~~t?-gR`;BjmQLurHf-``$uVhjY@?Xft&Bb|~{@+NIDYE!B#& z@?Ea<(=t>2b5~D0?M2JL3Y@xs3WUQgc*#HpBVB9Sb5v+{I?UBr)h{6DaF+p0+Xttd zvtGCT!@KwB6K7fPqvy%H1iNY-W{i7iGU`Q6osRiJ-~#R?Nd`0IlMY1})}9o?iu#Px z>n!>x+vMOn1n~Q9FD4L+`Ot~E$pGOLtMZ~N9>am8ehU+SJhE8t^QpJBXOJl5i0Hh0 zgJ@kx&{TivRR<16XW?G3D-TZN;3t@y_?Tm$faRuf>+L7&7RQ|-zv4IAEae{e=0|ny z8IO&BvOB4V;h2psv3kj|JsAR)NWJIh`1>1BM~FS@&8@;=n}JF@F#~poTjYmtVzeRn zvc4ZzamX(GS`+8Oqjx7dM5{cijAhIxjImD}#i8o$eVE z>V_;;D?0W3ybp~kh*=lCojf``&K`E7@*5om^QcUii79hT{x4yIg3|}d&F*v`M8Nx{ zyTPycL=a{TPjk600_{6# z`6RaS^PK&#^>JzYe-Imio@DIRqU**Y4Ud2SUxZct2f{{OA#97qr;GCPvq+zlVba6# z%_|>>-)22=DCgo~i|1*yj92S4=$Z!?-tH%8fb+f5BT(Z;uDbJ84%ruB6$);-5CA1y zMzs$F0N}cy?et`=YM})>B`RX7_bQo9#_>k89=_Z}S zJf7?$Epzm&IvHKq>&C2Fui`aEl?2hKikmB&zGMn2$*-y1;$Z#I?SybmLsdfjiDNzp ze{bU#@4QF|osF(7s5q%1Dv9=B`%GT|RepHR-YTCDcef+&9{eAl!N6FTb^OE$ady$1eF=_eRg2y`pHy%~JH zWL-symw^_37_Ici)&f_t`{^9@^qiZd%3xwp*ax7pk|g%q2-o&PcGZ@q$ARUk61Y1q zkq9LqHb3y0>8A#+8E@wDwL_NH`Qx{SrPI_Tp&s)k(UdGuaD|1?~{e%ZYNr#A)o2Joc|ET&*?djhQ5(DD!28OCSlDt&J7;RUE1)(!pkMpURlkHhVe z&Ci*={Hki#L+f3#UuPWNg7wLIY1~cIaD98_@2I;UUwR@Oo;S{Y%BEa|dg*P+aBJ+I zUl)eb2sLlK2S>5JL3pxBw9H5TYlG!M#dt;XksmLbBT zbq`CNV7w%9CR~a%;+HHBq=}Qr!O4ZH1Fip9!RUjhZgmVaaOTHNbueGq^+7jpdZBAY z8f2v6G5|DPH4{z@vsOS?IJw;kKl97T5vGt!lS+BOoHz3)Oe|ig{hnA-T_48bF3qvI zKj-G$8lA&-FIp>%p2y6vHcBZr?ShdbLGixVytKfCME&*1@&_$*X$!N3{$!+wEXQn(6bUO-)r@oUTrg-Y3vd`} z^XSZ7p329Q(o09jqX`g8RE$X(Ec2BCR^fqW@_Jb@CpZX+@`&;khq7}+>kN~(}g7n zElXz@w2CnhxzI*lOqst&ucbiZ=V^KC&9e?Q~^Z=lSaU;MW6}Hm5X0lvG1b?F2MG4dh z3P?64TrakrHZz>CamK&KWi`a6iG5JB4~$HEkM;DVFSFH-TiAlgc(sI&5+#Bty2KA8Z;JkMNt3l6SXbo7*9h}U z-W)d+{~L2@JOT@;q9kb)3|<|G^{->7F+^le=~ca3JM5lPSAq78?oBY+NCku2o_Q)9%r-5CyVkyQcnKL#|o^1jgjtn8`q z2LB4P@jE>FG5Q}xK3BK5NdRt2@72sB!Y*nC?HliB#_kj@J?#T+S}1L0JDdzKHm{of zVj>5FjoCz*C*9_+y3OU8XbOM%mDpzRT?ri!_N2=Y%`t%3CbvPn_6vrxG=Q%^_S^sS z%(IgM8X(btCr9>vw3p%QIWMD(82{`==v1i8ew;7O zBgV#IZe4abZ!ESE*l24+B^fjWkjd~s_X0_G(+rw~ZYNLlAbVL@wG`KwkP_$f#LMvt z39p4hj5E2E7+Y>Ns|Bj`*68{R-n$<4gJOcdwz&!wJS2i4T&lI05Aym$BoP>j5K+5^ zCFUeB`!WL21$_|9=#C@Nw5OBsa#arz0Q$x$d5h#`KR-g27W{&L=sREKBY}9RW#+Sy zdMdYvzqneVNqs7==N4@Aup}R3S1i+0!J!SY zlqU-w&=3BR!52Q#NCQ>qVl4sUrLa0Y;C`4RQ!Z?DJa!r$@J6CBwA8DeMxhf!lwmLt zELpDdMtT8*Dj#oH4>H`r#nr#zy2%?kxDiKlxlw>;7V&+eJyFT6zR5J+Ucqwe99J8- zWzl%Ha$Jyh@tjqxn6iU?V}{^m^-p+}ucxhhJ!eOpDL*T=coeWdHWN$Cd!*q@)8;mx zsKKCodV?`g@kFEVemi@hh*f$JR@asI6!9-D0HHXw&&uNFD+uGoGRoto^ce1C`wZ_j z{ND$8tjD8vM*z9}{gDL6ipib_-!?m60QD3%lj@@UuHc60d|RJ2^(2>2J(&?6_d= zAkU@ae-pfSzG!*n?Hek64Q!DV946#kw5eW|9B2M)<@@~G-tC$Fa*cBIAT_k5;NaL| zV)u1>AKcEohc|VzOT{$(cyyDnBp_EMZzol~U%7=S^I6gh#NQO-6$7+UvC5bG+p$0c zK+}L;;2HC2U-C_fnU$*N;ytr5Rg{DjWIH0m4CAP9*eny3W!otrR@LI=XVv4=Xf=^i z^j!7D_zFeqN922_xTkh%z?u8HXP;?WpnT&`hY2A|hFWhQBeu2m!16z+vO8lezNZN- zMo5~_@u5f|A~~leXiZ4kUUfjz8r2HRROT3g5Lny6z z|As5SX}udP6=eO^<4)gsnZFd@G(>}^crPE` z;V>*iBMojD;b3`>uKyNWZRlU?4xMu4>ol$%j0>(IJcQ{dV_IOs(GNhoNI#7G;=>kY z*ors7P7pUx+Djgfv<-6ge4~p}V^z8`17-T$iExqg@@m@ecZoi1*%E>L4h=xLomMbO zIL&-KFZr)9J?5)2saI?)wNBBp1aJKxTlxIYnQ9z53r7As_OV>edhKV8%!NP!Hs2{j 
zi^U->gPk$wc%-?e`JUI%b7_p(%*m)pg|+hDzDRruDU@m~ikkboa$xHSR?ruD6x=1TEwz9jgZF>T_`6`D7c0fwZg|?4A{=HPS&{QTXWc#J*v470`WjSc@ zSfc{!7P3tyn}&S?V%)t?jWyYGDQV|3jWySjeu<~~f^gNy1r^%`tGK_-VO!JGBs*2X z@^`h2uo;HjRTF(Q4QPY+som}I$_FA!Ua*VPNvZN_YN~?V?|pe&AgVuPr*}On{+Fsc zNMUZH;k_GrwX}NUMD=T~aF`^DWe0b@sv_R*8ZtG?BA{0!58*uIYN*xVu}&KPYfN?9 zP7ea1w=oq}&L5{o#6~#U(@SiwKEV1S3h`jMYN3}VKa{5Fu$fK7$=8Zs zU|_P?)7U-A--1a@jWr3^SY+0!defIh^l}0VHJSr8TB>Z)-&BRuLG8XoE=QNXiO<@A zfr$?lG@YgTcxQXqXO|0A8{@~CD&2LuYd%knJ6?~edpdC~(V4vx7U;5z*62Jh|- z_4U+k5s5QXB~B#+#MJK|TP@d0iam3=Bf0A-Q@rUR?Z z!jvsc@NT*TKa)qv{V#lH0pv7W_W~TOI%9DNCT(U4eMM z*Bm*DF1DEm;UZ0EP`~f8@xxk7HE()rj?gz|;-4I35aHz=*poR@Lu&V(vwI?S6mmzU zc(IGeK^d1_R)oe~&wv39KH**2AXDw-pbJNPO5RL>+!)zB$6T}nCSO%R{CwMq@-yoM zAL7T;r__n4J2Sc^KMUriqp6N&!sPws0>MF2@yaiQh#OdMY;1YiK3(WMAPKKCXutTQ zPv-fDyscGzc8&=Td`!`l-pUC&ezAC0k<#vuVnhxTGM&3h(UDJkXvv#KkzUx}uqFQp z&QWa1v`l)2kvMyMQMwh)`Mp=OGl)KC>@!AV5G*@FklpYi244oei6dfI$b?UR4n~UE z#Q8nTpO^I1Mw>&}(tOl1Ii2r%J4R}qVy4dgJz0POTuLETThHvj3lEugI(1f|hl?F! z+sLsj+hfwVRr{`j-MLXOWN%dhzoo1jf^S0C$>IaH zyKiWkM|7XP3;L{D**E*STRM)$%dW*+<#7#xZC>Jt*(DR<+l3HsY|O#rwn&$w)5Be& zdZ-EqkJkN57QWHEG{*!Wd48R_mvGCoSx_0sUvqw9Q7+@})ijqWjXl2H4RHIL+XAs2 z^c`>UJIlH>k(^Dvki*MSiNuwXnh{dSvl#|6_SdoBmyLA(y?$L7(E4l{azwk#cs%7l z#no$c^68w;ij)>mc3c=le5U^|2%e3K`zaaY5*x!{d5`e^Jr%P%Zm(c#gstItF@qrF zawTtTI-E8c|I0z}!*-5nJ@^t6`=rOt9?U&n!IqM)+Qdaz5958!Pj=!VMmYGYU zH}S?4l8Zt*OiVxanmU`NDw*K*ut)=Gt9yE*cjTqi%hY+vp5I z1x+im{P&4}dq752M;|C^Dn6g1rMom+8?jq~>K{O4BO4BAzwXy}4PWL?J^c*2+_x}` zxrRmER^T;Ib%7!MlaPHh9SZ z%or`usj(bwUGX6^S*|vQ0|n&HqSGa7ElLZ z91g`Q|1`+Ze-Ys-HEQPda*}p<)z-bz(A1VIANr(1qor{$H}nRuW{Ek#MsA(9Wt;n} zdu&)Z`9{@;WNwArCV1-2Rs09H2as?5^k$w8>NltaME`O~eT%1XMpp;68_5aX;`+R6 z@&sBSLb3GmxVW~)faBT3s8J)A(GcIDqd*4VTic-%r!Tz@*#0u9IhD`A$Dl^QB}03k zT@0We`Z%6JuiV$^w`RtOWTjl-kjHpY*q@@!aLY`ef|P3l(qjY<-ra zHT7qB(EU5J@pMKhB~LpXCzWWwagaSmQfdlz{57j}`gKWQwED|d^c4M77hhqPHX^f) zt`Eh2aZC!TXAUo4{>Vic0lDUB%lnY%#6E^qJk(`93KM7|J{gLW780M-e+YR$N7Wfmb|ip2!u z@7P5-#iGQ~q!4bXM}SrJZK%kd7x8uRwAgc1rEyXRV)Bx{zrA3JLsNAmZFj`IjE9pV zey*~-?iL%;6LcBL&jkn|7z4^?;hJgG+(Q(eTfLp}zE+JGBPt1J`1s_N_@B7%gHc?Q zy7JhXXGYr_)$EMd^~b2M=mcQ{HnR~$6)9ArkBR&@!GjKA%wJ)Th;;pu8$o!*^yQeK zY>7p_z9NFfxZ(>jNrgrJM)!E)KC&{d{4_SvMC*8yO|k@^X92558`5aVdpygE@A=z$ zx9@nYdAn2St82du(%W@6_Q<%A?fR`JVter?t4wYE#MB=B9Gs>7V?vw(9r+_EI(tOI za3JW3)S?`;Anruu36f~Dw3zPO!EW~%%TpmSIF}GTrCscn(@;I-F#U9##u1tegQj|j z$2*0$wxr8VhbvsYm62|TnM@C7++goc`b>!*tEL&2n%4#)>nAPCY?AWX2g%3(MB-tp zf#?O6RUK4RUMDuiA&7u_(2z3pZ{ilq8f-QBH7~@a^(00ts6BIP^s2Ha{+RX$$^Xgh zWp^QSu3t!%S<3W){LVdvcK>Oq^X^D@XT4@aNy2#W zyQ-o<_kn9QrTBR#$(z_oLk>5ZHX8XoAhDGFYeD5~Vq=&PHK#WKy}94KL`gQ~qQ~bw z%t93d8mH1CL=4xwNR(?Gi9h-QPr^om?Ad(V@6ax42Gv}b$W_=-Kd}63Olb?@`}$V+ zp6_%s17H$+XV0p2q+qh|g)u^!O{|Xe9N;*(SWXsDnf=dJxCQ#Sp@BeN(?yYAy6e^<+J@M~&`X>L^ zT`rc8a;fC4t0ILGL?ZOneqy?oU!NVGKSo7#D3HpK!i(@EeaXQDc?m!GbAh6lfG&!6 z_C$uNF>aAj_Umky zy@E*v{YD%2{SigWtOlq0U3)#13Al7WPmI25DubGkdKahb*+nx^gX2WIY1hdC+i|W1 zS)*l)c--XsJo9YC7>j@e>^JO{P(j9V{PBl5)kgo~8UEo5YQ0IK!RGlAHu*!g#gE8? 
zs&sfyY=ytlzt%22??4=qkhtR(6H|`y2JUWykDbm00;`sey;b+mtnJ`m#LWb`B4#j8 zFNks}TIOxFi@MR|Q3^+uQqQ(KY!eqOcgeLPP;-sWckms=1vIUH*Pcst(q*5U`PnNcu8_I46H5y+F#ZV%yXQT>pH@#8|RqPYnS}wf7<4>m;V^N&~e~w)6 zIo^iyX~!y8>P2vpXnw<7Fh4D-(Ae^K-o?T zk;%s2!O4782jQ9fW>*%3d|`EJJi%(M9l8_rGH4IJ z_}m5YO^X5R?P77SLnAIK&)x{%r!(M~X7tCbhMR<&k?yeaR6x(WFY{RlU9AjBVj;E7 zC%KkH+pCI6X?+@BotIEP8F%&-Qv^_X@=o#BY(GF?%&4077MXZg-^XLe=k#HmS&0`K zeI+Dd+sEQ`^>Lz}T+@^fuaY0_$t2zX3YS4#i&@WVBMSIM;^5j zbjS*gMnJO{6WB7bz^fhP9|y$abUbo^=S*55Trs?zX|KsCS7pe*7AoLmb-2UjB`JGNk}sHlg{g`>fz=D#nReTWLWvdB1xJWEe<7; zv!oL+UnRcZjVFy^hQ3t)OWZ|>^UbyCSZq4*tz+8}7>1bJ zT>%8=vZ|#Y9g~!UEyKmxeQy&EQlLv75OwIa#w(vto?GXBXQRRy(r0EXn~g@kPNv3b zhW$!n`%CtLt#?rF#oi80C+6_H!hgU091#!|F`jsnI54ClGn3C25Fl&_EC&n#LjoWnT3&ap)EA9>Uh z;ny^IUj;H=c3++q_swGX7D9I)A4jRNRz85pX!vNvi5y|iUIu<+O5D;c+0suNJoZJN z8wz5)n~ZO+35{@YE8+bf1)z{jxnHOto>W;)vpo zVqhU&q>T3+_*U=XlUq|bak&pwfvBs31;J2JkSp-87^uVV|8Q_yVw#@J0v0zrP=`E) zX$K(ZvYPSMu_6j;CEe=H@#ya-8CQPqex>!nbjFLw&y@ui>lbvph_ZI+g}Od+5BP_; ztVO#_n~=2^wK3=Qf2(B3x8jS29MCIEO^+3}rxl}kv`+JC9en zfM4(B#?O-KJvdF2;(E}#xr&%ql>*UA4AF)FDUl=qJXn?M56zpXLe(Fj*BeY~ctF;Q zL_4Ech6?Qr0P(sOb3^rkO>~f`>oAu-OVM(dyecn=VPn#4>&FkypEnyK(hf{!Ik#0- z?MFqpJ=kXwvEVxLIZCUqFnbdLxpaq@DkCYTzpX?1RN)P@sa=EaO*t?Z!s^yZRH0N0 z1C#h^qT0>D*I<0g3-ZLvcQGU^YZ9)i+l}PRJiofczurdn=vr@_pViJd1dDErO2XEH#g=bTb& z49c;{0~6QZ>bdG@DR*NecCgfr+1o^bsx1!9>!Xs+h#EmO)a8|S-q3Tt{*6@)Z_-y( zO&wMzOL{ozTN1~>_CpO>Np51IPkud3p^C`8F;>-b%8^gw1ZR@|ejw&OXZC@sv+{$@ zq+k=12KHGc07{}!=SX8#^~-vW6Z3ri+j@kCt!|OYCqIUSA67wq90VAbQayuO!jZ>& z$5x5nEbkEC7IUe(r`6O1)C;K!p_E^P1jul&b8N~%n^o(N{-tq8*5Tej>jDwjugSjf zyHe@ccXYY!zcPDeKbkVTOSjg%m1qQyR#AZbat33?O8TK2-5Bv26(;-ApSk&i)!lw; zN9Wj1@{5BU{Eb$knHaPe&xBJD{ffACY!_6X>f}@CFPZ0E@FT_#A8%G%^&c>MNlPN) z(?$iEI=SseY_+&2X7gUq#85)LJO4?t$~$zyyc{tR?H%?Aax0f7m$ID(R4O#_y&^hZ zkbQn@r}?M|kB~RgPc>%YF!v>^%jpHg(ineX0JQi7(>zEusI60*~14%1|1kUuB2xps?sOi!n1>KYx{xtJ8p?_+T()iZKT^P z9ui3o7TAI4I&Y^qW6U7_k`+v3bBmvesvRyZT@UfmT^A7$K_%~CxbQ&F7!7!X^*rd{ zk?q?J7xp3k>EBXLd?w;j^?G4HquFSKqRz#OGX~+~@N9IEzb`72XdDzxF&5PlBRMJH zh$P&@sEpq-%jr&!jH$4>FYyNntWPY1HR)M-zsv8}zX~Tz>gj0wS>j4cxH<8047YEU znS&gU&GPE~RmP5QtwcIIEtns$Se>pNek^L}{UdnLv+lBb*UYyJ?KSaMPToOhmJ97h zmJa)KgOQl&{=-->{~X3AasbJW(Zghm(u0dTn1CwF6nc5%S5*~Iti^s&a)L6L?Kb>lyd zyd9^Pj~Ab(107h;D$^C+4jl|?e*YVxmHu@Ro~D@ens6e1`kPHpBV@(kJ-KVW=frG4 zA*w=an>`h$Y4w#IM6Va|Q1w$o!W&IF5)kNC82`>~K(uyh;sbBxMvfInHzj13=OLIW zakxuLwE=TYHOmhGAAiypEU&5q%;9943Y9T$mV`8drSlE-?*LqExT2hF*&(`(3G)HmdHkqNWZb=#K;R{e$a>v+9^v90_?n zT)iR82a`*mGYI#HMS2Z*%T3ta7f2y?=Q;ccE!1UO zSWc9rw_@H9{lj$kroXe>ha>%C3RmcLD%*z{wxnII3rx^m z-R2YZ&$ewp+yAUS_Y9`;(sUnVoGrhzA!a-ayWXd^_vTBxxjqMkrACyPsg~Kdx?oE3 z$P?5l)YCTRU*oDKXdkPU-{7n#6dvIV%NM&(1r*P5Dy(}IWEW=Y+(*drMR^sK6K#unR<@zeOOE@~xowR!tf13sC2M5((@ruMAR{C08^tYl*`7 zs0`91|L2bcgxWO6=@xz|^|pfCTQf?XBk!|UEd|dNR__uD3t+CR{;yD?Ip(nY7xR8Q z0lg6)J*zDPQG*zt@uZ9r_$pe|-yenagT4Rbg{p|jd5&%`?|c4iC4&x0Q9jwLxL7qb zOw8F|C>*=OaNo8qJo{mk%ieF9sFghnb#CZ0b0ewscN&uIFZ_8oC3uC!SY~F9ibRu|n6j}LH|~Z%E~tlriiw6Y^kRa z;oKa)(?mx-49)(ewQh82oq{!ZYgJ=5)iN}2_tE>o+4vv*yh2bxVMFOaRhaYU&o+5s z4GS-p_~GE*-`-oSV`SCzRvp3?>%Eh{gx!2hux)G zw^!&k9|d9bJhD&8woDxZ|A~#%hGbrR)Ta}S&5Allo?$kJB`3a3CmAk&WK{+Sa#9zI z$*oM^nQ8+6OBD8+LcWc*?)+ea&?hk%8_E6(5X0`n`5qf9JYT{#y zfS_4b>AGCLGUI!fD8%O3J|-oQ6)mgmaj8X_M}Kgb|GdpDAqu$5^*7w@ zkWu@JuB-_5VDF9!Q)^hIDxpQdwb6xCvbPB6C<=1DENtnroc0KmQN-v?t%tZF!rUb|$wY&$ ztrTSHYA_P@B3rC@&;l*!8Ao>6Ni(QeJH%S>!=VD}rjHjH<-~X^U$u>y&;|k&A4o7h z!U^o0w^e5;X^*1z4SiNTJX7-28VIUmWb*5E#`dP^zICY%ZrI#lj9iSLs>!%*OI!N2 zgt(rY4mR3yv=fz+^-IT9tN4fp%IRcEK0B_V#KoS$^}xl?P}y~p+U>lY&L40wjnRuL z142&wBj_BnUHTFO*IxW{Uk)VI!qGjIw!|qw0%6TRO%Ku?(NOV)4fYG+x5c#~Ja!TB 
z*GH?^D)0JiBv)(x6nj2h9Is zHz(d9$IA$2j*{t&^KrMO_6s?ug!TW2e|?sbR%S6B3hf{FBpzC|3U=yzGOBD?^u*wW zJ+^+V>eg?Xm=HL&UwMw5lJ9TB}}oA zD}0GoLZ_U$2Yzf)JfvIOeIr? z?;LIy&xz~`m&9jF`jlAP(-=fD#hAQkqTypf(diQN(_>sU&O4)Kg{Ar0o>--Tz4jhA zK5+AQ!DQ3Y2qT#-SjLcbsxZeVahLnpqnJkAje%>2A6VR|ztm9(ua7lX42uj_DZ72D zVKPR=ivbJjVhfIXa7>PPh4dUoUGgjMCugR^<>{P+q%ASIM|56rJIoj zo^T7a)LGtqU)PY+3}1k&xLseYq_X7cn5lp>Zkl1cc{&+~qFpedzO2H2kQu~}$NuWo zJRUuM3kk;tgxY~qg3o2+41h*(Gd|b`OsFRxWTOiTbzW%&PoGUGPJR=4&LqVhQRA8W zKdThA>D2>&O%csz0#Aj{Pa7EnI1xEP!_KE?h2jwbL_|48!{ulXd?S`!>_OmOXH0}3 zycGI>h*@1hvcR3B9CWee{&$o8?NyT>uk~WtBpTM&)$cV>d`rlKOMiW=OsmiIJNB!p zJy;&_o9wRYV?;z%2avK@@N1zsErh~@Og1F;7v=9rbJayqKL8=UXDH6tmWl<`4JLB* zfEtvtXKJN-e)DcBXM-dX30=DLHum3D?t_>U2Ub<6ou`e$IlCMn%#7EgmJ({aG|2=) zfXJw$UIL{!={?BbM2}Uk@?JN33I`GfPmuXOV{lpP__IPXEhg{*pz^6Gm>sZkqLDyG zgF7*Q40oUcbs17o!Exs46#^4FZikAFZ@q1y4v4PhiPXOs`g64~x>YWr#95iwF?T3j zd;mDwc`ntf$X{2oN;eUW@ORh;S9D%>&F7rX57&CDDx^lm)p_Q!t!`fzEoy;9~3f|CvfigML2xO;85=HwEyQG3}SG6^q+ zB6kI|<(DmL^=Ey4G{UQ0FsfBZ5_0U0`r^$dT=?;3V;Z}R)Wr!$D{W-X!MJ2`l|Krv6L<248iq2pg4~0ctN_1DwPFLy3@Ig;c1X&C(8WZ*D2 z8AHG=99B}{;>e=!tiPoz55Vzu+~aiMn5dCA22lsmYn55!niQ!lkw>*B&wttr>My5a z%YcOKwByEAhMc#>KFNhKt$nRhM&dROUVIm&Fyj*gRqKwM@$=kXM7bRKaPJ=87N7(1 z{CKQl&TmOC0H&1LzBGCPf}0si+yjyo4SDzAbz>wj#>{*_bF$j{o7Xku1ygNV-iVN? zpE)1$hjs}}aNY#0oxskz;BI+uThD+&9I|=tw?*lfn4pADrid|qvCm+YeMy477@K_c z(^ikXk1ezJ(en7~VxLS@8s}^T_jRd7?)3F}jFafrh#>fpGw8O{;q+va?yc=z#Yr^m zWD`UO9Ac6?;NCT5_uKLlOR9$s&}U5QW0NgblTaf%`|Yv z$VKE*_y+zNs(5M7(5znr3d)C^^1lwPe-E1!qw!JzRk99^+l?hjBDVW+?R}Svk0#Gx z&P#e}Es@P7Q>sNLl+vD2o?~cwtp`i4?M?4RCie}`kAMXJYzG>=uY95`M4?ExbkPF@iZmG^5`DRf{#cgDPhjxpoHCt?ujl^oM-w3p%`aib$Kdg=Mkj6%EnB-DIFGW zBXazLRq|9;cR=xL>EC~1V8vf>g`5p;Cg&v0ZL(=;t7aqq8Pfqxt-qMUK0#zl|7ZM+ zf43!#y5s+?jz}U6{zqW6+sR4+_LHO|VYHDE^L-=lR-(FmSeSYpXnPb^IxkKFI99tB z{t~?0111N2O`Ei$>@Zj~{3z!wf0g*6ekc8<8&8djD)@-Ph_)TBA9K?S3!1Q_1ktKc z)75ga101_vk$pSh+NQMYQR%38CJ$<+$$t5?8x%Nh)UD(R)Z^Nt9Z(fO{-y>LJx4qA zsMszF>mh?voxZDrf~j__IM@i+Sg6P~pN4T&(wlx~kKs|M0@tuE$<*KSNJt(>2&NVZ zOyu3p2ETg-$@omQQes8+P*kM>bbq5RN4AD;tdkp~z`&k>B74$Fr2vP@I68Fa4`11v zwSGZRHclH!Y>neCfL7N|_uk)0kd z_OjvINLj*6moWqkl0{dbB85mMj)1OhqqzH8S>stGN;3<9D7f(v*J5>d-78I{+=8zA z@o7pB;Od**_(rHH4!M)i(b)$|MyX%x_(FRIUOfWEr`TwSinAvUDEz>%iV0JA+tv)u zc?^DU;IvI^?yUm^%0Bo?`|Gjcq&=%(NQG|>Acd;UoWj7U3Q#Z<{wn?x)TkGgVPJfdWIQSkYmlgOWjC?LzThV(G-q4FNH0fllqq zsFUH1KNs$MgZT}(=4K(4_&T-;+PFT=8X^Ij{$@39==DXnOe%lceZEi0 z;zt+M97pmy?>Kg^7XF5Hu77kc_x7|#r&&FZT&`LVeYFl18N+%LjYO{*(rhaC(+zM; ztOOq-&O2~EhB$N4i$vwRxKUG6quX?mm-9y*f8PIZHCn_^AVV+#At%gk+01*f@rMIl z!aNkM+nWQV>dsxmcgT)U@FoD4?$|r?YJOCUgyYe&p<_nJ8DY|A$}Ox`84TaymzaBq z=UY-mw5(%aMT_PBB)W7dE-CIPALsKtAs*agAub%-`imXjA96VJ$%%S{4n2DU;Q_H- zpF}vZDxiQ}LDt+~Bpb{L zRNP#yv<0G9z=^lM=yt!9YlO_>ljstNd6YFrMRE@B=zvjK7(3CIX-tV60-Y-l9WaOV z87W8aU&e_YZ*@NLvdYbyFjm)IFH#U+r%1NGMM}Kciu=|qWk|t^v14xBDZ=1-?uODT zbgyzdhoOid@sHBE`58l;o+H`K-XSVT1_{Hj!l>CtHGXTn}lcvu1WUyOo%| zH1^!yvX|PjzFvEsKT!H_Ph!UQig?(mpA>CMXBnZ0d zy~Xb&?YwwT=h`Sw3!C04U+Q62`m39+D!r4xZZJw2N+motujhuY*RwYEne(+gTyoz$ zWp#n}?F~&zZk4~%hrXKph~lA^JIvaOV$3?Xg5vWpmf5C&kqUMNdqcXo)_VZy!x?s$ z<4rm5m9~OUqU)6Mxuq-r1G3i3U{~yp9ASUvxY^>-i6d2qg4-@5UZOT|AyFlghJ``8 zb!8?(;Ztekf6b-YUn$JbXZPKHxA#Ku%S2+vKct2K$c>3Z4+b7o`CtSr0c+B*SP{zr ztJq~`dhEdTe%C`Xb`+^fZ}_~5J7}4T#0_6 z3VQWJX}p0W_`8A%XAJpx{<&{#h}Rm+4#S)BOHCE-+&qb1BHZGj=qkuH)Z`uTcbeKQ zEO_}8FeK0xzy4C1)p{03Z406UaD9?{ZxdHBYV1@|k?Ydg;Xz>-23Gk?LeU3U^DZqJ z>-f=6a}eQ2OC;-+og zYMNqbVsdMw=rHk}oKHye0BST5z9ZJmxmMm5le#U6$F2FUfN3;%!t_(*HZy+NkSA=# z1T_cyV)%GmFJY*pV$5VvlK8L?GYHIA`sc0t`MiHL0g`Rrh@$3_RiL;#)2n+mw!R&N 
z!MxuqwamhL2RTE^y&rpCHx?hVkk)7~!r2A5`_|PZ%kJ-^WJ{LpVGH4xrtwLbl`LOe)ayOGV$b$ zG*%Z?YO&Zp{{EC{sLH;a2Z;cpyYdlDjgv*O+Fb+5Ai3iFkLNBn1wOWrsmM22aI$m| zu)54syKK}w%VGym#B!~lk@>D^7D{#RKhtTQy@uR)ON8rxG3_6x)w4Ukuiwh|#{IS3 zqP(X589brX|Cjk7o(mzJ<(ELWKV*la$m84Nt~gsNo|~ z2l4St{%Y7ri0Y$D_B+^~k0JDZYt9Q$_71PdQ~mlHw&nBHj}a16D_!=RK&2qX;?5AH z^vdLrdWlYkUF6{@FiiRSn01r%Bw)VEva#`i@lyv}i@!nmOTT>7LDvc>jV~8Z&RenW zW?wn&-s|#zP5Po_I|kW9oODv$0WHyWlTQQ4h2!1!bbsVI%8^*j0z{J_-#7*B$~Y=4 zK;Igyp-(Cqy30puyg*GomQ#5{b=_w5m%EG*-c2pL86FPw`gi1Rx~+jSDHs=Fc4i44_YswkJBT`LRZcL&HbOKNNTlG@H?V@m^gro}b+vh904b z*V5>02GA}MFEYQob^%luH6J+i&(QM5Mwh5d4;~(C6Kw~U6SmHjdwPD;w=Yq?fK@FTH#e6k@cMDp? zlYA-fQjgvBZ&*xUnZ4{z-S+?(IFeAeohL-;z4z+@K_&KFr#GqrF0t&6o2tjUeOC+* zb&qnHqmRSqA~NJi<%#r-5M9Jk)wbVPYiTceIsA&h0wxv$Bf(Q{6!6Cvon!p5xcu;y z2e?6W&b?%h!9?ATxDw!Pw+qd~%I^E!I1*aNQD0zdf9@z+1y&g0`8Dn z^t}5wE{TAHbLTVW8nOTLBqa9v`I&HT7FR{o$P&=+?vb)>VQ{g*=X|r_{pj$`=ATEX zPe|_Jy(eR5F}#mK?8msJzU*9TJLIees6Tqg(YUe$&F z(|^M#5RO{yBa*v+$k>uP%IJaulW*f|fzO#;ze+t;-LyK7&Do-rvU!IHYC3ttZetjK z;x#1n%9gMqU?TA~5J11vGgB>04fCQQx_2x=vt{34c~k5sWFZ;5U+zi-TBOESsr0?2 zTHyjHn8bC~=&N}UjopOMT;()9$<5QuW2X=E(9m+m0nn$z88qpW{zCBpBGGS`qAjJ{ zmb+p|q(!LwBd4%Z021a@tJr?-IsIZcRG{*)Q_m+CHqQ5p(HgRT4@7}k2x(NKhr^#w zz8JjRFZo|n=wL_4850z?lMpqOZ^mfP&5z%N0AxC?8<{PRj$88dLS1f9HTZe&tP#iXcn@WmaQK5dh8MheZV-Tj>rGpum=0t%Nl8E z!a>u+34o27EM#S6+Cm~Vn|8ROHEdU>hHcF|#>vstK{~s5f&Y*1d(H*D62!)-p&vW`8dr1+EvY`WTNc^bMg(A_4Sp89u*^-OG?vfZ z8iauR(%;=FKbcS3Rb6YtnF_L+9x`YWp@5ZG4RnT0rj7>-1IJ7TSU3r7V?9Sj@YD%9 zlHetQsqiQPE58aSKW^AD|_*#-jXyXeBI_1+hSokd7uF8S9!d;j^c z@E>7!SnpPNZ&YmMX6Vb0U1~fsku4%_UqJF@BAu0LDWV?V&9T}-=rkiuSXZ>Ii z4_D!JG)}BKJ-kUI4IiR;)ip|wanLX;o$$!DR;cpopvZn+e~a5b`_$x@o~b_^Hhd0kLs>h-+iFCW zAg|}_i}kaFm}CCUcnZC=j-lGsvD}KY{X%?5*iRiQaY%<1giNU$_bRs2${BjmD)CHR zwG7S};Dgl|ao4nn3$fj3Vu{v&$D3B>yLSgR#F z$=I7dFrig;tVFj*&jkQ>wV1<+v0Kov=wE*uo2q~n{$udg#QX^=!Z0*xUvmkZ$ksh- z{R>;u8|*Xl=Mv$aKTOE{AhU~+jeFaO`7}%4g2rY#;x{h7qSmdIB8gO5>&GKA$~Aec z(E~2O+gUYQ>pHCS-Bf`*h`vH(j?O@ZC=Vvb8PgngyT?JdxKF=a285mFL-$T*V*Q_E zdtxn=M7JZ?n&+SYjh7)T1XL|Ih$zDjD@SG4I$1?7nK*3pJU)%GT|J1>psULW$~#AV z`*QU4KYH5_2r562SbJWUJcrEvyU{Sx^W4N0iIz?S1!RjuryTXx{Y0SdKeMipEw#+EJ%&jlR5m(!~OJ$XEZz>8Oe=GbCFoW5ZElOf4{G@-EbajNAcY_=4w*$ z%OG-G#E&fm%(ng>lAg8(GF7#>5<(q2`bH~CKh$I}-fvG04ET(-MnwOxEjgPHgcUY z{%vBQxF9EW7SH`^&>Ov&jW%rVqYP;tG@`*^_A$cWQ(QZ81sm6U&f|mo0xGcssS1a_ zvJ+KLQ)qAS@fV9sSyGRLjfF@pLM=u5js6w zWQ~kVq$&EZ39lh@npxsjMIMu&S*zBJm%p3)8o%nH=yRZ87`4#3%k8u*jG5*n0t;X* z1{=Uy*XylkDwHZsw_21qEPT@EH|iPN{>yisPW?V&OH%VPKrDI&)Lxd6T2@%@cQb4=glZ$(C|(((_ZYShXU5Rcqqo(pILykQ#JdvtJ%xKktf`7$Wh0}?w9s-xff zO5EGD2s6{8GN7=grrVy1wgOR);vbpeJD680==qu&#c~(KM4J^8OJ{$*Q zp(w1%u`dRj)3I}TfF}^+J#`p^te<_kUr+tC$4<)DM#uQrX7GE|dQ3w5!-m4fk4|KQ ziNaC72SwiCs9#N=S19n=h5|8a>j^r{f~7;|V>7ie=yt=3WCpB`m2#-QavFr_N=wb# zrJ0Z~Nq^ZBtk3bo7QQ1VfJ4005gt@0L8W&ivLRXq)L#vGZVEf+9i_L!6Zi(<#v$w( z)%17AT$TyM*(PO->YdvMpy*E4UuM=5X#D`Ld&txE&Ncg-d`Ww_j;>R9S~B3-m)gNJ z0;Ty5(QSOByA+zF;fEg1Al zAIEG=UK5N5{Wr^?PCJVb!ndo|zW<~PQ^xdm)bfdTdz@j)W5xshH6B-^Ma~txA6{{{ zMXq$i4RjOs3nYFQsWUt@PsD39YL(kGIm+*rIM)I*e;g1v- zz#n9Iy3Hahfi86{TK>KLBBuxg+xJjH(VB}h0UTWiNGz`fG2Ax~UrXHpWU?t$C!ep99iVPr1-#I2cGIw@`wpWW5^aVI&uDRY`f3L^)Qr!0ZT_Dk6vH|9MJ>xH6@b?1mo zQp`e^av!$FWtb%>8#byiq7m`)!CcvY+*|eK>Rk<)uWX(!5{EL#8%be_!8>*Qd&K_uXz{b2UOiT#U<(Vwd%;DG z&#oENqc6Un^*?=SePF@aPv_uq;9^<>L* z<5T!SMC*GHmCUHp8sKK{UiH&X>`g&9X#;eS4?aE+15gMLEfE3GVdtH}#&I zpAYF@0X8w(l#O`@xIkTETSvysa*$Ho_u~-R`%$v1@IP{$DV;%608`kSiMLzChJt%J zDju8NFb}kRfPWKXYOU+n+i+0zJtZ9-g(>bP!HeD|(bH{*c0V|s8u8)q#S=xOhhvVM z<7an+14q2YK&&G@I*#|IA~{9ymgJ zz|pqeO!N%Nd#UQ_@DDk3mP63Ik!e#>qQfpYyC3iq+$jtL)lZp<$cCI}y?oT9<-c8$`HZo9qVwwM&ua 
zVm@EVpibR;^4}mrtdGCT&k+&lWNd$P?xLtv@RSOjy|L58pe%W-g_aKe1A!c>L`&R|Q_3EakDtk%5wuKxl#=KTeLjg3nCCKp;KVLr#aPG=K1 zjgA|?`R=xpO~L2bl-`8KgfnO#Whm4^=(!oemSfmq?m1M0i9h5-3mItWad@TmX_x;V z%dcZAQm~%XNn7LEW2tD1iAhl(B|Lah{>3UA=K9HvgZEi+Kafz`n#!;lJ2Glq%9i>s z*5UB{``UFdiOYdbh~RH*jz}bF8miNT?g$|?++oR=j)Yf|XP2_=_6xOzrZZXQej7i8 z+BdOj*pdMH{-Tv6+(_2jL0_+qX)ewX4FzBk!Yc6!K!04|4Jo09Lru7P*lYOsuGUwQ z(YjI^0BRGVmP$PGYC<(xn$$$1RVC-d{fLS=m0CnHsHI9+NF0dCW(O5iBe~0!<-=Ot z4|Ym9LEVTwc5{^_ing1hYeF=(O=rOY^2OB2JsrysGlTeWl7zvDCol!mGm&jZpV(N= zMHJ3Ogutm$4(es=+~ari_<<2wG4RV0X1c@dlk8!c&lCm1V?3x!V1$l9JmRmSIJu7K z(^af)ADBAo%riOP;2T{j0l`W*Fnm$41DS6|=H$xM^WSV!DH;#iRtAKk28PDsrH<8! zVRsk_JEe)LhvHrYw%c^?3J1ng3GlG7&NK!Lv~>j7OSzMoCT%in6H>SBGm)5M4(Bf5 z+c*;lQ>|X^HeOt-8+M{#9Rqya9>#xXI7hO^`k!jEWq1t9R|tOgf|d5p{7(DymByZro2LHTlt96+c^M;XX!G@v0pBwA;sw<*3N9+BZ7{LV4L>a=KEOj5x?9XWrsW z2{rEVOljZ%FNaR6Scez8b^rU$aKnf2@-T_E6}<4g%6bCpJL<+)A9UT*J}bb_bDbHP z?(y;RIq}v4jUT8hkjT8JN}~W!Z*JT|t?sgHTISngYvsUwvYUdqrV;fQ!$N)*XAwU_ zvI+A+N|hCW(EMPdn6pXXXpFQ84hNFqm+{{!B9>~g!7%8fB^2%@UM>`pCOdXH&AD8~ zM;*#x@j)! zYt5?%>I(U4!U}|{+C8h*^2M`_?k$M4VGZ8pl7Hjji>dCWFVGly(^YM+5uh{Zh9tNt zpAl^Z>tKIx6WKL@bt>xfZ9p=nM6o^<$|Q?2A0oQL#dlt?ak%)3Nv6$D$A6e$b1%oU=XZQcYIKJob-AuzY4>>R5`tyfuQAB zv133D?dt+K&6i1*#4zqR4PaT#i_9s_0;DwfM{k~WlktQ-j-6*}B0MJkTpTHowI$ln z2(la3U<=6M`DO1)wX4?SURqtRb%qCy2#=&#Y~y*QWSFnHJx1{<;`0(#EN3)oX0L;V z&|_&=u0$JiKJvyp&$;t;@DAi34pWmaHuKlivQsMu$G4jnC3qNZIoW>S1Vus%Fnt;Ft{Zrzr+#&3eDYHy={&0vzAAB;rI_wcRpErlU z5jhknU+Ct#e|{mr^9?o@P`CshwVRYk5D}B<4L2iM{JBjRPYy zK0jcO9VdSYXL(%}*HQGjH+=?PJ#8pyh79`sEze!T6r>XEVVY{g5oizuRXcIXoQkX2 z00*$8I(SG#OIzZ*8clhUsaP_s;!UX|P+crO1rbKtKKjVOz2-!;A5g32n`$zz*U@%F zZyq-x2XZcJM*Rhgkg!lOKoElb`c@tr0LHSyYO64c_BlisU&)+C%8s20E;Pc0 zYuOlbBB$Rg0)CD>&-iF57O%2e_Muh7IKNyyJC)4AX~H^5J5CE9Own5#uE3nt9sXCx z?=kAoC>1EC4;lZqkye5g4gR>)Ok+PTrHNwG3)n&MX0TT4$suu20_7Ntl|4tc zh|KcM3j$lQSh7>b0%RF-ifacLDOeFS#NOquW=_?p7Re3?KJQjScOe4QG9r&gUYVp# zFKbLV&u%YNY}#&yox9mRT?!oJ{5!tTSD){LTeSTY?>~pzNRH}XnxC}(X%XCZhJ4N-yFRww7~pu!*HaA+Hd2?pzG)0M;~rK>_i;OH~x##ZG9+o z&+C=Y0@Nsw!Y0o%lkpoveXq>Gg){Ty(ZQ^D3PQtLd+|7yDLi&H&rBQ>BbOB_Ok`XA zP~6nM+M7mg$&RQxs~;yQs?mH?B?voF=72_6v5%9^^y8`bGXu~Ny};n9NqjwPfSZXO zJ5>SQ=OpQaEY|dXu!uo(+kM&niC{fE1oDK$zNzE8M-aVX%ZWu^!PdAW5Ty?pVBg^z z+|ZAi9?13i>QvFvr=gfRq;;VbsSR7cA0HxLG>_k${XpQy1N{KeR+K7htli@3h*iI* zKV@4C@@CBQG1-AO+Ko0)4uhjTK<}hXkocF5_`6#PWfa3b*xf;&2V<(i+SFf%P48e( zkg<>JR2|0)Xr0!^t93+XT$QtdKg`jsS|KN4lfkr9-$bb`YFbUML}dw?E8_!Zk4-BK zk~*Yk-O&<(B1Dg&8gG@fxExc=5ypZ3_no>O#~UOER%kPy+dMc+M^`a5M|yC>gxzIk zy2NV83%!zqQ*;752T?qk*+X`cY+Kw&!(@SDZueTJv&{Ym9>tE}#dD1>vlfQtZmi4z zA$-8#F=41L=9|!rV;G>Uip~%$n54Z)kWI8^TMHciOA>1L7H5Kz=ky6q6hdsV$t!B{ zmG_e8f#q4N)xAGMIHPh69!yfxClzQzM$hE#egG7>(pq`7 zRlB&DM(Ud*l4CoqN%zL`bM~s%g-xR zw`v(btL41iu-zz%$1IpoJhZ7b5+eXzcp;j?sYp2oeDQ(u4Ad^zHF6eaoEM1!GKLXN zYMncP9uVMTyR`z|D%QqNto{3L`5#p7ms~&T6Vslpqov=Wp#j`)ah8?N;yNV<_c(GL zZ({COfr8Q4c@f&1#L;igxs>I0^vs|{YReX*8sSb74Zn=6>6@OZOk+z2NP%Nm#kO{# z+yywCWP!(fNs(5rtEn+Llg#SF_DB^2NM^0xF{L8k2#1+fU%{Z`9}EfLnSW^`jWRne zP28kZpW%xo)DD6$Ow(5n0bW~1In2M>AQAyBIOKOL;I)>{8*CYTf2>PexWt+XxeFoA zkXY*9>c{IMgqhZYZf*?z9qvzN;j?A;ujrifk+WF!TT$cMr-y|P^Z$926OQaf7VTi4?6F^n zG4>8+KTc6EtgRLG^BmBWCZzki(SMN(vTa~Yr~dm;89z+F9Zb$+_rHWbt0ZA(^*ynGH$Vn zX@bR3a&sa13Q9;fPaeKu{EdxRws0%O>?0;p@B>6)5>UPB6w5Ct(lcYlwIV=Qb3jOyX*En&GPA;Z(+qf<*g(yre2vtlg!`?>i&@(i^Eb+5c-&_?#l;I72E-nn z`EcCpW5*TE5i_6~rG%7R8L437-$v*Z-$xW(XoDT;Z-OjFj%kTq{9~DnCc~OCg%5(C zxAkB>$E&EFKutHc$JFir`zdD1;z(pzm<_^FG2{;$EmwX$tgb$(Bwe$)cCY*URkw1X zp!qdaui09^cys23`CAMW=^$ww@}58Uq7FI9#wo& z;12bO;o|Sv>U<;R#~FIaVyiBR@DgxYU1yObCK`jv4m!3U>Z`{1CsP!M6j(}go!H9% zhrhe}ZaMr$QODGdSg!SgU)-OBZ 
zvBaNCtg^vbOkR~$r(MQ&q`^n}!$ZwHH`GKrV~w}uL=G>PJ&}-SphHwFQ<~IJL`#N1 zJx>44_Dg^B8Dro-wZ{~=Cynk>bMf*x`LeXO>%}z{b(?!}y0S=esQvMjZZbODl}>nb+&DEDNuNh}LM^a)$tyy#CGZWWEGw>`kPO;SO9Vne z=m@Y5uoTODtJo`S)YXIgv4R)z*8yQvD)!WsIoxtR3Yqc?10MvIek z{cv$&{uCyZS#Z!i?aaW>AhzChsnq(goq&OmS2Mj)k)m?|69r}+W8++c)TDj?-2>FfYkcz7|!LV}s(=AZtq63v`LswDH-FV7=Mia8B$#C$UQ%ALh@-&80Rm*XTz z$l(GVf!uH@>DE>s%l;h1OwHO5g0O>-FeQo=HW;V>wcJwl}bdad}b zy%|kA9nOz(lt@{v6d#BvVl$H;NaybQrM*_=jbwYpAOZGpoki+;t~+beUgHQz^KDp{ zZ@otS@JMi9jh6MiF`19!N?!n$EU=$I=l4vj>gJfnYLG#B_g%Jq(=)_&^)Hr|pwCL0 zNJkF4SdAIiGK`8nI zF?JAyUNz8nsK(5{z6e`Io)fdi#HzScnt*Ru8_ z({243?VGyBMU&!&CMm>iOjBuAV+d3oy!o-Oq}*?URaw)m)$HQEU@F^iy7TAzb+pPn zo1A06sgCgpmwRKT2Gqs;2=fuGqtV;CRNCb8?1?~@C|i@`Qsq%+6NScwiE54N>#8ZA zsTMZPlEtCp^q14AOhW-VDmWIkkA9ii7G@+g)6LyYLxNCC=MZ zsO%o3r)J_202U_9VKvC78i`aiZ$|5&+_ZCVnqKfL>{xAyG0;#;3Hp{;FX>)9)u?sx zEv_m#qwv?wDR47$u8XF&?aZYlFVZdKIJmWe*ESj64lILCT8Az*A! zM?&-YevqwovREGI9h3Q-^qPpIv3u4oi2wbCK@)={1%eVO$NTBwW?MNN7Jykjo`HII zD9IVmpNW!WLaBYP_;9*&luY?lkE`dn2IJW=^G>OpgYxbG*mpx`anSH zs2oGC`qKEKgQR~}jZ^kt1*@eBtjy~aVQoKoIp$ocG@Z6dO zjEfWZH!Aq-SPRIuX$5c31wH@9F{_HhZDZxv88HWm-(yLD@%IL(757L{@HP(Xb@uP_V-Q%?CwTUxUJpS;b7V(@S`b9=5$`O$_jn-sxSin{FF!|$}BcP z5TcT!JTfP+1I5UWoPz3L)?(1g4v7ir(|((Xnejsmb7H!@n~2W#JSFK!B=cOr;>|nX zl5hkxS4^>52N&s!BFSyaGKzgMvU#tSh|Vxq`CjQ_9@$cDk6+&*06j1!oZr#9*TWA&TYHcYt=Y>s@C`HI*D#y zC=}cf`Zxu4eWiKwE>F)->ldnIbzO3*ygbQK#fJHAA8au8BjWuZx1jT_-P~mU-wl5F zAK&7L$X{g6xoibFJl(S%-4x8O;nrsPEg8T>4N28f#r|ErdFj0iUZ=bSdtLI*~4FpH*q`Ca`zoX$ON%Rf}Vy_&7vDslBfP? z(3lQED}BNu2H1>d?|iit=)e@0=Z&zeB_rj0+(^i9iY``F^#Y-dU zq9Z{3QADK1eKA;$;?Wp{cjHzL8~7@H!Z^8VIPB(mi&?x zRjIrJTv$c$NH;a(?@Wj5huc1jCi0;N^$nL72;&7kERk`7dqiUU%hlh@rI{Mvm;?*9 zq-^LpQFFZGuFECeR+6LajStU{{0zQ!|r)W2gbBMAJ&3)HKR>!6ug^mW0ZcW*Zz02?b1b<$|h{_P$`P5 z`JYQ{zPypg=k*Hu9)k3hn+^S}j@@g+$j{Wo?te}XEqPnGJWf~mZosBRl^yhCKlb}9 zi4epm#TdU}VZ|FEBuI_Jt=*wJqG8Fn!(pWzDg~%-6RguyV0=Uhc(dZ_AYfmMiVt@I zw}#9vyDs4he~iEun1~EQW;{fRKygLfPOY+`VYGw0RNJ8#@@5_hAg#kR!Z=10K{$Z- z?bQXAGH;cPs!)E)thD{o%cz!O0e5h}migt)BT5!R{|FKxFb+>IjcXJ>GYD>{3?vod z^mkP3O9nSyS1d2g?oQ~5R>%~1z#EjPXdDdueqoB%u|z!5~%`w=S*JvLeo$xp4h zsauiE;L4KNs#VW@wja z!*4D1a7u}JFC}t?A5l}CD?i>!SgJTOo4YxTJS_Pj{}@kJzE`db zd``~h2hc1|-S|^&+Ks2eBl)dZOb%+qu@$(?dx6*WgsaW;JEk(`%m2>B4E!Plu4hwZ zT^D& zLk~SXC_U3htS&15&s6}dKy#i0s^c{vTkRLP-3J)njdGc;J>Ef6&sMOr!>(zFn^iso@&|Yc|U{q61Ms+PGv70iUyuL zzLARh20Sq|``AMMuyT2<=oe%SDmMAxoede`-QvZENC%tne8@r9!bcw2RZY504&rs*MX76yh75Z+*MGh%Dlfb^C!HIWN9GoB zq#P)=Hb1)fa6W=n4^zj{0uSELk?Xd@KF!zBwDEN}XwbHtF1Zn9UyCU>xDURj6gTGa z(?TRMli~#v7NVIG{i=KuS0_WHwpuanTUZBm;`nw{lJUrs-Z_wtIbA&hj;dLt$fl-o z@5l3a{Wy31Q>Zyt`{Q?s|3E%dH$rU5!eb@{(voVkD%^MVT~l8+yq+6CSyE=!b#HA< zs(jQ0*1cqtD>PzBLjI;dZd42U5V%M&99vBseOA7Tg_x~YVUtghs3(nQMO42_D|EPE zv)LjNVHVXeNf;v>caiWWQ{WFvj!UKCKQhL+%6n9p6La25C*xv0a~t7y_4=-IgQ~x4 zt5RH{7u=Y$ju5%Nm{gB2^??aECsIVeq!cNobZzB5a}Oq7!yTv%E^h-KSJ8_u8*qHx z=4eavb(l1>UBykrdxMmGum)Y@FhJF;FeG0|-rLx;xv;q|vO6QCX-I)v>?DXq<=I>_ z9W+}B?3`{k9cPllp*TxN88L ze)b+<{e7F*A%Z8O4E&^+AyGG{;dL}20?CaBJmQ!tVXr~}@K`s$)X8!d`d;jtQB|dx z33Vz4#k&?MxDsAOV+;mH1nN*h_#MXZ9+I?1oP25?t~9oBSk9lh`OF zIkoK~e`gxRpWLz}36Vx2PGg%gSh&L!{x*ye@QydUSaT##cEZ>;Gl@i`*jxgaXAQ&Z z-|by|7{u`R87milp<+>NT1tKy5rxNEoSw%{us$B5jqM1R@YDAt^gA$lxSslyqHj$n z28*KznicebVSoYLb#*>;Eu#AK9TKRB3v!AmXUZ3Be+Ab-T z>O`Y}v7G%>VYA#vC69@rPGr3q4(g|O9JY2CyF^@tDfK_XRy2ksQY7b>TDXJPq)$W*QY5;MO(G`Ds02dN~QW4}qY?hp#5!=;k-gz<+KIFW+$5E+UUMyITFt(#0XyyOl-N=3uhi z;Y^D>BA301__3M}Y7F2~^09ggJ`njJSZVlKGRD~7$qn`)>66|u-3?X8aQ$`?2e_7Z z%B})OX|x+hHwM_t^F91x75wWF$Ih}9CUUIUjhP**AHFgG5vwvuug6{twH0qMl}GxJ zbYezB(LyW8rkQH(gWz7+UcZc;A+Lt83^e>wfmgg;GJ!4nE$2j(r}(aM;0{pnjr#69 
zb1^maJVfI+iUi4qC^ZDut-(*ne5C7sn|t9=ktD=SUu_xg4{D|juEkgU~{0(rbioT1U9O%_T zvriZ;MRMUv?$3d(c(NSEcLT8PSB+o3@Y$fgVc8_QD1GW*prLQunkIRv+8L6;pqNTWJ_U zh7ZWO=TsIAY!1+XJ4p)D+w!2O-wTZ9tX4qg>Sav81HZFHdXHe~i^1SNBH`<3Rdb9C ze$zt54$ldnGv9j|M0QU>yiV;%81R-)<{0M>R)qLDHW*GM&K7}XJ3uP5NfQCf3?)NF zuUY)AOg^C0O>S<=Z2rs@y=bNQIFch=KKq&=oF%$jwYw34O1=O~%jfyEL~?ye>Nv7iYEuLJ=I4o%HfXz@I0by= ze91TX1ikZ6*8QQZ>|~38MQKRDpDWS88w9kiMvRhPO?7Ss-TA-YM&fSJ3lrCa{1FI5 z^^)Qr>SU`Zee?YXKTnzibqJ1PJ1uHnxB5CWqU$^jB>EiRy=R2QT>EhBX7t5*)_ZP- z39LSna?Fr)h?iXHW)FS?pmvZPWx8hm5kGW(fR(voC0Eg0z@ zD@I})2MJ25^G69@+J|7nwp@1XDVQ1p zOox1|SX_LlY1!!Cx)p(i@*M^74#Fl}OksceI81@*tQ@wdS;)|(F@L-CmopLeqxj7n zB8%g>7nJ$F|96Qv{)h^Y$hnT?wLN>zfQAndOg``DeVDxQn-Vyx%HZ-aSoyI`%Hi(H zni(vm3Sd6|NFbQzFha5EAeoD(+cJ0ta!bH%nZmN!)Z%bx_FNS={(QHesK2bh zFEyo1x4^+!23CQ=r>*z^M<4YkLN2cl#N;}_M|Yq??*>}xsKvc|9|4W)9Z7wZzH-a3 z&SOXln3HD=SI1&!7huLA`>A2)PWuth_YeJ#O@oHQc%N?~KX2r@h3@-$Tto8jC_dmN z(fCKbSa!Rn>y7PrD!Q}~Tq)gFF&Qd0fBY$dB-oA78IpIYb(PI{buA+_o4x>-IH5&w5{ z8J@ozb4tmhA8Vkm5~rGP-gc{RIRFn`>rDCBqd#&8OgK0tSRqxcwIeJ6)yPpouLV{yPFY3rib`HIcB+{Gg zVl_WBzD{qqx`Id?M}s^7F1WE@V{MI6X8W1 zfp@B%tNYtwH4+~7W0sXp`1_XNPLdsn7n}s zU<=^RNAIncz@hlNw*$uP{l@Zm5;zWr@e!bV=7hr9&w}xbdP!L9g;%J6+!-&!F z+w(rZC*Jq$kKKRWJ7?!U-}`f2pDO^jI0e))i%fB#O`cRxz@;AB<8;9+5bwynhkS+e zxi^rZYf`pDCCApee#txTgyS0jhbPMEIs8DxbK1kC^o#3>-k$8SiwO#3O;o6RxeXKEcqe z-EP;NeU0Y;r5j_k){{s*rtyzqW?h9Y#9FUW%HDu)yhksc@Ix%$fuldD>#d#BerL3Q zN=y}_$*`+i+>9cz9gyP@p9EWJI}>oFGrQm5-Dmn^tGiiK0AR*-ne&bXPcbQ$$vrS5 zdMI1Py%$|&L8ILcqK@sI#8SWfuK|&?Y;`6+Jywmy;E<(;p*BJbHaAc+Ni6C4ekRji zw!Do3vv!>j!54%nqeekrPFc6>@YyfORou55J!=En8}K4ECZ+(`(Ga~+6+7M4%)WhZ zP@GMojy9%dAMC&L)a`P-IFRHOJ&9Rbncs~qx-@YaW$WvewK*B^TvIHE6#J;)UU35+A4FG5h`gSoJ+me^U|(w zGB)`1j-U&%w0U+${9|26iqM;Xq!x{EN!fwg5>FcM9QgI(*IVx+}L+ zJ8Mu>vj6ktMU@5f)V!ZHlXwyD%+C;!NMbGqjQHG#%E*8o6E%L@{92ls5CqMxo|F)y zwOf24&y2N;ksk|V;+=7sVl8?o*}p##^|$?Xj&-lPs)nt5>2^84P%!);>`70eWBoS# zo{J#%ET=~GI`w@~wD&=yr;{`+cb_&%qTW>wVkV>WsSKDp`unDof(`T6yYaa=y&s#} zWz_wiDY(W>>o_NShuf2}wDEc>WP0LSXjOi%9e(3HzrF!gig=4em|nA;nUUWTpu&}3 zUtj8Jl8+<@UROLhf0L;9h)0Rax{d-JKN6yS>LewHigfP{efq;zB4dRrVMb1NHZ`C9 zO;q*1#L;ZEhzY`P%~aJ(WfD8v zAq)*-v|M*F@2J$W?U@ENl=HA%8*Z`FfBqPat186{>T^9*qF{~BMZ6<-5}LQyg}9GY zDss@@JP8oDx@#C)5Qk%syZtn%FGgc(a$2#{BR8US;_GKG)d$QFRtOJ-x$toTFT(kn z%~==Or!)1JZQVhH-nx|7)l!c8OTu6q_yw0#vLXA}cgoGqz-jV6cj3$0=8>$#s~igi zG8iWEBsBRqmseV(iX0BBr)FMllSUl!s@V-q4nF*QX)P5uP^?yXISJ9b7VB>&9c5@trarMcF9-q{LR?Ku+Wpe)ZN7x zz#X+$i7mJ@=6?3SK8N49YXZ>AGMfbvc1Isd(f%*)9+K@c=MrjlzXd$BA$bnc$V@ym zO$9yyv$HWH20^HXK)e7=CUuXalZ1-bGdw&Zjv3k7VCjrXh5yIfy zjQ4F~m_QY{vuXD8fc9=1^&9{x@aLAs_e#o+9*HEE`MQpg$7%tkueny5V68DGU2&T5ffNU>4e#m9{v(L)bQDn-tDdHfIBrqZb{wioTp$Csm zEd{6I(xNL5j|yijY!6_Sk0Y)9RA7hHs4=gT`k^?ljo`Bv#v%;EVBb|v*K~UpVD+j2 zKM(*58%H8S4DlKy11nEg%)F#n3gpZ(-3PbbhV1_|`5H*Z=>U2N!Rro|My3*a0J(7s z)GE=JQ#1B)Gl8)-RS25$=gc9Kfj9&fdSfko47}9vtmkqMH zX>9|Vm5`noUm(;DK*1wW=~1!E`Ai%_OqVPtb&5Irr)~9DO)J4hz6Gp4D)Q1;5G>|w zK$g<-je0jUc=WpRMG&i`I!SU^L7eB-Z_{r*h(s2%;GlJhHUSEokX47hUA%n77+8zY za(Kk-N`3x?jztLiCwkh@+ir6_efo@Bhja%eM^R!K^t@uny33M47=o>GUz)g5KDdK# zC;a!=5NrjihMtzMWX+8Az32*da$#Kj=prCRpOekpJ7%*{mpl_H+{7kt`~y#(MCHd% zj9QhA^;PajpT125NATg}hLPJ9`I?LA%%Yd&x97MnI%yHpVY4ach5CP9&BTYuD-q=^ zD)L9wk_=4*x@@TG@TZlYd`GU))od9*v)jqaXvKPzYtOB&Z83RH(<#apA0QxaFJ%<| zb3K%c!PuL)n;{HRzwG54mn55LE!~CN`VjJ!^l`~~0F~oO;boc>0)d*vRP_nxS#nC3A&NoN*7(wxGU*uquM~${QoOk-t{gWfmsn6x~ARd?q zMCRBazRAsd_}5uuEx?1U8+>tY)}3XB+jyvjhW)IlmC^?&qTa~wGs)W50acsN%In;e zYHJ=%&1enr3-t1C_3=|ntzVStwSpD71RS4ZN zn>~?f-*@Ze-XBxK>SPbvFJzmZwTG}l7*aPVt_E{=DO+&5D9vKYLDmny@Exs>ozHC` zT&u)xE!^N;JHJK+jQGwNdn3#1uUqNKRK 
zQss?WwARzN1FAkW!gg+#!ZaBpKcPNdzKyp^sYdA<5xQ&jQUe~4*1>O{$JQP0oB5Jc z)%lAw7?ZMR*xX_V<81SKy&t@Yh9!AqVr~U9bHhGgU@7-i-m0B&h)R$pp$+sGO+b=l zGtxnqw*zbU&8v0e-bxT|)eleq^2rN*^Oqipk{@+s`N$d7->!sNJ719QHnNZa@Vd!;1{@F&;e;?0@ya{u!(Y z?430Pwg)$&fN|^iyC=Ux!s_#ISO(?reX#Ubs;MOtY^^Lvyp1DBLuZ}tkW!w)UF zzVFE?kHuqzrn`jwT&sF z_7MTix8V}2@n8ZOUVU##leCdd;tLWz`fBtv=e{&S=x@HO8VCE-u(9{T)A|5sv*A^f z#OnJ@lrn{M;E<`&U|gWS^$B$5@*C<`o->GK%{1AU5FR1kBD{p>BQ4$AZ_qKuQtZ6b zysBggqC82lfUP->9ei^dcv3S2ecnn29D&rej4Ytm2$_O5<@a!I!uCtc!ruFoJ^qR- z_F6qn>X@#(W(@Znmu^_sCQ^?` zIooaydPna!wCCRkNBZ$z5l|K;shxYdC`NK(=nj%m-FLP_SQUm38*%N$w29INTmy6@ zKbL)z!~d+jyPf;R`d{rjIz#TV$_rLj18XHGd+qDwr5{r^bal^Xrd@0>5?F#Er!38N z5K|U>9=S&N(SQVpgi1W+F8-yP;!4M71b{?<)L@u=~TKe>kjN) zjbW+hwxgF4`>Mke{Y&2ytjCSs|01_M5)5P`3tDf`oVoMPik4iH&%k{Unwko&Y~Q%> zQ_2ARaR1<8P~{b$-)PrWGU&!wx6i#il`UxQ+`FN2avV==>C3lEo4Bga zTPe@fJKIfrlB{H&29ex}Rxk?@fu^shj~uV1)>$1AkN^{uT}BZG{F4*hPXwL!>X6s+ zSKZSH#lM2gX2RDdSIG)w;2@VF=TDZ;t%u3EuZmq;_EiuvZqb07Bt z5J6U5!fdk7$OA|kB&5Cw_4lL+`WS~Nbppx}?9lW=Rlrx6q0PeiUxsPnv`(feV1*>v zQn}|FYJf;hL05Zr#h(;K;&E@03&28h93ZzF!74n^%Lu4MgGL9;kb-Pf-F%!Z^zVIe zNNElqay=Bs9e1YQbg_!@gdtQhplzH6Y^l&^3xI9^i?=VVxrhGHGs|v)yg<+)F(!b) z&R17w=Z~j&)52Lz08iJP)Ril(HOb4mV{{HeI$UejWlxJoWCoPLY#jIuXZ5#)SK}*< zSg=JwqbEbxQWVBg$vct@nF|Bi8OR!hhn$)v`iZ&ixDVr)jy<$<30dPY{`tUw=#R_sM`**xes{z-7i5=&NJQFx)eQN5uLYy-JHPY z@uW7XTg#7lUUQ|+IWD+oHLsjMzGN%#JNuzPhASJ)%UnNg#5kTINm)^I+Z_nx1Xh(G zR~6Yco|Qc!e30}J@ZOi23-E<5kT5ejqP19YibCa7lVw%_NbihY4zaCj=r0tC^b>DQ zJw2<;FAK)xbr28J$>_|W%cwxd(GDG&44Xn1mR~|Mz%;+?WR>p6RI|#xEMp-4{IytZ z{X}#8eXc_z=h95-svWdQ>$7i7##Wr-`oYmH#p-0DO-_gFLBjTajPmUcy#J?rptGR0 zD6d>Jc|z8e&NSm4<8-CHR|q0|1S00Rs(m&+yue>GYx*BdX2la$KPJnY2i@LkIcaOR z3vlx1(64Eq?jt|7ncZj~C&uU0|2_B^H#Q`_Te z#Hp?H&R~ngR?k>s?5K+?KPgxUh)`no>9%Q8z;hS+S!Q+JoteDD#*CX6hZ6TT z7N6eHc6si#`vn&Nc<-jnamg_Vh6PSva8S%t4PcTw`3 z!g5Qw^orlkp60#W`@hn$0#QqM4>^ZD%>SKemCURf(4*P{ zJQ)bV^TByKG!YMGwd#gm^`IIRVHX;}vIF)23L-6HTDgoGH z36MlKEXi_Q+v#AM_!kghZlBR=uHObY#9h70t6=*03=5~oM0NDmTAn^(l5^|?n=J8J zvWd8wF|PP^i2>hfH%d5*w97FUP9$B%GqmtuwK)9X>{RUFk2vm!k9px2kUaeG_67L0 zeL;JlRSet#ct3qcwMe+hjV19BDIl{Rw@xn$u$)vtTaq z?mA-UJj(lGysl-LuuO-=*bmc2_YSiHGb6;z3>~;tWPw!QslqnG2FShG-1l?5|pW-KKhCTaX7hG~EPDk>CeNO5&VZ4vr|u z1lb)#$iNP)?VOXibpOG7o8%P@SDT&fgJIbjDAU z6bry+Q26@z^vTg{a=vEraHY_b|aTsU+L7x!tch? 
zb9FK|p+|qIq=lkDtI^VrR-S4gR(I3x@uGrqDvw@nCOq0+q&DGZwW}s}CMSg3^(yxO zJKGxW#JOU{vpaQXT5E4Qxy+N85qHb*)(u2$YdDp8V#i=~%9qIDJ}Ikj!dJN=y$0y$ zy^TQ3*$2v*>Lcl2MCQ371L}X_heXqB>uw6SXuj!I7YEFzvFW{i-GOHxDdCj}2RbRS z94fIWpGkuv6>Dyk<&kMHQvQn2ucui` z`U%%jJ!`|gjHP^f>D}%i(;got7sHz}u6Q%@qhscUzxw9n_R&R$-#6=XWc;8u+qOV*P=r7Owo{cmB9D z<)#xG9|;FHok#FpxcOq%_V_OU24C?cA7{UZM03=-BEF~vPgSu_JRZ0x4n*oN(y0V6mVP_5mqVy+>G_v8<6w^5Y*QD$&hi+EuI1DN;` zZXeEbVBtP6Gh6u?V@xJ&8Oh|~0lWaV#Ml;b(2XZ4G;jjKZ9qoc^5PSDVw^d$!oP0B zje)my4!|}-@qHYuq1CcmyZdXQ`NGpH7Bkw!KB2gGXex1sHa%Zqw4ZBo6N(mQP@!_l&9ndRv~ z>c;Rus7LJysY|@>F-weVPq6)JFGFqM-@@8f$0t5I^U6{}A&*|R@$@3YbkUm&(m!we z^JvYRTw+V|vzUiyvA?UYOnU}BIFi{s|GhFf)g{DpAfd?9P!+fD3YnM@<7o2Hjiq_4 zrtUPiL@#Zp@yi`i3t=NcIkJuqsTMSh;c_EbYg`j3=Qd5*c|ge!vR$=nfj2tl)=RG2ZWm6@oZW=-wqvbz0Q552RQP2 zMC@S!BWT%|uu+07EV{6!L=ePSLoaXF1I?Q8ss@vjU_oD07&b2{T?%#xv->y+Ka^4U zI4U;LZ3zeFi9-85t1zs;O4t%^1Sy_K^|F(EBx8(Lj6gWLEt+4lhX(2NVLoqcDRx^M z6RoN`tJ4BPy|aog%?!g-f)`VfBS9&S{fK0A(^&`?^g^hM=A(0d0;b7U%=PGeCk*w9M#dTKY2Z~A*HO0*I1!3y@FCD4Bucc( zv0Xpa9NlY!lkZ6Q5Q@T5hNXQ&eUI;}5ZZT@B7Y+7NRDkOw32>k;fchGTz~Ou;E+!c z`Qmy@ZjijPKI5SzVRcIpoaq{~*?Wg(u%l2;x@TGjuDy3r;1XodfwM}6EuP}^E7d*% zevb3g=621mOr*rr-1 zogGycZSX?C@H@ppE4B2$-s!z`P&fO|;+3(Py}lkbj4Xz#;U$|nVVz15#i>J{R7 zSLtde*mQ^0vHDL~`rBcYFRTo0JUxlsI$h&^QE^R%lqTx#mmM4#a#^z#GpNg@*C|)y zv4$z34H1)V-|Pkx?aw!fKRToyOK*m4Y*D)W$Ub~ZOc_@_7X45LnBOh4DSj6Ty~sHj zR$oqKyB(*Y2X?))8n@l~-c_Tt|CnCj35OC9v!8;4W^TBdsr>PB{{$-(-uO5woOPP@ zBoaFP_{7`K+R8on(`HJeTHXpPJI;eb$UrrA4axI3=nZR|zqZ5tkJ-S)Dh@FYlL=QP zio2nZq-Gp%82UT&RH8EmT9_6z`xE?+ z5HYUi-fNOx4+C^%2UGLfMFuM0pdd(vJ#?|EJW7;X$anpTpjS=aIw=3|%53wh4*s?F zS+LWzGV8@HEC zE9z&x)6aGJt`(2lP_7;P@kco*$eHloO+%SLd*#aPv;TlQth_9k_LgfAb8vfb`7YL$ zwdI)FbEfdF4Wc2@R`PR=F64HD$qrKS?{~t#Y{WKaS~hEgX=09plQP#aoGWFF?%Fh& z4hEn|GILN9`5WHYI+<%@l1Ly&f z#aMQac&%#67XPv^e(0@SK)(ce?VcVN+?dR7?CY4-qQBJhjrmL^s*mU)J_y9RKmbk? 
zB7xz{w23$($OhTCJz9(mv8s!a5ySRNPIE|>K=)KGlUTsJSfU9)6;f0HP~&T{p4 z#B+g8ndO{V_G1O4OmRG(VPRpJK}*T#vgT`*hK8n}%TBCt0p*KD zdiqVcp>TjU6XwXPg*%_aRnp>+#j)Z(GJ4jqoT0|JuJy8?iou__DsN~M34JRpY%)XP)!!D zJ6WAfEn5A4qyr3d;SXT0!9VL4?mZuZdWb-ZOE7u7GSk1G9etPY`%h4}- z!CgjNkE{VoVC%z#eUf5?44vRaSBGjSo_t%B8df+7|JGO%&&e#`&fX=0nAAVOJs=1Z z^WrQqxEV@jN4RL%g9P_ag^zx((X;kfvIVB3DXw?0Wpc|r7IwIPZRk0imn~Z*v3Pt- z-j{z}-Q3Tq|H^J!DUH4<_ztJM^1ojqyM9F2@Z;*(ewI*<&VW5dF;|Nh9GvVVX!|6g z_loaY!k>lLd?-_|9D@=u>VAHIa1Noeqj)x*RTLzks(3Y~$eTDeKvndM9u&XbT6ENa z**E<_B;41Hnh=&xfM?F`5BgP>8EU*uVWJQ}FL8OcN(N2cnEo6ijF6OPHQa(~io57O zNlHO*iS_tj3k=W-bET~MH#vR2pObl09(v1sm1_AmuPn2cF}a^2RqH8VK058RP``7Q zJi{S4``TjH{^r zr#nLoG40TcN`5Z(rmt#ErJ6D-#Hv3$EwH&`nKQo;{s^g^oI$7c6^&=kOHhZd&(tw) z9=JyZ`XMhM$pVTt);V-$^;a|74rRJ+YRCbUTbWF1rLUx0(D(kd!|(93c&T&1$$My4 z`B2*%g2b9n$}4!+EPqC3PN@DG&sNxRLOK9NIV7R&Gf`q^9~gsW<-hHk-?%;oJ>8u*kW2MpV?z*vlVozO6-!kDV%g`L5-jx zI(~!2d169I@b>(7Mt_dg)%)phZ$0A~7pU!ntOt8*;6KGy^4qru-R|J!Ql|gKXJc8E z7+Y(F4fo}*p3zE9+GFK*)A@^+pIRjauU|o3^4ACa;)&FHW`KLKLz?Q2f z=yW?%#?y|;7n>Jd-Q}k}NGjBC2e6||hb6+yz}^F2aFc*%#GCOJ%P~NkHBD;oV$pH> zX9Nx2f{vvWxHV5_0aU!=T2t8h05BZGhG6cNgNZCrzb+91^Ky`S+$-3UU}N*301(`# z^8tWB-+^A>4Nf)3q{VlSK$A}wMMZ8F8bw)^%@F3ZXRVwpunL42Oew25oKaES8tbxT z1?V_Z4z;<~jZyQt*{C;_x0WyA$@rGOfmD$dgQ^x$ni~9VS9&69% zyWZz$G-5&esnfyqrg@{_&F&@(jrHviD#6pxsdopuqi^?mr)Ijv&$z;@Q)a&%U6v^t zs5RWSS&40V{xUt=#r_I)@o~Dr*MmbPA+vV+91@0DW3 z5D$@S6ie z3||%y_}tID7b*Nit=o;*?kRF+i`m?6wIBh#`#!<*?)~=(xdzFor=ble)d@|;`q=Vi zj48{YB4mEwkX%_-s>+i2?)tBC zyqF;-4%?r|h+a7el_yieOq$ld)|)>+9qUVCf4scveW%Xkx9x0y+~@0g?3kaBKYzJk zTK`J-8`_dJ-f;A;G2pz|{bw$#6`iZAtLHu=iq@Q$m0HML`sbu8iG7HFaKmXl{HM7Z zkIhGy;Ue5Oh;-qkN?2Sskz$cQ24D$1;xr?!16D8SyYCqX>i~$c)_M{3a?_mqj~EcR z+GoVwX%LQwxT18V6U~Z3n~(4COg0<4feTG)Cpj!T=75PSwIWd<9NfS9SdGj8ZL&RfzD?(K=`Q@?+L(@V6D_gP7?25GgfM_c#cTBxwiT zy04Y_|A`!U$9O)zMNQ3LowNpMY4(;qlnJfrIexp3!I`?BTUDK>&8uAJ!&XkBB%yIh4H_rL8})!zpf*M4zY<;nAb(dmT8 z%ec#Ng|Z7nYLYxk$*7ADAMag|Cik1p6z!gUk2t1}&$8~Vw#(o|)f6QMBzg4R8zXud zTTm~>>y#O+$?vJ0Gw**bqwbqth<~w0r9ccMQR?n|<#c@Zml&yJEbKPcD{fz#zM91E zuW9i%5za-8D+Fb3efi?ZurYfK>~zkI(CX7xb6mI)&X@jQ|=o`h1v z+JivOBvDP52Gz87;f#&VHKkg%AzCMFe#oWj&5z3wl&-%IBi9>)M<{Gx;(8J}`K(D&4NFs(Mw-y10n5 zyW$qtM-g@Hl!!8~S#Wxx^=fbR>^GqyLpM#kV z5&ol1@8Ig120bXD7uSTf8eUH?(l7k}CK&ox`Yg|NsWp7ZTBaQvS+rX=x%jWGCst~` zZ*^FBUb%DYkzszjHQ$xM+0Sy63s)4en+|rnv9UOwOqgTL{3R<^)wZea%l$8n_P@TZ zK2x>WVqDQ3GtIrzm0`9|O9u&b%iuRUlM}9H_W>?YNCRfN^#RRJL&?nGPM=3sIK%0MWLV4~sICNvc8n_D zF4ARN`iJ?2C=+jy=mCAIpngF$K(!T@^5k6%=v>OAo8N}q=AP-3uYCQ^Y@ZP+_!^u& zUvy4%87KtR?-Qy4z>c4;bWn+sd^TN5^T#@=lJF1s4Y;(7+Oh_}18{-EcxHomY|>-t zjd9q#^SqcDXLGx~q*2rBbBLJ3YNly>kg*H%BvnLoV}Ik_`QL!`Pgl#|n!BWnS-o`T zuTS-VGEZ5t5HQONykde{`1qXmPGD;Us_u@|S7y`?;1Z#0()QcKkL}ie;fdMyeX4aF zh1+Rbhko1rVrZvv*1O62E3a+{D-Ms@1`UInjTt4ND~|}4q(zO#0byz$fbq&p#=ro4 zQ|CR$oN}FaOU58sLi28u<5Pw4g$IN|Y>U7zja$$HNiTAWxoqHD%M$%juKbjziC=ho zvaj(-)(3dtq(5FNUzY1S=usRO-QRoRKE3xP+d$`AMZfiz%T)B5hC1(2SRal5CS6v;U zo;tnow>Sw7#r|=jYQvp7+eVc4C=^$^AKTdsf3Nl2DX_RH<0Tlv)xmYdW!J(30R^~3 zot+Mdh6$s_2Ylkk3YbC;vq;8;&yB-5`RK=!BLcvl&MhH^WS1W}A%N}0d;7rc41UVl z`h5pD#lwf3HilrIt6wa94gx#wA&Y!Ug>*;_(eI9e;2;(1Ki8Wy%rqy|Y-p>3W)SOD zDbut=+1yZ^)T$VXU&?d!BBY36&;z9))m*s>`TUh~wW%2Fn>MZX=-d?V049vIFlXe@rED^}-#l zctG&@2hFl!I55yfgwdGVz_%zK|f4INeNHs3gP<%w4I=vNwzrrEn zC5}Ne&!D`N+dt~M2_bkd8iZOEZ`;Q%jEl5B#ea-pzL+rX zI>tl%xk1Vrzup;zhObJug&P&!;}TYaL8&HOT`?r*g=+}a+lQ<)t#D->C5P^6OfJ8r zMo7p@uSO~4@HryS+DYe{maYrWcHC*rR4d@Uzytu z2E)Wmnj90MMB9&rc8|3;ga^Wcua#l>-heUR53Bm+ZAadIhOFiPh6~fKQ~_0Y?VU&2 z`n|UHWFezUd30jCaQ6$a1=fCpeM8L3^4*?(4xM$4sJi#xX!(CgS4FB!RP)2poAsP# 
z^38wQ&=w`SH|RC3t*zsk<)!pwwpP!}LpyD+YFWYJHatnB=l4SJNkOeJMB)ix6o-MC z6}W`g0*Y|#2fYHm?%r?2kp%;uosN03!xZR5DGrJ0=ewj+t1K=FUgJ#10}J@d>A;CF z9FO0fW0YCf_ZkSsgZqxVm25ue1DuwaYMyliRPap$j=zygVKavC@_a^V)G-Mux{gm-6Y-emD;tNTCe?;TAP9Q^z)0% z(8LdeM}*$I=Q0ka=}6N5Y+axfBwM4!UW5Zbn}-}U?(vwDK>vq4TA(ukZ-zGx)~5IaQy zIj&;e(M3ZQDGyFObLi%=*dFCv)2ozEk|~u3;ygRJ4YasPjuzq}TU;q{$&+^b`&;>^ z(SG_aW0!ys;Xk$Usd>M8R1(9?@*TZbH+48bIyljd8=244KL;T_g&e?i#1rHCb+oy) zn6+t@Fv|R*6~7>f05`ppNhc8gzND0m-<*y80lb8^G8U;~Zok$&ZrbJb=6ddrbqu3> zi*OU^8#t%aM`mV4d%HeL-d>t~Xy|-!4~^LC!=;w<6#c&89(WvqTbJLx!HeZ|bO60FwysgQts0e4n@`GcT3*-)F%@9a@}l8+QH)mL8_L1q|U2$hRjJs&{` z;Gpx^TTB!J!M_Px_jz0Z45(%edctdg`2dbmLi}le@=Wtkhb`J|rW~uTM4~{7v(|ND z1SIkOc_jyLgre3VFA^R8z)x!qh9=4vS%r6q&!xw_o(vc|dr|bXlgqlidNIm>233iY zG~tq}GvHGN3}ESSuaB~3+Q+-w+lg_giw*RS`(3F0PTh=HS-oPh?prM!tXWfF^1sZQ zMnvD}sZ8sjX8$y7ZlA}h_d8jiexfVf$)D}tu{vuD*i2Smv36_WeL34s1>Cxgy5F<^ zRF02Gbt_XroycwU%fn8Ft0WXm^s^jg@OxK8pO;0xwkiSR^v_$z==#!W=x&`|IeLIe zdsJ^Vxb$+jBzUF-!LsXzVY!0C`DY?S**<-tm_4Vb0?+eO;!g~>kc7p496hjiPTwgG zY!!iqkIiI;zzz1C^AmMo6Y$=Q6z~rd;YhJgxENRXV4KShEo;KwppvjU!O_Gp_lT!< z6W>5g=$ooi;*>!hXNkb5O4m-RlJm-Ib60fQ`PXZ@PSHBkl;b`fI~79ye&S0eqQtwf z-QsO(<#-FK0e7}5xez|7R>_7@2i-irEGK)V;uI9tg+#8M_Gwle3LC}|V3U_>+?bhk zv|8`QL`feWEP>V$XLjCKdTD<(_b$GU7_c)*fuj(bmz<$W5se%IP0AfAE}X$$XCGzR zJd-mHsDN(+^wd`Bi?;{vL9zQ`{3F^QnWJ3PSzT9~Gz^@X><`VHZgnS|?f7iRkbK01ZC`G;BL5@9`(e9MS@8qgbHORjBW&A}p7=ByEQ$lP} z7C&#qb!>UpBuH}n^yE=!q$D)muM+MX%|OpN3_B?EGDWY2xa|#pj6dI9br4BTMUPuw z>ff${tMW<;kp&1i`L?1Bc{ig)$eXp0q2MeN?(gBR8i-ye&4+3j2=4e{>_OZ?n2wpcHP7gQoTaPd%rdO#&GA@4eu{!_cYoab$G<0}9v!qnt$J&e+b(d>3r>C% zJ;naIu>!)^{d8`$jcfn8#XqkUdBXmY=eKwl(X~)?Cyxp-CUt)^`gqQs^J+}qs@csl z{(&UF9`vFVofXxP??0+X=0@3KJJ6-mZ|U*92p#fQ{ecd^f=8Xslo}hO?=E>hCR+C z00r|FaLF5JnEy=ZYp}Yp7(-9D{7lW`b+6Z`_0>vnMyWgqVb`0jhk--(GhWD0C*{s|i5xT31GtX`4*OFC-e%^!g6+!3Rz zej55b4nUIuTupq9_uZgzf@RKkW`?e&h7R=L8eb4gd3=!&KsXDOOF4`yS+TPF=OTdC zZ2CmS7nahoE7Q-}|0=6!9s)iRZexc%?K&lL;3;?9@h7IO7`)P;=<_aPrdMaz zOqOk(P=_S3&LlC~EYz3b0m?@-`!1IMFxri8W0$p`|4d9Nb78?$zuoNflD>4;IlJ!? z`n?~rqI>C3XNfYWKdj*^qjJjl$jU@Yp*g$D%tV>nafS&gXcyN86@Z`Nn9pVccTTHF zIPJ~?BW#xjfxB>~ALlb5?scYC6yE~w`?3)Gr_~fN(&Fnb4APOX1OJ3P! z94hpRtQ->%Xnb`ZY7t=_y1_>^omUlUaMu&W@d?(eEC8fnbz> z;hTS8ee3d5UlX@}WM*{Txz2lL^_sL3c5xKORZv4@7>*a7VE-V;fde-Q4?<}uSD)V- zrO+UKRdXxl=Vg;~^t+*uzw&^>?OgNn!-sAb-F_9haEL35z#NJ`fzKGp@&TfH8-+$! zp6iA1ppqH#Q^>zJw>Ww7Y-dFTPp=?HEU<|$9((NHW!#h~1LTfmE&>ox7YI~OlLbqG z!9s)1Gh=`>lP0Ugx*<9NpwM@gB4IyfvpGX2AD~5stx>D0#al7Mwt>tB@3ux--IX5P zEw-h{!Sks4)f^0-mok+oDw&kN=YEc7_BywaJcroLK?ivl{sHkNGLw(i;qev_sqI!&%Yz%;5{gCi@}@W zj@hCdJh*6R@1w|yI(H))32XZIUuaf%l$CfCx;svoKItugfNnNO@`;~HcM%D9mwaNy zgqO#|4=ngY%Sm0Yi|k|q4`~uc<`KhCu44nr>Rsj==b=2oC*aJ8jii%n9pj_)jg0|Z zKKPxZe$d%{LG@)C#f>gArvu`vM60cU*HT=s zc+!dbxRB?!^|bcw@9aqjfAbgf*HP~K*hj+rKLVt7OVC}w-PB5*dbQT=lnASj)wiuB zlH1?^gBup4JCxHRgfqOxUtm){7L*QWZT`Iv|7YOk|B1U4a=$k5xi=U5J@0g9iWk&V zT9QwRy4v@r0utNeB)zZs3qs18P#uy9;`ITwD9!V>hSa|T3Buk3OlYEtIiaotHY^m> z0U6@Yn;ikBIWSk{u*Deqy29kwfIr=ufKrl~o@5|w*NZfAEc6=}wR(fQ1;A#7V0(Zc zpQ6nm_U`9}e|MwbkSA~V+c98y*dFKs!gF8c5-7|uR=Kwm_vb8BWL?mLicw83VG%-> zC<|AnkPh9^MxS>y_r_E`_nh3SPO!JSXxn6#V27UUjr~l*4#GdeMB-LP+eD3zvnLg? 
zaw}Ypc*s}Msdakl@4(fR;Ph1;;}t+BM=RS1mreaZ0o`k}38OL&cW3rY0HVuQ51+4n zR{Ua60nZW84b!u!^u;v=W#Uol(PzY}m0xk13|hF?odO#Xst8fW=2rzXFSqoIBnw^> z6PM0UCa>l0FQlk=*vQ(LJcBU-i`So(^|RBMIju;&vZ=gkgdavs;(dFO^`7i2|0~_C zk4~o%F@?HgLa!73?-QaG6!B&`^4V!0a0apy$W}dV@584zk<~sdX38hB5s?!<{2+cc zSFeC;p`h(#Ez7+OT=D!e7*ny~^ZZ%7o4GuYe@RZ%immSeY`=nQ()0C{z1+G+x4Xv) z6MG6W!kKF%niNra2$X1Z$OZd%OH#~xMgv}7!Q)L_=V=X_hT-N_or1U9oS}?ecN5iW zPGgzp=#dxwFO-ke#r)=5WX{*B6G|n>-@hSXp)66cF+~5JDQ_-0P z9}7%}_FXvlC56#_4T{N{(9K$3YI;MWPbJOL!nX^`*f{wc|DWBzoIaP7bMnCZF(F%4 z`M2ndWpCw+jYr=h?Psy>mWSjb3j@fk|3K>Na4Kxk9V(H0;k4B9W$T{qi(Uk>*a0ar zeLHq_ezB3yO0IC?VR6|Jyi&vWaiDYMcWogByAk6uZ%vlbklhuV^#=!hDN)9F<)419 zu7VO>`@5c8{y(bDGA!zUTl)-+pp5fA%@=c=5ZKSKn)`d#(HatWStjtHmg3ty_%-o=$bM->;HR+wJPn zENJl$3VP;jDGk?7b}j>Q`y=vp9}@kU?`qHC(A$O85>|29(TVL6Y{y{_u2x_|aHtg| z-oKndh>LE?pMy&L`w8;ZT;XaInvfXU5BiGzdrMZstWZ_Q)bP|59_fb(s(p@Q{!FN% znPoB_$MECFeLD~tBEC{?GRbK1N>hzgTYtU*V`pV*?$OS1(#$@vc!}Hho5!Jti z{C*lESbZrZ^9ap30Rx%3rVWn$X-8@^eyRblq-wUQmt-_l<$wR=Pp>a14NrDULOfxL zr-t#?Y0LyIXFw%C5&MKjtJDzQ{fH`8SFqhvR@x@ZTH~~h`>0;HB8^PrwNNk5u+kym z3ZN5Zx!mb)x5y%V6)T~tAZh;gjs9Ul64^uI#|ED;CUy4VIHQA)H-{Vq1_8T$S64Bc z)=bJywHk4PAn-HyUtBk?QaeSX_nA)m9C$v33H6@T4BXR`i^R}1DVvb&DdU?@cM?`3 z+pJl6Lo+tN`)97g98Ku(7~Ey+Dn61;y@-uC9F;_MJa|7qy(+d24c#C{*BkB={6%ag zwFu7t&vyC0Rl!&1U(oK0ii~9{Gy%gNC2cWc67Xy5a=Yt)Q@Q?KMR=pJdHk;;ucSO7 zJ}IzE4>Ne^W4heZQcjbT!P>P`&Pt7ZoXubFJEivb=*;JyME=mOu{R5^cd)3u4iY6R94$#pgbVIjJpTo zDX6Nd?k4$Ra|dp~B8z@D4>CR!Q_F4*sK_W#~&;(WiWc0 z6G=54ve^kSM4x>U6Ek;xVjf=)x_tr{YOKchzN0C^-FWmOQ-Y;3WaW!*zwUW?iss-3 zThNP3mT1vOvKRG0u@~RENC(S`rGvH-*a0^)Ox|jc5>o@=mKDapmIlCIj+g`%Q#voA zKkwOyK9TjK$r?fy^0){9YDP^uh_N90>>`|r^^uSgrx>kO-O2~o{K6brF-oi;+h1}( z$Xt4=$5liz#3%Ia45IOw<(MG}G3eTE(e=z7X!HA0yBvN6*1R8m^>9KT^=^0<*u$Or2A!WUo&%kK#e)V>Ectk&U;953zBbN5t#D z`k)cnqp_(-vKOKDQT8mVe&;W?_BZwfDsN0P`A$-)Hl-HlZT_b>>Vh~E_?`c7>XnEL7+AA{YxHL&4C)anJmKDHF}mTATG%y4Ok$$xGoz z+75Rob5SYK^SCP?nFvm#h3E4KasPgjky2jB@SIWzXvjbibJxEtH*CgPGxc1)YVS$| z1l~HKm^bKJ=wv6`larX=162tBbe6`;l-6ECMh*>G z8VlXfdv>}_rS;|L%P|bX%<1YON=tGb#T}93=A?P?=4!FusxNxz;M7r-wbh zI@OP8gAzn6S{=q_-+cAb?}bp)y=sAZhqx56jN*gBC>i6s0*?F~mTOn4chO&{b;sdz ze{0_*49-_Wu5~sg)Bt@8plJP0gX3i|Ku(V#%UsS;IrMGvOxbQJid%f)rzZRwFxvnSq%D z(t_q-EtdGQLAHbe(&(y4gQS|V-7HfHb2HurDRZ@>etFrrdSJ!7tU36Az^5NS{_z6z zrNj3Ewz^xkQ-)gmL=Gg++=#g%pZJCbSZm7Ch}uMHzK^vkAjc6{8){xuT}FnE4Lp4D zwGLZeaGcSQa{E1>kSCg0+o0PkcX297^gE#jnYf%(U42Y&oeKjW7|Vq{iRrwWyfmAM z=u^1d>@7}5szy*Z$f@o+{|?o2m8Rjh*afd*8Qd#a|tET-4|v?PoN&53?tzxh! 
ze`P%$eFG@a8cd-tlP>dH)c##IG)JwNmhGj>;uRNR567NuMVFy8_c%>_nWL@E@}?#P zx#wz5clvaZlo<$G;S&8IE<+Dt%>blZAihVt^{K|UV#E;H`v}{bIgr0ptB|(7v(_Zp zz$4@@A{@_}NSoDc<%j{~UhWNaHCQ3Bwkfcc0ZI1FkAFS(?bgR}!7g0Xi+zQZgwQQ;}c+$kzo>$^mkbC&s`4iKb(1IBZelU9*Qrl zgOL7}oO+f4otOUKQO!`#LzFM%2=zwE6*C95;Via{>HWA$!vtBzEcij&pHe9pv+x`Y zO{hMn%IA0N^43Drc_+m?7aEZ&DPf8P zMT|dQPcr0KD(@jDw&~LMV|eT;EBH9@JGHQdA`v|wGcGYLwUR9=eHz!N~Ns1YRYfzsz|nNmlgNZxmaJx4H^U0rAj7!q=0cN3y64qJ-hH!bp;3t6ukq z(C{HmhnP#qaojp2U_-9je6ECgyi#-?PpT66%lSfhiZpR60Hw~GI?V(<@pWFxZ=|;p z;4aiw$@wPzD(D?%0d{}@nHSIup1J1`^|NKGnlC`W`0hh_+u`ZZ`T#pLQi+l$@?auy zpFe_Xe|XJD8HZgyHd1nAS@TQv-Q0zh>M09+!w*yM*(Py+!+RA8vG4Y;pK~5C`@cy% zp`Q#5m>X3F?W5WJ0WayIjSmn5Sgx)8!gi+6;@tvB$_BaocL5zp`QR5m(9?|Z3tN1e9<5Z6uecq!j+P*o3naXbA0r)PIMWOnhkS3-8nxwNOv2ckPN zvN58ni>sO^Oz`D-0mXaTxf3c9aQw}c6oa}_UY?I5J$i-seZMS9PS<^6Z#-MXrAgeU z zQoNz}-q^U>V!DRfd3SDNvQqH9#%*P1J%3b#h0!Ait^7w4Rl0wH~Bk&jgwqshm7 z{fQ($xs)0533Qy!b;guR9qR$@76+WS${$EVlwzW;J#W#!bD#?}?;)62oD{Z+V+Z9fG?zZ(KqICq2--J`hUb4OY_XB+BA-hTr|HPDicXD+d*$SnYdKK~lCd~UV3^A)t*5>`^ zyMH}Y6$1KBva6X%CKx-@{cpk-S1uTr51M30hlgm13v1dx74AXZulAjke4^?10PH8U zlUSoE!TGclN#-o9-!N{>`b}}{A!(!kb<)XC#KYP4bSa0Ay}#Hf=yK7T-cy5qcRE$t zyGu0jopW6%Fl#z{i|E__`>egUFDL*!{!g9?zQ=gJLNXp}glUSMJBW$PpO_V?DXrmx z$CoHL{=R_6w5-Dd1B)K;qszOCE0DB_mm1?Occ9tIPbkw|qQ!bY7J!7AcW#G?0AB>n z?~F}$<1*IbxG;7$v6Af&=|qfJx;PRTP3HJq zCNa!)!c6OCpkUb<`uIJX?jbOl=1T)5l2bP`3?hnLW?>uJDG?aJwXEpY;IjWg?&%;_!KbRwl?V0Xh zug5i2m4O$Bwd{?$C9VamnkCOnB%fg|b-6!(lXitvo$0V4irrIw-u9EoG9#{@H}LKm z=e*>mrTvp5_|v`%`#5_gpFVBEtsa=W?OH;bR5TXqqGxaUnjQFQSG4*(O+l;O-&>6y zBCD1yAsQMjem-{>*N=~E)HMG^*h65-3`%U5&Jt8maWjgS4y?~h!SvuD#HWuB5n7CL zQHE+TaN5;$igpjj-Svinr{~dP7Qy*sSteo&x`7MXva3G=vX&gzM0W)yQ)s@H2qYz>!7jmc!LeZ zl0goL`q)DpzaNs>lPxDAGZX)(yHC2`thJ_EC+b6kW?vna8T6MhJGoMs{m+Y10tb?BPBe`A5M6#GayRW584B`WchXUs&GUp+Z?b4D zz{%W~#KEhJ2EglSJ=n?*hH6j5J-o{Ab2QW=fL^ppcxJer+Xj~?s-Zdwe9wT;)cuC% z9vux{LQDG&$&(GaTCW=!GcGNY>l%2I`c4ze3fE%^CEw1l@0 zSI1?cb6?{b@I!4e~Ny~_&U3&xHRUj)dMy~jGzGj%p{NDzGL;7mbXRqqdQ2^lPJeq z|NZ%DNez+DE=HHDWAaBZnAhpW1I2pb$EVye=M(dj)Fa*Oiiq!chAwh>|H=bHu6;(i_m&_=)J&%vXVe`{*l5$lUm=z_?Zb z5C8iF>pwg@_R<$8rnvW>ahG_x>}L7xgm^)YUXXFCfC@=Nl9p(w6c=fqc;B6Zg?L9T z3azxgP}nkm?h@DcUmejANP;qc$bRy_m|f&d8*c44acjn7A=`;jt1IONXXOp9PTXp& zvv(_zz>!)cM(u|EQvvL?X55oU<@G?<%_ZmgKgHEHa#nvXnaIeaQs57F&~G(g|6RKN z{X$n7Uj}`QpuU*;E3nRb`EXF%eRVhG5PhlEeBUTiR|9ensjj?b27JO{eiAM>jA4V-O7eQs(=zv+ z78}7UtWQ|_KlzDsvAc-*zT@l22?QRf^yF3;krG)y_QHP8xfQ3M$3e!o^ffRTiH(of zgoPlVD;7Xt@qP1tR3^WO`G>)%o$q_u9IdUeLqy5G4g*#R#YHm(qYAN z!vq`mCU}G^#U?9nq#8GKpC;`PM&n=;%Ek)>{xQOUN^8Mck15~od)nqQKmllr`KGNH zW`PTKE)WoG7++!o6}ORb>RH5A#b)i7&%rf)6hAFxfEkUWS|=BWQ9J{`F;w!O0^ZSG z^3F;Kzh437T>nz{9*zaaFR8x3)GX%z&id5Xia#CrxjW%`2bI^W*t)0RCK)A#H8xlZ zTiRM!%@7k@gkY|I$Clcf^qoO*!%DXeU?av z;TJj^@MDd>3n4~G)P!_w$C-)*yOV5g=N(~e&T}9IbWz%0?I4T~6ET`SyiCRnlI@1z z1MMiYsh=?ug+F+NB+jiR?zTgKPRM+(ZgmJXLtdYf++{2^unB@>Ge?sC1MVa6~XU;HSpPnV@-~QQdtYb@}Bfx(1 ztV#)eqUY`fNseT`&XI6S)RhIpUD=qMj)tp)wsd#8??t{u;#?vv6qmRB1O|yPivkWWm^Fba}8kAi&aBmHv}d$?p*SRkN|`rE4)eULmq) zksN(?g7QQciK^4w4MGdIJpprI=&pE=fzfh@#p3a=%}-d-{EI{Vv+53ubk zHHe?m8YY(yD9Sl;mhjfz-GVt)I6hga+?#F{PnI%lPzX87G|28wG9soVu3p>iuJy~D ztzI5e!HezQ3l&r=51R^{eje$~ywXH7;>?eJa@gvM1_hN5AmWL7gf`Dk$aRdx>#Th3 z7QOjeTHh;KX(F~kej*L!-eR=#eSI}}N8yh%#MZwN^2CWY`b*-vMZX~sSP(9ca4v*W zoj-2RYlav~ob~&ktY~&JX!EvneE1M1Z`&6RueJb}K~58_>BJ;p*&Z4aS7%3x(+P*v zlK1(k_w%;7C7uY8s?$~d_F_(2T3YaK{>j9Ym>4<$NAVc1)$H%Zp*7Pu`%QgA!6P%) z4?exi-MRxQ(y4EBi%M#aw(&b;L4Fe(wG4yU3=f6^1l%*Sr#K&}RuyHxHIxB5)oUmq zQC!Aud&ODTsOP9|8pv^NM*^~cHnof#njX6gMeKrFxF=#9ys|5;h>wM1k{u=)ZXPkM 
zDvjJuG_GeeK?F+T_TxNndsED;X20v7jPvF}P;hj{%m!&=9eJ>q9U-Vn)sv{S*&80q z;=D`e_{3|tM#-`dOuS6z+u#X1^4nm_ZGPy|zdtrUIoG^1pci#xzDHKx3xz^mG<0S! zBdo0>jnUTA$ZO9&SPWgB!}xaU{pKZX32|rkXLhFN)s_)i*ri)<5Cw5BTzf1MuwK}= z0&33%zqPd!HG-PkK;x;}pYQ4Xs9>XYcLrJk7B zx&HFl?Wc~!yCNq321ircswW*xMm-%@-X>dZr`>qZW-De3(?mw=o-+FwjhG* zXsPYsEw0jd_G_0JdXJpn>8hv;=8uVxLt~2=HmV7{0i$Jg*l6SHKFrn?*5CZ}pLa#6 zlMy?Za|sF6zj*d9*OXQBU*EX?ji`4%lDzC0C{&B6rxOQT-NlPIE3elosoxjg%IRyg z{PU(%2-;+yA9Ip+KcN?;mNN42m8iKsN&O?lQjzK8jV!J7lB|r6_}?oy{sKzRc7?5JZ+jw7OHYr=(eKP~T}kR{3%d%?5L7yaXTgtK z#eh$kiL*;J-AzQ5V0N}l^c|}wu&t&D5Cp<%qFAYj{>)s#rJd8kij83|?SuV9`csLI zE{iOSErw+bK~p%!3s(wfb=>2nxQghC?2c-%p_Sha)~uqfDwJCM(>pcDQay0x%d3+= zZLR1LRwqSwUv%6MPimG41h-# zRePQNU(kp?vLde3pyMg+WP%Ty*wWah8N>!QcwYD$uRRfkU^Y&S?b5IO2;n}Wt3;UL7%fbrnluTX{*f0 zjjoZSDa7*!F%rOK}ztR&siDQr=I*?s}IG~XyR zVCgw!a} z**I&xC}&Mej}y($i3h|XT64fpKfn+c}*5XPF+AX<2{}ub5fDgauw4q`}ERS>*|%o=WG=W|cG@UaGmE|oj9 zzgGx6R*pz1J!jSunOq=Ewv!w?OM)-nlHALeY|jff_7Tesn&nm3!lU;03Me|0V~zx< z5R>f#eb)XJr*+OXhHMqgK_|dDh%)bunG_v+F3Gz`8MFTTA0wp5T*+vP%zRHJ;X7lM zZkCzhukbi2V(04YEHCnG)X8`(b?{#+6z~c)?pm2Q*Vsbb92QA@<1QlQ>ba{uQQuP^ z@_2i1V(8YR8yD$SHIdd;o{b(u;77N3_KBL%M=XWRhvO`J`NkAkq7#1L8?$nqpGj(eArwh5cnO3C<38gd z8-+Tz+BXUbBsbd_5A!0gMsdc z`iXiRm<48DdL|H&pa)WRUC*h;4UYV0zQ3!>$aXg?rAWjt8KPlw#?@P0tNQ0?7Cq|X zsINKYBSqZkO^V2$QqO3qv&3y(U5dE-+l+|`AlGQy?q#0MRtxp`JmoaE+?u|+KmFbZ zMq6}SEoeK_Q_90-Q>6jOBoL)DsJrrY?Mnc2D%+3wMm zXtegzKQWh#l(eWk$sl1TEh=l4Jj@;1w68qwzRvK|_Kk8plAX+zL(}EB{GHWpqpT~& zE>Anli2a?^xZeGgw9KlP3Xp!`+Y-sDb6)5o^f2cu|Ln)uHo3npjp4IrOV1s}(#R@q zHatV?gK5TbYzIGXFj1Wa(;SP}8^j~pm{xF?skR!eAQOL$6_2(b*$v>-uB@{{vX`{T zJDY4hGR6LW)9XQp#aLi7^?p>#Q0|}4<@^Utv}F1 zI#(d=IqlxUw_LO~yAZJd?N2%}kA3|=fBy6sRq&DR6CL9!&t6Q`R9OTK%`+q}VBz|_S1f#_+%Lu> zy_XpySq(;A=$~!xU-f!+P+yL;a4w&-GsS50`nb|dHnGo0T$tV-4?$xCx?z3i4+oC9 z*W07if-dsW^uTAj!c|11(=rW|)?|;|pkD2v0SitI{%OiuaQY%W4R) zD+1Z^H!Y@j*yWMy+(EyQe%GVbNbBr5U=Z_pY3XuoRm$}8||Ee#4(e;HBxW8Lk z8H*Fg6%9$Rf63t87b@={-C)L4lQ%cKukgk1{*gFiP>6p&4+jTT?XPPke8pSLWPcJL zxUnCN5bbH-PDt&?Zp!z%(e}fm8--2qfmhx;`V9kqVaPFDi`p|fxYNGEeM&%rR2^$X zVB7%0RQMT1_1_a9{Z~Dmj+sX1ogw`z)?2cRDp6~?<^pBPV^nuC>&AJE+Ha72&IBbS z;XnoCm&uKa?fnsg%Cp*w)W3$eiM%#7vOCz1E;zGPkn5BIZm}aGHt)-w&zbJj_m}1< z9!|e@#*!CiF-`FXX#bg+?zv&7>>cc5ijA#i@7)m?jhvB@%j*zjl58q$We-K#MxQAP z_sL}$ypZIuVHh~uVijSGk9)ql6db7ij~5_cxqodhO-ZXDLvy1xC|po-u2EaY?=W<~ zJ05X6avgBNcYVxzg+A6J)HKv?=NzCSc6e`k;k?)l+YEWbx8``}oY?t3iQIt1`6>nW z`{Qc&narI=e;c*OuG%Ya#bik1`=J}BI7GpTl0DE zPMg?PQ^U@+jfdgo?cUzm55M%@8V}dS4Bgg)Z!7&rz=7#tVY}fW=Nn$FhYbauhLR_~RkQxMbox3EEKg$WKXQu_(xGJ-p6C zw~m;O)OUs};uCu|qgDG3Ygfv|IX;RcG>DOMG{)8IH&6Vs9o!C9o1a1Zs9&gk1o+fR`A>wK0&P4dC<~yZirBkW~wq4q?eX@ORr3*4;6YD zl2-f$q7BNGfeF54fIljdN4vlqiNN8HH1IG03V+tOR2`vhEp}?nLSBZ+$>}EKvf>;~ZQO%t=7UX3huNaZPvT zzT3}3S0mmtH(QvG12>aC2n6AL#>eN_qaMf?xxnlScf!`eeU8>0cYK~C?bMVJ;9$uPH=X@*1 zKNmJ`$V`zVy6=V`b z&&*+#JQkCjeZ};a_%+p2W~>@kRofh18R{M$!1MK|5{WTU_!iMH#lG#Ntug_p)Q)G} zqAxYHS6+RNCm-b~{CfH~iv1yaL`=2lMd>Si|>h~r1sl?24 zS34o~a#eLoewMK}N;c25eJe522MivhJ!G)_vC6}cqX1kV^V?TVlj3pOIip_YT!g>% zeeY;1Pm%=YCTKcmb!r}V%BxW(?xH@j-;qH)UfI$8j4N?!sT{cY8}3`4K^^@fx=srj z3zQN-p%sZ^dJ#?7(5z#OyDC%cfw#IX}P;k=%_w_Q8%QoAFZ%Z zVo9q0brpN_DXO?^XwycC#9!1PrQ5E&Og)p0YFa-U5^g4syz-` z^zVkrs~@Z@Md$@5>OX^#hr-MbcW|cODrfTb$vp&>t`-H}gk}d%?n#CX5KGv~5Fv)lepRGhIp42TZxCe4dk9doBd|qU_^j;`?1V zIw<=%EKNAEY7QpYT$j@P$Y8DynjTZwJC@;ePbYo(W=r@EG&F@@%6 zv*f7c(Ji&-s-Tu3LfpoqJc412D||Ymp?CG?1>AMTlt_FB>9rqwq%gAl*m+)*s<%jc zECOWTomcS#I7*+6Dok+;4M;7_=MHGTQ)8Tgj$REjs{5+eq=9ufmAsy1B`Sz5pIKkb zT^R&~IgWdSYWcC@31%g$M?WO$*ZP()^l&4JB z?lY-SFBMxxDW(*MF3RT zn>g4q8*snr0sK*;>YtgGNdp<&-CA?8^&&{;&+6hQD|zVrBk-+)q7hlQNyCu-ld^xl 
z{CBxHo>Vs7`ep3ADrHO|Nst&LQV90(JDiN`cpY*r=%YZ?x;ekMZp`#xiRl11>vc!m z5Cw|g1G+ZJ@G&|7Ujd(a0&DW{Fml3yUAkQeGNsS-5hA}%;wUgkQX{axbOAE4)(T3r zd}DiNEQot^zY>B`zet`+EWT1Rc<%9oBu ztZDFrKVi#mka<~wK~%f7m0eHIg@6O2D9T1IP%62bK80XBqn!mSCYYkjJtxQ4f{Sh) zhk-^~{`E=OQD6^-S|HFR=7^yazX{hAD{}y_y$`*Q%F_-klzu18?nFwm?=4+dX2*QG zs7}MDNzwJe#mU0`Zz|SZhOik$6)E%QTZ}g{QMr@fP?4-WnvO%if?8)Ss%Qga^v$DV z_4x2y(6M>jr4e1rw!j#7+24Q-LPlz0V?a^M$F3rbiyfHkbcctz(>J07J8ZCysVmE0 zZ``g&snehwqw(LV7y~ylU9y9KvP9A@5eC9J$`j;ah)ot2kw!;iTM{zTOxsO5dtxel|iQQ^`O1R1V=27-|Z!TFHMyzxnA0SEvBg8HVBxp&pY=kV= zbYbH>RilB6yI{ZqD4*AU?{YXu3M;exB}Bzc-*o6I-6m~N5>7?RPTZdZI)?HjNXkGz((UTR`up`0r^CV4txJo%K+mm<1+-e4USejx24W90>rH?4Dx<4E14#cxU zy%dzG{!@)NXqcLgBQ&7mO5?kA~4yFJ>%B8a@YEc_=qwD)P zSQU98tol3-_Nyech;E#eyzfbzeOL)OP!TP&dHL5WkA2~b=#|YYrVz<{c!S-nZ|u>F zv;m*^;z(OR+B2&&N)SCXwS2U5g~?i}^;Q>@J649XMh?N8Q{<1rrrGQ3V$lv`Q{{Y)!ZY{Bk7|y zyOZp1^4#H#iJaS6?1Y9^{i4{^s}4>``e70qVpIXj#zh%nenD;A_#O$Z{LmIc_PJiK zhpijR%I#xS(9;MuxQyC0y*2Ep@IIL^o-uyd{Az9efG!vBFwJx?d{^zJ1qDx%7nq_6 z3$v*g8ax!CXg2nbgTKBlU6rUl>tCC_CrN)j740kMy}AifdcMgd1uVT7ME!R%)dfk# z?m6PQK3vG%>$>=}l^;aDw%~Zx_kg%Z?aeH8qqM6^o~TAa={McGf# zFJ}aZrbup*fOg%7hClY@_X;h-iZ|>G@V{N_@FoS1Mp?vxo@yda&P7oBwd?Ret%*fv zGvpuPUfzylD(Bzh_drXS?b@)OF>S`hrOJvBAN4kYInFIWDu$Q_w}@NBX~Vh)$wHHZ z%Y|hhL%cdz+s_p1>*ZotK&$)r@y7 zoJ?HH4B~kG?@ff&oVritx^|N-R+QDvv!B(9F+3O${Knt!_n{zpRg%ntVZ&rl&Rmrs z{8pU7(Tll}?d6I905f@QJJdUsH(qR{{RR1>gyDX~o} zI>n!*jd9Hf9(!v-YkY9D0^71FijFhyapZS7066I72~NE4vKZFGz?@$~XngLXr-$$8 z`v&4|!fVu`){h%Zhe9w#+$Qs&ft-spra^xTzFmynU=PO)er6#Glk^5jkAax~xe^-b zlFS@sN<@QI2N=M&LWV6HaoObFTO5N$7U9?1D>#!EYW9nD~$d$fr!GVZQBx7`eUB-FI1y`6ThA%nlso|U|A zM}o;ovv$Z%ETa`f3&26OiQJq|4#=&a+B!sD`fYZ z5MyB3^U%8+elF;Lot;KxO|H}aK-cqq>iO3%mnH7qfh|YJDMlEMz6%R$Kf<=y&EV;B z>I0kWAFDFh@Qz=>`}~Lq$oCyONa^qmg3JH<-V-fA4_@cJ1BPk7a1K`=hUvXZ;Wnn9 ztw;wlP5eC*rK>Hk;Ma2glJ^|<9i@b^Vl4#*&~sNAB5ZNYk2@`@aQxW zA3?q_?3)NRSMNa(qZLhCI}Fw0wBWh!%bq;f!|3Fp?=bi)E2V|13FlLT%Gd(e0LDGW zEQY&yVq% z_N}SvZ6%kltob$141SB9{=Iexx4y+|3KHZESMOM7FMQN!>v^TIWK*_&FOwpI~ zGmqobAJ(n#7c4CaODnBRZDV>AxiTigG^du{t5%pZ8xdAA8) zY8S2-EOIto^d6DAQ=I3fd-dP#dFNvSu7eMhvvj$$4>c)WeQUVODHO!G9d;XuKuQ3b*A^B|5Tx58GkNa^l zvAzt%bMd4>V-v8xv%CfLT9{$Mo-l|}mojG4ARY#-z{_pk=q=p?A8zC0Xt3_|M6M(kT6bapW~H$MO&G%kr0nR&mk@bj-M}t}lvrt#PB6cr2VmY^ zc_&9U*l%_djktMq3IBHLwib@9qp`t@7 z(>$2QECLuCS+=!b9<7i`mz1Q50;T}D_)2&Tjw>R;_cV*RE|||Oib`OATi4*B&&HWw zVoKvQUYr`NN*WIDv2d_#0)8G#6%FVb^d17jfX# zA*;cAT=S6;ZU%n?7M|k3Q2qR|;*e8;P3EiTBD(a@X1(d!Z2IVG5HciNOe_&ZlT@F- z*YCTTjmar#@IKr`8nxC$Q^Rj^lOwhFrS0WQX6gE+bjBfy-7?K|xeexU4;@!lPoDzl zQ=@nsN#;F%hT40amld*IK40pMuLW$se3MUU5DneXH<1cr&JoN?wC6o`edNEKJd2>k ztSKi*^IE1OW7lwC1R#nB{%+@LB;@ZeEv1ZlHTq2?Z6l)j~G)KsVhCOScd-J z&JuEEx`1hMoKKhUc@;%#jxOjO4**T0c!Y;3oit~!z6pYLN-miX+SRdk zk?ZoLgZp^q5|^w@kGu$|3HWWoh6`hCl_dXp`{U#ga$1zvd`%*$;zd^0><^5021yCs?TLR{^4gStLd1I0C z_#PihCXI@3>?D-$?B2=qg83N(*t>}*Y-9^+h!G)iq4H}c1cF&vSwP40?CsspD#kFqbyoW@j+5{>3s55N z)sL@CUsnn=Jzg3@m(s|JpIEh~FO4uAbFD=|uiebq-+Z-8n5p}CW!^DMY4F@dO}Alb zZe{JB?eA|*&%MiB7FqpQs8|hH$Y}&1AYJmKj5nWW zG7kfKst}+b&&b3>L|9L*qcduX(dgA9b^ZYdFzJGY|PybJXLaZ{3E&B*`y={iRf^xC> zD>~ttGpjaCG?Ju!143D!>SXTK^&|1+lxe{E=U8SD4GC=y6iLLs_S{@0AI$&GknG`m zYAR<&$Evm|8yGubQq4|wYX#LMmnl3IvkIl-wPCD?HHiY_G6^WyLWKd$g$YmxO-yWn z4M6(o57~&4%6OB~n;a+bmV)a5HMq8yVi-EEHPH%^#roBq_c)`ss1jptdBYBWu7 zSe=ny)x2g6CJ&>NmjKt9mBLWq2qe$py3_a!oSa79(8IQv)&_`c9P!m27$}Hk*D_td z9B~4?iD;a|b$CHr^rbKoZa}5+zNt3oq*m;&Y4P}BXP%0CPfc;-?-W;NFN{1@s^)To z?@BKypkGoh%PIm3g}sIFL6_hDnSDhhW`G0eFOsR=PHh(#ANlVCutNh@ORf~E=2)2( zKR)9Lr5^Sm?9GlD>X9JZ1iNEmWR2Q7We~lkCvidt&^>!6s{moqtU0aEWY% 
z8&o}kZ5p|0dX8HGgPnD1Hs&a&#v#-YME{ZPkm>$F;(|5povLG@`y>9s#%aMJ4sJk!?^%KDD-eK=nzEd}*#O1K;VO>D7g4Jsz28QQo zdvZJc#3Q9G?2K|IWH_EqzFE-hwvA~a+{)eZCJOPR-&)hh)q{Zv7&c~H)O&-FV%crC z>h|{j_du$UBcjbZz*6=M8g*VO!;+|f)#j!ru?Uu8fKRILOqXD5;h9SxJdL%{a zRi8kQDP0!3)#Q%##XBC55->)P#sG=Ds+X^vc@k-%+W-r^GelocN7-V!;6$2s>(Q*d zdCF;#E&YR5sfho^k8e`1Vb34p49_nk%Fs>_ec8s%`}Px!aK<~_M+p+$*L}n37mh=) zM#^IMjiKx?IL=Jq7IJ2`i|1tpS&s}aVM9Rr-6T>vGfu(PRQwA>AE(Z4X`PqXzYUvz zBiC9HuE@1yDOHy`aVmBH*xa;5=)#dVm*{Wy>D>8#NYg{0wkrJ5sxetO!aZz%LET1C zo{z!^kwT$F7K7k(wF;}e`r4r8I}`^f{!GK_K2dk9arS6ww9?F!o8CEgn}(ly2!@4B zz9Xi6`W7zoF#Popp&Mt4@B2JR{y0QFVyqjvc3FKkr9W6}XNrdqycbRmd{1=tNFV1c z-kRritZP~@gNs-Yu@PHP6j0F^#Ywu;>HFSJk#afuKY}3-VM@n6MLpXqv zB3F$LvVMDNbc#BPG>F`;4&G0bQj}mz-OE}wObSWj05iKSK%Q>A$Gf`sEbMc#urC$v zWm*J=Dk_o}8x3zlIkeqY+Ifs*&z)EAr>s*5=7i%52N$eb zp*_2042R9;|Iz|HQ0NPXgb*`L1DEkG)ie68bUbQQI3&d#SBG#Qg%3kuP?b)fhJ3Nc zQ>L)K0KAO}X^x!pcRe2CgDgd=z2)h?V2?wzQc&-`%;NQWh)}TGS`Q>X-gXB=EIqK) zMVzUmq5@5d(Pt>0N*a-&h!&~x@B{7Fscvn}4GkR+UX1}QS$L77*2~_w|NY5Jx^r(1K!7OYfrjwTtxU;E^qtW5&Pw}#Z7XzN0n|+O zDJ51fClSk8ec52mP2==T(mh^P#~0Bm%@e!rS}HkIl2ZT5l0y-9|s3K;X^A+ zncR?HK2^|1x!3y5!-*v1qTx=Gzn)@zy|D#t&H7E_$)h03d355F)u>VMod(VBkU^v zpb~AHv<6qy=qYeCgsTSdj5ix3r}}`{&a7>e`~|^=;X~NEG}(Z^qZWn}gn_{u(E>n) z&SJr&wet)Gb2j!fGHCMIme>WxG-n%zSDH@^0X?S?28fvv18i8U#t2XiWuMO$o2GXb zY)Ea23lU9q2@^G%OZ%vmv$z!!K@B)8ln+{0$$T2bAlyM9%{6Pt-Nodb47D%7ln0nx z;1$pguzO>BV!f;7X4XF^#u$^fmX%GP0@O4OBoMkM8vrnK`7zVk01CAD4~TiJ8@&{r)FKQweY{Pm^k4)f1p_dz7m(&|;`c@J2h#mliC*nFpGHSmAq9LA#)|HmvQg zd6zX6MH!ehKj|gKtvj6(<&EWBeSL60jzDBEv{X&;khJH2GLA@ey zzlq<@O!*Q^2^|^-^y+M=WvlE;+`PMkr5+mqlsHO>-~}yh0V51v)3Vre*kjEaVWe)8 z-yUPgMJW8FhaK_qe?>9#f8u)~dE<}NKmReOGaMR#dWPGQ5}Q8NE7@9ONzRBK`^Ign z4wsgvB+!dFmQ+Q}56*DdZx->cn_pU~h|CG|ea@+G(Ir;|C(2U|=g<0uK<=rU&VV1c zuAE2`5;g+0P;GrE1EwPjmi`!$WQ@DyDBrX2?fSj@emvW`UO|!8jf%gg@BY;!{UfM7 zBm4yQTd<_{*_Wpy6#&V$G99M73W=}iuRy9lj|JGIb0o8Qt0`>aHKZX3j)CeDC|ZBo zzPQ>^xIu%i^bWYp$@fTZ>qF)azdIm6<>U+C%5Kk_sI9qePPUjPbEN=g!BTwxUc5|}W<6zmndo54J-2ZZ5^G$J)odRSr z_;J>SLloy|19{R)cAD~HBfhh|Q(LvI!#z$%%t^&}fnJ6#rlt3HNe3TMqlBiarOyqM zSAllU14_9~Ye|8v-$VlLzLI`EK8Y6|+js(#UZoWmvra!9(8;$@r<*0=ApA%slmGCo z%}e<9lWTMB7F8rw?z~AY$8S;JUsIRcK zG>D!h-9SGPjgkm|odo&pEtsq*Z@z^Mdwfh0xrI!fb9t|&5x(&<-j!672maO58X`lG zpm=)$Ip46jx~T|sdfQkLrS>JE+fNEE=r=Y#o7n}UC!0TNlhvRvKXG! zc-Dx3fE-7FI3fbI1Wikg-8{V-15nN&&H3o#6LWR+XU8SF7#Z|-SoZ3z+uA$XdsP@; z)hczy{KyPLpp)!eW#{6@ig&Kba<`-dla^iEx#1VXBJC~KnxPXdjqkQc>7>sSC4)V~ zpAGhAKl6JzF8Nm3H6`;0#Lp{@d`)fGgJp7iGvvDvq5)omosVhWK=PsQc>TLhlu9Z& zGI;nPN}CN-%2oa&%zBh5?fMwJJ{O0t%y$8?WBe=3mtth`8!puOognt=M_(r;9k8m9%?*-<>IM{aEJBMD#V! 
z>_7h+|M=&j{HmBOEiGPmTBnIR;eUt-4zL+GWB}`ACIA<(!%`VoimmjOj)D*U7&uU& zl+X<7s9cN#e9#8nYgi{!^BWJ|KKy32+axF-qP%b%v?E=kEq+OoLhsQbg|-lJHQ_W+ z1^!VZppFK=6)nMlDVQs?2F=LNLxFvSQ7e7BD^l z9Jpvqf5s}nM3$8T6|1B|J#p;~{a=i}0{6Hc%?pt3JP)-H7sZOYdHsuU(Ts_RK+Tsb z{?wrvewzQy!G^Dak&a+f0F$)JkbMj=8nFd1vU!*fl@harOX*tdiA~c~#DVZqv5zqY zCs@J!3AB%s3>>X)uy1IG(*f7|Ke>DuvN5(h82?It#4{y=0^Z{|jx&R6L+?6S)wMRj zx)&TuqLgRTxb#HuX-ev1ZTGPVYOX?qpPHqOH^ge@7p6x|7Kx0A&#~4&rP>S6EYd3T zt4NgRUqi*^$Zgmdr#f|ZpByE+aN=XMx69ZX}`gk=mv+J~L;okwjv^u0LtCU!yQ3CP_819?J= zfqOg3KtM8WcVw4`iB0=x?3=v3rgWv-b{j5VqEvjE{G3b^8>P24HFlL4h6p)5|2N#5 zBE87X%*LB!rsgM)?U|aI$kz_~wjJ6|rb09wTZ4kGQ-3am1&1HY`#xj1Ax)Yz)EU}> zgh>^cbaK_9`uh^~%d}H2$`VtOZiBhyTEb#oFd5f{hm`6Q2yrk);Sz-#A+NxRqn5ywN@0_kS}u1^+!7EycFd z#9Xir4;{}Dlb`|ia@~kutNTBfiA@@u3}{?gv<7jhmQfFve6>WnyXp-HiKlB6VKc_( z!^9mfDi6l%ZH`LqNH}k4VaTe5D!K_=2OYF3*8WDrKagAf!-RPEFPrgv83`7%hNr)I zEg4?xvqEm6Kc6Vt{3w~*?gZXA6EjPR<6bW_hT@SXOmg<5lR{b5hq2*ft%YA)ARh5M zZ-iWxl*Rh(<~UW)HPX@uS<|LobJ2R0>1FJVlE>~8{(hR9Bm6;k_8fZ#@?Wns=bI#ubDJM6MMQ1 zB58TPuXRL*Ij(~@B|YVh7ME*wxyXf+!HYQ(skDUWju2k!Huid=uBC_NHvhpHFbGHW zF~__!V_f`kf-;RsKPO+Up~G+10CuiOhUti38BJtCSaa9tLITWjgPrJFn zNEDPBB{j1_F1Vy>*q8&E9DZtb?^(5JkyTy(P08Tz*6sOHS@TQTMzQqn11?rwR*EG= z`MlcJ=%)zscYHfyw^3Gs(KSf@D*H5M)&xj#@H5sKliJGr+G-K@=^Wu6PV*yG|G~5z z6h$ys1nEfX@p#lJp>?B`@2nG?Y3&Q+vWQ3acxUKT+Qm3QP8`GlohV{tFxJX5RJqmO z3(Evf?kiVZ^oX37k0b(PLH?>bDNedO5AFo=f_uqn#|i^XYfbfpY|O zadLW_&rv3~-#TYP-(~ToolY8FeC)Gk_d_OY=kOB~DE_=vwf0?-ra6^6743Y&KGLZk z_-QqU&i^)VaHh*Gu-VrmDLJj!j%*kq*>06c2TvLUGMAHl_GKEc%X{&!vdxt8U!b9f zL%N-RJm4p#z<3>56w+z;>ettGT4PAy;Q8Y|Z|(nh`u_JEs9u+VEqV=OZ zQB>d)`k7(igSmyZPWBA20MG+76jA{2oz2^KO=Md@el;fdnzCZI=v%Q=2o9%b!BW*q zwA7hMTIkvR?aT_~PIF3q38(|V*`eCxoF;)18{kf2EbY0435`*xWwA6hRaFd7)wyi zGGLSA81NY*Pla2w3A3<^)VZK3oYaAhQe^NBY`Q5roeKhghgsMQH`t+)mn?eto!YQW zamdE$Q_bxSiH-ruIFGR%!Cim=Xs-z7S;YL;c_%6MFoH5b?G+9Hw?G;VNViuDTlPx1 zD%81IDCn)ln;Zi z@yo*mLaW^Cy<*@&iC^0WOgpntiY7voOZ*n<^oKJfnJ7(6cP9%Gqb?d(#`~ccZ2{`D3Rh zB&3o}h>)k!ImTXX5)8UydrX*WDxC3STwSKBK#F6yAn<2H4W|m55c9?Cr|J(FXNS(@ z&%|22LOK0eN9>xkyIKPVKL&l>x;GWuULC8C3+cPNZuFf&1wKGRWwp`c_t(Z7GKX1E z%DWf1KUrrXNa~++HH|GV+`BwYG9gedsTtOH>em*L1Wx(SI41j!1u|FGroA|E-AVrD*>Z&1eh1^VvEd&QjL!fhhleaiiL4Q_Qi>rr7Xk>CRQARVXC+ zZqVB9YiFu4f&|8{zf!;nIon;_2Fdplhp?!Bs7-S1hbY}Etey$<#oZlj;Z2w|0{u^q z<4;jnx3=@bVHCBswIYYz3&W=MU3@q2x`g>Rc110f^0*|-nILvJH)F;`;EV9Ecdct_ zvx`MHJHibc!v?TdPa4YvZM6sM_QVk5J}81P-0;Ya4~d2d(V=9}cRbsRuBi-|G<767 zW0qT`9FQ|9yFT6mCJ@(b$UfdUH}u5<==Qt)Mt_* z+}pN~6mi|>AV_3R#Q$UuVr#3PAaoK=dt*@2T6@eKwJ6tmpQ8q-gfY= z?1E_fE<(BETo?7#U0f!m2O%N_27a|qj&L}r*Iwu6jFWvvSnntc-;E1hGv%(M){L2| zV?P-qLQq`Z%2a>p^OaaMMjZT>!r3*#DEl%&ksj6Ih^QbM)%00&yXGvspUm+m=Hfi`HP#&h?#SuWq5IH~?N1lNDGBTT zKF3pi(TqvbT>hL6FDf$BTiTI;41{$_)-i$r@zgADlOsL$u{UN+R z_X)J)XAht0;I{$y*MTwJk~DTwE&m_Y!}v4y@87?-h5~wd*U!rz zviZ-S?Qcr~oE7TySN=``j1>B6oWK(3@Bj~;>qe7K-?y@X8#N{}4_PATt7AnE2m0Fz z`@iWzhN`y*ekpA^`)Og0q`Ie$0_>OlO4xO&V+Et=X7DQrv7`Gi@iAgz&A`gaGyoQW zajWomDsNm0@)v2FnBX5@!9W{MJsMpxLo~H6!W_`#kI~Vj9dW9lup^?(xcTJFNJbye zfsH(t@ELaWXfO`hXW*u6i*fD>t)^t~voV53g0umz+~odrQ=LqCtY$2|xippxK4qBQ z%{)sX%|RTteXoMcaB9gm!E0@^8X6~VmO~oT^ z*g~bX`<&1K!)4-2xMe2fJk==;DV%G*A+Jizg?04g9sSv{46Nx+m+4LLbtiFwj{DgA zZ?-*`IE#2CN9oA~G=O@``Dp}s!a+pP$8-n|HvpdOL(YWxipVjw+Nov^yiY!wI*6WH zt~<(|r1fTOVl-trk#k2}CoiytgkU`V<7_NVqT7~|D1)OyhJU+rdr-HD%f>SD+DJBk zp{|9tLhUClM>-Vt!>Xh>(%Sb4#(5e)IbF41u`nhdoY%5>Dk;h081F)DY5953UD{e| zd$SJDkozCs@(KeR61=SOM04Q7h)KKWn?%{eAK76@8QW!?`Z$rhs_pNTcsy&L4D0io z!?5_@Q3Tb`y|rr*K6vAmN4UmDWG60uT7NXjLhsSNmE^Sz2@`i|6HJ-uk14BS+%^0+ zE%6_*bfA>RW^_O|r{h!sa{{Y1!%^q0Y>0sU8(wO8jF0jwsjNdZPNYeOPue_MDUlpi 
zv|p17bc#GtoL!#;2U!HGTIHL$SNvVkKGKJcwlChGkg3VQ<3FrJQ(^G06|8cCxG9qC zB*$yiepgAazlH|og~30(mhgX>$hw$%Y$o0tKjxQ=eLCsZh?7lpc!^5s~QLWSVn;qQB7rojd7?~S;((zOJ&FAQ-6&Ol(siGrr9ML z1@$?G8(n=E`8(J+s0Szvt6{^72ohI&c_MlaQFe@Tdh&)=4&CRD-g;FPhPfx^@@k4~ zv#r0iQI(O$AK0AoPz}DH90K4eu;|butI*HZzxqCx2ffYGa-T#ANJDuqW$&I!U<0YU zkqRJS=O=t;YTJ*kp#{l=vZ)SvXiVr^xgbqYOfyef_=yak)`ewvQ!BJBwdq#D(>eXA%yQf+_l0iat&(Moh_dR`sYtHcP&8gD&T{qpW zIEqtWeV2k=#JIsf_e4z1Avqz9(H3UPiUe;}{oUe`)&4_e89GCADRN99Xg@;hFFV`I z_aigqPT}3iW6I(VI}isaL$%XEotz>mGW}9#FwnxyCR<&-d12->{zvRhfGms{ z1@TRB@H}}Wcq$$i2wGR#Nc*7@@+mJp&x|c^^BTaV}dkZmiX> zdxQrsfBy~QK>gSa!c@hm0jM#Fe<5w;eI!3l_9H0nDkJZvI1XFY1rzuO z*k_xYHI%J!gvNh7c1mBNoC5kpbp3=@S}S~fjVW~?Ucj?NS>c%|ebBP%O*!e_Rv-aw z5WYQN@g(1u>XqRnibi2hf}<1)M#?TDnJTB~(0c)DPfTU3a`Fbii7%S-X2GTy@&wj3YVrf6dThPM4lV> zWS@%rhg8b!QPX8iTyw)X<%!8-!hN#$=d2GRETOhRiS+W%4A&ml|Ida5I%bL~u^wo* z>T?=;o@A;~)cNJfWTnC9=Z^PDd=1AEBdo5%t-P~1jGyxl#Aut8U*{X?;V`aVsATZK z8CG&gqp14*okGe>h^9Q&nBNry3xuJ5>I|#XT2ilw*Cf_W3VQ9S)F<@hE)48^6;6bw z@yeQih?9#R_!iS)AE*s`)7OY+h~ZfOHD+l8B{M*8_cWGcBt{3?dkxH`o{6*M{}!E} z$LDcnFGGnDe+n&gAf*}RrP&dek9*G#FG*k?XLYpuJ3%&qutyupD7uVSP%QVY@Rm}S z98S!UoW387Z>yzm-Fx@#8uCZv48#0o9XC*kxAAt%IXTz5*r%#1o(=)O*bTu*IaPx->kVtl0%A3u zQa{Z7iac|?BP+L%*D*XG0R~_Tu+BQ+G>(BQyRH<1?FSaYH4y57v*dwmE@iTTqh_);g2;`HY0)KIJ! z?_kNB+QYjqmQ6w*XFb2$NT36S^;?L9c18U~qj>4~LmT@n=ZAXMefCt==k-|OmCwZ; zvvD5Gy*Vv?Ohek~!zuo3|D8Q-tQxN zm|vt)ea*FtG)4<0+e(bWtpC-S2$!PMyTy3#o#!6{zmfGmlnD>E-xpi^nmh>EE-o?8 zZDncz;d+x_kGuY-xJLKYKTJMg?nPX7kH1x-^I_};B-k!*DeT6hv28g}lxfoQEfn#OP^&0H^|ynx^B z%7C=UF7canC$_!8!j$akO4M&mkrgXQFz;gX~9`YUs7xca$(rXY2ccg@725?`}e=_(YNRsN6OxAVU6RA&2_oWBMgp-xPsW zVVF(crD7pJ&Q`F>v-#br6N%3K*xa{nLE*vSCRp#BQw}#QF1y^@q^<@+H|{J@e^oc) z_#alBij3VXnM|)dL1$|VDt2~upS$kt@~1qezG0kVTwE1gepLX={rYN9I(7c!p$oJB zxl>Zyf+c(z0af*g>tW0sCMaGm?$F_${hJ%)msR`UkJtaAujpacdlIa6{O--}2=;H5 zc$6%W(kJlpiuwNI)InLUd%WrODx-DZe%wK&o2KCha+yWl;~=>2FP}yoq!Nf5USm(( zOWqfM$1~DTo}_l8CK%?}))kXKeEsXTv2@Wu2YBa;y8P4O6?it{wR6zee!&QdAH3cm zuMfYK*?x3-ulo%CF5zOidiMMPB?-@$Pv2OQX&b4t@45(B{6ispX-Z~<%KJ#f@UWw*CbrZV!JK5g*nD`y5ib)cUFhfRevUY%a{;$usqAGoB^H z+Z`lq)7HwcuH2jrGRLV~UybyYkzNi;+As{g(V3>d&kH*i35nO9;T?GHCJXW1V^JDeGULhr2Y^UcBKXJm8;HjH-bd!#AttQj zX6{~gAGn62X+u}Q5Nm;@n;%~v<;{u6$p;*&rYN0f==1&O4(gv*E@HvddoDVU!MgW2 zT{z?$I$bg(9O#Zb-r!Xrt|aordxm2)+Jp_m{PXhXaV+p|vAj4K_FmF^$5S)=8TV(# zqpj6B(ar5_Vkqk-S!}rr+4Q5p1WE7tO=1en2XKdiIN(bmMiB)?+<+UP&egtGI>B7trgr@-vK=*&l>YH|TE)TAcy1$|B>m1@c#CH$JnBSj#r1y;tJ;4AQ}w|fFS zukaTW+B!6!EI)_kh85(1n_CX4{Q#LrIL3CxR=HH?#Wrt_s3?^N-08g~A#>NlVF2$W zlZb?YL#uwwr(WSl5H`38V$*;p_OuJY-oeG+F##KawK4lRTJgmcIBKaq_LA=sP>FL4 zXuT4al#W3%gIB#5fo~IuNq~22=*A6(BdvheSshGeqH5|E!NVDsWFKy{*j`BE>n%&o0m>Q^hXD5 z@mr7aBOj6BsFc1+w^B-bZ=k0(rPXmQ0qL32wz(eXoHx$rCf8`WFJ=KzZly|j_V~&Uypy$`uSqu`BZL{k|JIzG$aK13-3?d zEo%X_29021!X&~KgxJ~Mp6=hsBoSu%(ZGJghsQ-?w#dFBf#*{i(wbr)2y-_dBq9w} z&qG{4uwc=wv+DC`#j;1`QZO*yJrEi~t0Mk=fNi*rLiv>SvJbGU`lav-+5uy0)OGlu zh5MB4zB{xe)Y>{M$RcEHZHD3LW2EyS-WFp*qXj5*gjOgc3Td-NC(qu5_dsX*{llmC zh=Hly@C*%2_(8m%NWik2*OJa;GErYZaQ7u_+pgO3Iw>tm#&(s)d^n;@OTD*F1bKkk zHE47-74SkSwzVNkiT;PzacuYP78Mbga9d*x1w2U=xW(7>q0NoPXLM6x?DJNtUEX zzKm3NF<#2uACcIXx!9L`H@RnI#=!AhU6NcQVI_5Xz~XY#wDGF-P6Af@6ZK22-%5V`EW=PmyB zPR9179wWdlqRvZsd%kVPtThuXWgdiPRhg`$u>fTo{f0?g!_TafoFpEmvPKOq&5-vX zb=;pLT)DLqrxabOrOk-~f<_wr>MxdbY}x$>S&kJ?+jvT)rIEj6;hCcz$BLR2gw1sI z28v(96Q7hd$MmdpEfVCueywBi(xX>`Ola_K=hcZc-p=)0{lE(@#gJ?gM<6JC{PVp+ zGwJfPFyvkRz@3B@`EAe6Pp`kXIc7a

AK(e?N&t!NIt?&&6F>o0}ugK25cm56i^QLUT2#$+NyvM zL{39+G?y-sVKuEunA{M&iq8u*W@FIR%oI|fVv8NXut5j7jLzBuf^a)9v!ffZ0H;qmJb(hfj;UryREM&j5ajx*V z#Jp332r8M#*fLEexSkpgf}3}|)UegK6M~vPct6H^DwMP8O6WBsJ(rF}QV6``u{Ge| z`as#_z9f_*OA1pZ;UI!awcISrKm4EgWi0hyQK>9*N$BlRSoJgdYTq|Q$jduhxE|}Nu8(zDr*y&O6@6k)lY>cs|a(nJ4i(j$0uPc0a>0<=oYSJc$u%?-@KX!jt zmzp|OwvTI?2fXm#JX%cHC(QZ5#N^rFWL#NdqZ#t!OJvWuQH_MTt^P5=S;6w{GwLg{ zKL&XE6VnmH7~4LBGWI_VCJNb|*?|mS4a96xNJN=%*ILzHfg&i)E= z_J$u`d0meWZzaDIPPnr=N4ps&x9vu0wkaDtbSe&PT=78ni{;D1H(_?yLq1x|F0Wj# zBbBz&tr0JMg*$;|+681$UAI6H*PqJ;*}e9Tu3rz0(04%Sf8GI3dRY5w7he%E@A{f% z29rI)E^@cy(ZueMKRv>RGP|Tz0%O;*p5Jl3$_10Ne{wyL1%Q-rH0bt#lWB~_1$FisdT zcKMjaGX?oE{XxS$BW_R#8dvQ8o)r-Hjxw&kvf-a=AYo1~ z!>ut;zr&PCMYytu)AD-P%o48ut?B8%b6lVJ7-(hZyy<^4w^lYrYBc42y+{V6`>SP!`?OY z%_b|Dj+`)+6s44^D5z|~!}|U~=wAXxv|0$>HmOIH zRSHImb1I2R5x*V+Y2+&7u|fTP{D3ING;Ej}D~aYfL_75ara}t!I>JcR&iRI~T+3s= zxSyomJ=VS~E}ab^HrL_G-gnEqimTu>-(bg#Oh~lv?5uxl`b$9(N2pWOItCwVeWVH@ z(ba?3`(b_-cY9JJ;zazF0za2}d9P+oVx zaG5z1^>QevUP|sIvFm6&Drdtork}}BsX58631F;@#Bsj$SRn6<6La;5sVAwiqeO0 z7wIZ#qw%0`k*$>~4#vr^?|$)f4##d{yk_{nvT&#`%L%S*nUPR{VB`;-U`FmJ<(q6Ib-4rh6HK#>*OH%|WL zp!b86HhDUu@puHF+@*CfQ~PixM1SzS7evaMpXZ6yq|WsA`OnsIYZ&MHtDjZDxr7Ff zFh-ZNA=xjEZ41`7Nw>&l_{@I& zKm2To%)KXD5-3_Er~epQotmMRGEq>5b)8;|Mk3F%8~R$Mov@OCf9&4bnuWyI4ZNM7 zlO^vxpjS+|GueJtmTha(YI)>#vKYwj+&P?gI21Ct{}ddlI-W1bd4JbTI(U_3O{MR@ z#%qO4CLMb3WoP%7hH{I0B}MZHxBpGZ5Ij`>k`&K&rI&L6vE@W7@IlMmJUfX?ptCW2 zqQBt1hdN7S24d+tS*qLC+Z?r*iD&z}UCVa8n8-Hkq>L*Ro^=$x)Hd*e)Pg48E&sf# zmNaQiVx_WAvv#uh|8?kW1r#F3NkBQCKb8n+uwRd7Lg-e4@Qtsb_5UdE=wc820X6oJ z7V8aKkvejkpP46@VDTGy3Dh5GuuGN^x!+TI$Oagc@E5W~4jRtw4Zj`k7K%<`U_5=e ztsnYCP)v!|$m1TfzhEnK4gic}rLt#z5~s%JeszKcxMT5*e};t$8_cACiuF-S?JR_g z%Z&-#*%v4o1rimB7~|glaR(h`R^1&{pu}@JV-VQ}qXW*f_&>@E+C%;YUKV|wI-KnYKhwx?O?DuZ} zeG>~T(88lP%OaVy8lo){$(g9B%@ zYziGv^0BFRebnx5>EM(FFNDWOkqv0v{^cLE|Bgw80W(yGKIo9#Fe{yMLv~#FfZ(zf-*BPhW;w5@{w@b07 zPdAk8T8@qr7sD6aZ0qmfDRWbjVl2U%_t{N@?R3&5hvm&P5A|o4*s{O3u7_(c{#ZPm zsa{KoSoN({7p&V{RsOp*3ag7aT@6FGAk6LQuF#SK;CG?5%!F_39(6`ih!+8;L+Mqeb;!q z4a1YSzo?1%apR$z>@-jk`*w#mD!y?*0_AqI#hPGxIFdI|CQmi>ccaB*XDZ#A7z}%{ z*CRgW{ZWJ1K+VXOScJiP6*VZJrnDZH=y(ruy*^(W^c(W3pE`_2fh_D|aRye^te*V& zJ#sJ4Y+~1#WYWeZHp2-78XB6XQTij00m7q4NAgV^ui4k)_{k0j=qOa0ldo$QNB3hT zwm7^N=BsRxo$BN>Gg zyBuX1%w*vAIGhf2M%ZVcV=IYeuCQ;#aM1b;oPV(Dw)GHIIc}R&b^FL~agS zy`m0iO5i?x`N74foOUB^#|gN1hVpXrx;h~p+!0Z-{ew~wL8~~Q_#29JTNz^g`!KfQ z{!{QD0fTf_+YY{6iAe<}+-(sF-|P&@edV+KUo{^(Hw!g(N7wu-Nm~;qbJf+e8n$S^ z;l^PI9pr9xpF-ggLWwO$-bWjB_^H_T;?0TIWJ-7eR3Zigy83^pI_tM6yshoeFr>gx zQqnD5N|$t_Fmwt7qf*ihgLHSNN_TfDLpKQ0AdSS(9WUp7p6_$cx%LnHAK2I0dwtCtS|B>E`r{S51;e{&MI0#i5)5cjm=BpBeKBOpG~=oKbMmh%!B#-On=}? 
zp8mw%5%@dS9$p0BDYe>EbsnP+1ROwv6G&Mx6jKd4KDxXdYGxSH23TQW!u1@1g#Kxr znb|JP@#+k)jL+BXQKm}r`Z)YzThtFG9Y<)G(ZWel; zD~KS!oUC@}x0UeKDaVqskx#WXTfl)s|aTSFH2^wPk6vKH?pqYk%>QF%054$D|+E| z&H7`kE{NUZ+`$TGrf&W3q|6)g+QawOLTa+(vUYwg;^((HU3G;zEK$sBe_~yW{qdgo zC}&jMdJOomJK;GuwPDG?AsjRrJEfi72`l0ru8`+P3dBRCr~ zpEK}?+#a9*tTiQ#Ip@%q>=v~1)AM#muHPO(P!d%YBq}TS}TTaA-Y#fni%?`h*8hd{B46O}fkA}uE z2t*0`ts*UQsF<17uuZi^rH=6)RnGNL>AStF}1CT zC%y_IG`p(uHXL9um-jVSa{pr({`)FZQF>#IVhV&3C7h79l1<1L3_Zc>)Diu$w`G+x z?X|?NFDFHa_(uy6gcr#nPinp#UnVEHwo0eACyvOXcPbhEwa`BSStMjABx^WE61eUp zh?z_KijWU5x$?=E441`gnntq{5tKfuttAar<+<4?47VXZ9v_%5Q$h8Hh7#>$Q;R-_ zD_a{94V&abKM?Qd4iQ>gDf1%WenwW$hG?5!&(IWYu>w+uA|Yu)1^WGSE3ev7o8)5pJj@jqJYFB26@8^f|e3v9&Q2q`83Qo+!y*`Q|@Gx>-UC~bz` zh*x`pZE8m6E@?5Ae>62w$0TChCW$kwcs4cFw8;OJlBu~)b;avKh07n(2rEqge&>x# zminJI^NKZYc_no@8H1+f4R>PZ)Jt&zb}H6`#tPf@Od1BFnVOtXC#T zmww96QcZppZoSLIv@^bjIS60Rr8SXXFm?oZgfjG`^(szpwA83p94J^xGkUB7FTUp} zG;)S5Anz>}Jua83KWz@C3ZDM?kRiz#Eq1fSYnK8_@cAiV2Aqj1i$5xBzAk1o!K@}+ zL^Sa*>>aS_r6ZqRPUhbaM!Qi?lfX=GKkrMte_LjBqsKws;S#PpRIBmmmv969kILMY zV+hiCGFQz+sE004w|lj7D1XRyU0r!-wk%!O{a<+hKL{U!D+YRTH5s)MX}{uocq8am zj~BYP?0L*jJMegzw6wJ3uo%YDvn_dvxBDh{{kN~LQ`wF_DZ>qQ#JY*!2b5W0#8W^H z0}sw0jQ41v82c#SK(|H!2sA>o{Ark$s>;D;F!V^}Ry;5J*QaWwNQFS)JVVL{2lsx2 z_c#Dw!}cD+25JE=Y0n!Z0`?*qlv8CZ)+%xym4eAZ&<|Xm z$XQ?;Ds4u+!r&LN{)?9~ns0f2=^kJ;g)!x}$rk;{9OndEH1rj#WH=9wBg z@;zu*+?wwiD;4KevTZh-Z2jgZt(iq2V5v~qHo#H7+z;dyg+Ymt4$@ZXbueWQm^g!a zr#1P?Co9_$XMosOh2hI>{kh-LpP9h7MliNr&ba^$0jx-K8L$}s$V-hR5vk5=QN5S8 zMCF|`1flZ#kzDN|hXZG=3MVgWxErLnZbP!;$|zOcNqwr>;f(>{5CWV2%pFpPaUo}$ zoXO5}DQTxr%L6L>DA!u9R`V)0P1bTjfFErNnlN$|Qkm_;pX|2@q9WsNdpo zf7G`sW#B`0M*{p}f0$0+%S*!MGr$ZCuG`7-4o%p96V!SR9g5~)ZzeeC|48X+5Jc8! zF4wjfd+^O#3TEUx1XA)n(U%P5PXQ5OIN5Ba$}~mb9$(oeV@Vky|k89 z=BV>F+;?Wt=SVoD@0+&TKv$@Hw`*l|%)ZmlTKNaIf78bi=N(t)e!Bc;8QB-tGqCw2}>`BKN z)%l>*ww|cb_YguwE+a^ucIV(*p+|SU8?z}CT-l!@*E?RrTmKq4{7^L;u(FkP=j6+F z60>re)8j-aayKlw(R!N}GeEOgqGX1z>y9^x{}{XSa(R^|oCQI$WrZ_#>Ze5yGVdD{ zljLO4Mk8d8qrmEid22bqXK@M8cZbA7)r()8{#6y?#Y6t#rtce1d8PuDtzfkUsDudf zGE1iBjBIbUzA)XVm7up!K77}1Xt`W~BII*)s)ELTnTQ%5e-u{&xO`3AfrvEk8|4V_ zaCX_Muc0>g;iQcVinJVQkCEhf7c^Ri_vp8oj+<1T5NSKtS!j(^88A|N&2dcgGtNyp zkT4)=&~R%PX1y-XhB5VhOy?D&fMYerUh`#>5wXEOxB43r!dg9hUi*^B=@g`(#(HcF zHGV68dYPnGyffpBKu7g}t$0s~`44HaSIJlDQ8f(fp-d{~xS9-YUpT`Z=IWb^9&`u| z5$fhVj|CXVXol|%p)&D=Q`b-NGYHSm=q&`dwV(~j>IAYz2vwW+jBCqf}`dQ6J<)D z*yo6NpG8Bd=X(UgWb!wmbg|deM^sSzh~ge=U-hKP7nymp43qV zIv4wE`Rr?rFdu=Ai?PBUW(I|x!&^#9``g)+r<~al^UG>mR$u3^F{EB<&ZSL>V&f}a zteiF_L_o}D`|#*0ZE^nAd}iIo?QnKIcOQ;jgyCTZwTc&p{N;QPuj}Lt7DbWne-`rI zO8-JxoZFibSkG>wxBb11gx7e;`lcaO&GA@;vfk^r|Fw=fXW%-te4Rtn)62wN_fhxK zIHDH_JcOa-X9Iem&qZ0pdO@i#6$_1zyR*ieAke<+Z7!cVm>!)LM+q$!8;bT5CHROe84gWi2ZX?+{op|KnGCIX2SwjU?Gm^00aQv63<$$4?NlO%{c3qb zK(?T63kwT5VC~F?YWJ7dU1T6A7fq)$9d)PORb9fn`l1UO!mTL2nQMMBuA1w#W05IK z_!Q$~4ByS4s$ItKgLlJqp`2Z>vtnBz^ryaKw5A@7&pt>Y9i>ns(QBsa(RAa6GV$GF zJ>eq&C++1({S8#dT2_RJ8JD5`=O>6D$}|2Yz)pKwn!`c{jAmyX>!LN^lPL4dQ793> zHN`EJPBpI)q}byN{KJ>N+r3QdB?A304-=7^x=38AhMlrPd-kkgqHbg|CONhKLj~KH zAM^e3x23SFC*}(SN-I?FWRAWOG0NQz{Q1&dJZTaVO%>dvoYV?pv8lql&D&?$3s#kP zZO}IOGG2UszZ>AOSlmsYTs}nWNf$%!UL4tb=yU0w(jDg0xAod8;sR^I;qAIt9}t(X z__iKRI-ULLj|v;C@;0-eA^Uyr9Jp9ANEQDTSw6{yJ`o`XHZ)=@Y|c59@EE)`k2r9P z>GVOqtKQ9xhU7=9)plzZ<9XP|WHrWcy3O-HY@R)arV4Vp&RaP;yRsFJ`&E+Oui{pl zZGJl^67ycr?diJ^uYOc-`VWo3s>XO%0#P>?ctCD!V(61wrEe{K*G8|(;UlY z=}G%_cE2=N2bW`b6BZDALT)lL5Br!jM_+T`Ll^~>Fr7>r!{JM9kAW|!KkbQR=<3K4 zXhxCec38}q)XV9i@@m>$!Q(xK?mj)_f7|b-@^knp}U2ggJJwy8J~Wh#l~B`IH8m#i+>lk zr2SR1ntV{8XR*^lGqCB`Ak~^^^q`*) z?TJK^-@GS1`}v!jxCb9`wY#<}jVdjY0N;#%&Tsv(;!rhZO=vwX=MlM0P-6Oey7o@Z 
z4^!QoQ{dysz`HdlZveZtDRtFXO<^h(h_tFXo;mH97G`|9N2{r3bl~&#q?w$V@yUv-pMvklj_2Q179Zdb~GjXB!@%KU`ExXs_@Z~H(~|`-r?Od zwB!O_3Cqg+Ock}HWl?U)c|13TeX?vu8J98bAF0>OZ&DkC7NR`DwaR0fVn>p6lGZ9U zgK)o&$W32_kHtOjsVI&&(qR7Xk$mTDY1+*`>m5$V#!POLV|%55pEz8Wt%H>n`iF`(Ue~O8>v<;di!`IeBBx# zUTblgh9^y97$EbMI@5E0DM9FC3;Na$Q;MARE)ArZF8x1pp8Lsc{dd<>hjbvheRVlv z{YeSalfCX5)!pbLCq!HJpN0Paw@M$Ux=8J=t7r#`~Y;_Z(WlwCQ==uyaibSqcM59 z1V~Yc%%0C-d=72{qAJJ(l?T6}vN4E~gH|!SRBge@TbDpnZ3@lum-Qk0{irm71X8Xj z;i7y%?5Q6K8f-YR)V6RQ*JaY6`iAoneDx_sZ43Adc-0RvFebql94Q{)kWV!z zex{#bc?j?NBUjS!D+7xKptLF?9oLOoQ;d#gbS5@F_Z0}B;@Rs$`JjZ9(&D3&N-BYZ z`FBowo5_E@A`M}3e!30BYBUGJT`DgVaF3Q}@Y~_P&T4*Jkq{ zajo!jMX8x6E~>Uvytkx^xPdN0zV%j;Dna}c$ca@CY<>&_kK#6m(*k%=iAAa?fiU7kd`^~Jhj1pdQMl24FhW6o%rJ1;Pzcu)4nIunE zCRmVie^#UGv#gXgjA|!M^&-|s_X0N!Ldfse(qV4ZF=oQt2?AS8VdpM;whRLC7wSzm z)Mcg09cG}kOCk%6;Obb`-3)aq$~41|RMum2A)8*Cq*JH#S9!WEjHKPf}C5Z8TezON;eZc(~8*t@jKg4{#J_T{m zxRTw=WxW>LWzzU`kF{FZG0{3Cd-eKx-|1cS!kZ9q& z8-vS{(^b&j>m&z8Ia)~1_sbqq$?G5P?#pXJJ5k6e0v{ENm6SFS&yYy92jh?VJ08%y z=iRTggRk3}$b)GbbBhBb0D+M|OwYW=PA0B@usI*+^w?lt`>GER^c#q$1=4kFC`2q> zeKqIUpyrKrxNi2=TE8L3QXOVlmg6BJZPK88^8kq}=IpbP<9a9_Bm9BgtZF!rpT;O- znoV38jy2rZijZH+OpNunnXKzvA1e@uHcmUsTdQh{Mk+}hH(n+^v+U)STtBspGaEKF zTJ=D@KGCCgQtr4-r`#qF8AWuhp*WyCPKyr6a6{=#KfNhJdHw~Wk!`4SCjIl+3qgx4 zEaX*N40F!6{qEj#Hi<^X8GJj81QhhJp zW4#qsXF-MdVeY#ghKG>rg&1d!c;-KDMhZB)Auj8PCfEc~>wSi5J3Z3;m{2s}E@ z*ae>3(=pIdrO}8omfkZ~R13r}%*ubfRBB}VI90BvDAyp{^4A*k!`fpsZc?&}3lf9G zQZ;Cayt~%M+}jO7lVVvz)Vw38t!>R#7lEs7`(2lhnJ~Q}NiB%?qPJ@&;2?CDIktTEhgI}eMrkj8avgter)EB;ySXMX`uNL!iT!I4@;paeTvG zP9I@1I?$Z*xxi}Wu&*p@S4lS^EnBXMC^(N|olfvt+&sk=v*A3*KMgCoKr%LL~?%C-Zn_S>JSyKR~I?7pi1 zMOpPm9jTDbUvOEJy+v)->4IT0Rr>4`z{#ucF0M4T06!0s!#`$josWj$r4u&jKGO(k z(Ig)7u3N6^+exHkI}zsirq!a1TK4rC-t`q2&mcaL6M$MXzrM8(AN*s0@>{f4Ew7!Q ztx`OmoZKkXy@aERU>bWEZGTf+C2Z)cADONpOXNB{-~ip`W;1d`=K?;?XFj_q{raV9L= zgoo?flk#||C2mDR?r&|!)@U@gU}Y@2hrghwV!=ME2620O9`Ce5d8#vPD%tbf?)2oU^rsepZGWo!hAvYiki)(JOan_$<>wDiWtVHL7 zlXYjkPuXVmk>2Vkt*3sOj*H!P;rd2V@JZcL@N9;V z0Ku}x?JGv36PbVsQ6u~@LzqHh^#oEBKGKZ6#JB3-SlY@o7~<@|443m!m+U=@h7{Y_ zW^0vfsjfuja2yg$h3Z~4mWbVEjQthvXuUkP$IHe)&QRb>Qz>Jtz(*$i?LRzsJnovz z4mDxSDln&*jC8mjZky4xBmwfYa*6Z2#zVRr1dATL7a109KBzkFC?AQwV|qo@LdmqRE~ za?gBiH{knWh{=?N*YxMK4uDI16+*yLwm)KTYeSf>r@ssB6$xR2SL|4<809NKC@^g~ z>IgK9>Pf!=AoGuta^-t;wVJ`6Q?Zs!JBJVP2Aqo5g!yN|KLa?;=C*Z*x{z~k-(anV zOQMLmeFm(e_fF=6A`*9(vJCzE<(V};*-z97&N@q>3j-4Yx}JQdEgc{2m@PaiJ>o*R zh*&E=(qJ*>^CQv|e~4Luax1bh=Agn}$=SgA8CLCQ*8Q%E8l5>wKu1TQzyHR(&^Tvq z8$PC`Z0R!ExCRDUcMT_P>jb}#C*ILA5ZtPqv<(#y)NaR*8TL1U;5XUXH7JG9F(nw0 zG&UQuc=B>PPGODfBsU8P@%0kaFK#cP&ARY>7a?t4iZScsf&iarOO1@H49RJot2165 z3!WFszNqS(8x%Isc6qHUd$wCr30AKH%6Gg##q}3>#bAA8iT<2=?AFdohoj=R8tu5T z7dKfl-<_`lq<%xpjwD=-6tltXJ4+Imm=PbAv#gKk2v=ITUd$PW#QbiUr zU0xQoPAyQ;ixQU$QO~@PA=t+CM1;?K@I{MHmlX>*y6LjoX8mlN|91ILPykv8DO>ym zuegwN+wO!Ij8RTjIE1ZuK5H;2pyrU0Y&>ghF8RL86;i9R(bC{u^vB5+b=Yku0ICqu z*?j-!su!jn{yo}PW8bk$Wh(W|Y9+02pmI^zb>5-x>Ia@LzTk>5r|ZId7W+(NEh>Be_PFut-8zQlV>V5{;D*)-Ki;xfpP{$> z`i)VfRte--n@B&8^LCan4#BY;(&SGY_$!!<;gAP!fb>p@FE9DegS!NugO!5}{#xbf z`L&z(TP13!G=1C#t;y=tNtCvg@&>h%qOtG;zu=NdU;8!nK@8}qt(S@o-_Nx+-8dFRMqkt3Op&YX4yB+z?V}2+gjgD3>z#rxV)zI z`hzY?Z+8^x9ar$yEH7hu30_e+=K5GmMm+Yo1vz8Daz#_YSwG*El`C7G)(0D>x#Wwq zNBA(+7`T~4KsY~N2fKb-dw*fa)zOH*#+XsfnlqC5(C2;?gVzJPT%s}tVe~AItuQP* zA8~y&Hj|kFGH+y=L)}5*?uAWpV5Sth6>)o2c3+3>#aSzN#L4E6#ow8PsheN_2~Jki zCg7tnooT)C+eY*C{$lQ6GB zrM}pJx)QFjA*g{-zh=HC`FBj}0IRlFmeDZLUks%$H4LPqiwwSXsRGoYqBGC}SKt*P z>chwj$}TYA@G~F|jRzYa6@ijU@HX)rW0brb3H&zr2(r=cVuP}*+X#y_Vn{vASvsRj 
zjo^|0^=G0`)LgJjul3$V{ecr31<~Ov-No(?ZITvTR$~FEcCkqt0gax3ATOU)FFLxW7mkg^m^)sF&lAP%MbXrOtjVk**hj#iayL$|?jhT{dIP4$t^L z^r?awV$R=x2RwZh%^6;b*iA#_;kN7@ zMJ!%Bd;;1y1Ge6m4AFbSC|qT{3r?7u`ZrFtlJqp_Pe2`NnDDE`Q5L-wTdMLV(*cRmhf@NjCu?8%*sW=^%6?n7tHZL#_LC9& zP*i<*LwK%e(&_(0Oej2H1(8f*3IRSdyM@PZAEdou87X-2zb4sDBvx4jK88X+-rIfg4b`)A5A7FHrPi@kr+N>t|UP# zNkON$7RP(FYtGflt%TOkjj7%L_Gy6oqy{V+iHN>)$~iS=!5!*w4&%HoGDkgTqtP6u z@0j@Ncw9oceoPl-IV}Boo#s^Qy1^XVFtrV+4n^(WsXeK#{ zzv8M-{l!&Xe8+rI&%B0fO<(c<+goABA!@!K{9Z$)MiHepId>n-Dw z*AYHFk*wsU42?aIC^2m+>)z_Ejpr$0-x2w8F>3+>tQRDBwOTd#jO86h4rBx4qWe?= zbOq9Xr#zGH=S`oKv~&W=7m=(4Bff7fZEe*fYl9EvTG+63@^9)=Gr(o97cPATI0o_& zaZLIOs(W8pV*6T>+(%ZNIpz6DXT!yj`XW*&XHK7XXBmkmN}Ea{@kHBU)p0HR9pN$1?x0-eyhQ5`yvx75E)@Ch-!T0YCT)_XF z;Cl$D-KX85>(Z2B=y#bS|DyFBwZo0d<8fz}EmQEhmQ2x9$^ZR3|7E4P(t34t6tA#0 zpC;I|q1F)E-h1#+h;vTr)nL8aCG-bW*lXuSgKzI~9zlqO9)}5M#U_1QKseTpEeI__ z`3=v2`^aMDk>wGS=hkr4!`RarvB*r)-;8T&W9#8A%960@XEE;^`-Z$)zsFGTUzMnO z(G>i$xQkVdz+PoukLpQE$sVrRDIRb~QCH7@K#M@AQ}Au9qvwrT6j1Daq2p8+FaObEmYD6rd;NuXVKb5;@13o|CE z!Hxa(C(t3z6+S6t?CCntR6s;4DV3l)vpD9Am+SU&m0nalp6W}4-gd{zCs++v3|bTV zXV5#~uGAT*cl%NV2Ur901GXKyNjV?OkXIE5N{JY+@JcSw(58>XlH!-SJQIeng20Bb zQ$dXyPt$?Fg)lQt6e`W|)AeJ7yo3FG`CXegr*7S}Zh7K#Np*7Eaxn)de&g;+|a^|3b$pWIi|LG(pe z>z-LTGgB;her#Q<>LCNuK`0n%U0{s^%PI21$N>xmE({Ty958-)8CKT3R+q0?i_f#v z%UkvgWpD?@6U@`d_}Fcnt2CA({@6UOXy_%7rcBuWM)gK9=UQ zK@Jy5Q|2MnOU3u}k0z?lJxjIgUz|E9I`99GJ8WRlMCnpJ2v!%AvtN&SOXpET3n!^H z7M~_$yb{y(*G@`7_B9f&(}fgk+IT-O`c**K)>0xEG`+w6EDe{mK83B34Pcmy5ho+W z)?N)@^|SXrmwD>@UPTZyuF~oq3tuDFerO)NWD0v#etYE@ugb_1Di^CAuQ7j>=KZlF z&+S-0PIXv8tT$$ZJ>2Q4JYSjQx`)s_PKMcP#Y9>r&fk9DCzNN)+{TwP$Dpj+k5h>k zN{knVcD=C1cAR#Rap%ihv{_5Bt%6iJ^r@TWJTS}YX z)K)CZtmB|jyv#{WfF04gqeLk~G8O?NY_8;F89OQ+DF5>N+$o*Rvp*Qh*U@oR0rAla zJ2)n8KoD;+|Bx9THf>ag;)f)9^=R01Z^Bxb`4?UVlVuNYrI_ahe z4d+!HkDRBqRH5Q=<#!I1dcoOqEEx<8hBZ2CFPpG38U-vRJsaB{_P*<{Y?TfI3=K=` z=`k1IOPOD}*lO1dEzPf4x%bN?FqJ&FtzLSl3~j%hXJ2-jePC_I^kt?Cv#m0AibL(? 
zGGpqmKd>!NmGtu+HmG6t)F}^7>TXbV;Gx*T``xi3%Ar#>fHUGCBM_lt^4R{sQ!Wzn zkKMty#DCA~o^JVcsMF`?eud|dhIv1HYkaG7xRtQ`rkL*6$N$DGVkxgaPpJv^NZjrd z^<`c8yN_TM_H6t)z%s%p0Hyo?36rhn0D1Jk0J!JH3abe)?_~Qj)SXNyYFTfF@FHI;rlnQPw8#fvyj*Acj}38l1H3EP=Tc=L`JPATx=9k3s*KYXpP`?RZ{7AR>7Mdbjn@FhQbVMB~W#YESdD)%)YVlvyWn3Ep=G@bK zBK{qXKKLT)GI*4cWNtf@RYGQ!^?bS!@x{*6SM_pvEnv<$5IYE=}M&rv855z2stSQdz=wKW-u{EDt;YFBFZzyt5_Ul&*=P+G+yFl?50D zPEK_y{Ob-WTBokA-u*XunjxQN+VJmpez-*<=`r9PH2V&++!#S%li{&@u>8{q>#s;m zPW}FMH|GP*A0z%u`c%C|5^uO|=Kr91z1M9<%#2H zDm3k{Z<`%7x^kVthtqRMsy$JA^u12MyQuR)iR$ zMgHzIxx9()yOQ-b>G;+JeiP<5nzG$_Tzu8*?ZgYD4-x`aGiZ-#ySQs#G+!015l(-L zMLNeG~hDQNYq@0 zC23}ehrNs0_CL~fZYx!j5YqI!pbdCCyT?xw^0H0?S!la1C9eHB~B7C8VGA&I*-H!k#XQZtHmS zdaT#E@|x~1Eh3hjT)8a1M9l^Eh2U{EU0)+9Lo*&iw4UTOoYgi%Fc-|tRgjB^%%|B) zTb|2y8Ob5V%bQ7MsL5VpNq$ml^a(LFFjl^{0F`hsDAA>deQnpGv;c={^pa9Ai^=fK zmxw9UjC1=;(nxk8QBpw_aNax}e6g{Ax7zxc-nsDXz(l4 zYYQtZGYl&)Q~D9rmUE{&`iV`x)O#k|;rnarQ(yjMHx&dCyJYq+SeCa&-nsqQEo$2E z_C-o_chJsC%ak}3gLC-i9B*bW!n5DnS*|btgnIvn_*Q0#a_xdP0p6mEllKN^NU!>W zJ<+y_r|nIo?Rcg5uJI9}SATX0nkw~jI2txd?|zT?#MBg#wLsc)uPC3H$CNnc&r_KT z>r^S3cYy=hNs8W=j8BFn*=TwfU z@`a(eNdNOP|DSzK9JlSf+&;=_Oq@Gf^f*uGJRwI-e-SWr-v`47>ofaDMetvRoWUV+ zCF147wEW{Q%qr+ff1={!gej=VuS(VsRuSM=7(6!{z2|FC31*|k1>p6Agc!ax{T8)P z&+r$|O>@B40>tOnQ^Qwgb;m|?Da`R#-|73Bu@NS3`+G}Z46pET2ZTcvM3gbm<9>)q z@x@N*@mW3f3LudYZ-em7!2r!m6uXh%wOtO#Oc9R8N=EL_yHni(p?krSgL+of5}%xg)LuzN`T0GK9ZwFe{2en5XfVWRW$w&4o;zUPF$G!0Bh}h z>OLe0Xpx}SD%pa@Ls44yA4KrKyRtd+2mXD;un4w1i(N8 zf5N990sUdsUicK#)JzvH4E<)YE^1@O&X-?~HK$cNT;M}5&Y(hfDBNYv(1Be>SCj11 zXA=9HQgIw;EP}5@zQNbyCD5gs_erQ*Z>lCKX29=x+$Zat2JD8>}E7W1t2?)pkQPF7IZ?)C9p?EZb2$EuLY1;Ouq02)`@uL)Ijl>xMg-z-O|2oun8iAz$%Aj_^%Had;b8OS?w<6hBKU(f>HT9NVKDX-AbnQz|0( zQ(flbR177XZ0v5})8Zr>kGB1%A*VPaxyrTXJke^n&QnO+_=>Y({2K&l&oHcFRDd1} zA)-GNA1P~cn#b<>Mu2c7*5@`(?L-Co#YpKyiIZQ~$~fthW{-TCuqw*eVVJop61q^5 zn(>|NUD=2GS@c#_;!dwq%BTLzcSvC^`WbAbhr(v8_F=?z7`NjG7Xm)h3fXL6QN~so z5U>f_UlVmGMP=CgVKN6^$nzufN992BC10le9dp(qTtMVu1{Qy+^-w(99kFOJBhNRd z&R9@Z?Vugv=p|GzVDXC0q7?|oQaN|2X+q4Ecc=~%;zh8G_;(+9@jo-DWL;0>vi%k+%!EzHDSuTr{p zebJVxkX`dXnF?z;5{}Z5Wa0wq!kdX?{0@%v7$t#vgf**#^XALdv};`>h7P}n<*F-S zmSx86A`KaQth;5BM!MpTK5fXF7>W06D_+yO=7kdr=OMe(!vDB#&SY?n?z$@1zdt6C zxnAk>I(&HIAl;i4TL1ns{!gUD+g{(rCWndTW$L~hXJyK>!!gEO0X}ZL(yHi5@V*KB zZv?XyE$-2?MO5!(9TND@44RQ#8d~blVf`q>=odgWOXDFDv{iZ|VM-}gAT`P|@H<>G zmvwDE@N08@(AY2qFqj(fCI2R+8qgh7#+>o|v**r}O@1vF(Df`cuT;8BMu(rojD$xC zeeMUFIuPf#z!N2PTNQ^Bz;TB$j6gQKfLVP^Axti*$rSsvZLjS+Tt;RXhG< ze7SfD>c;$B*zBvM)a$2MaeB{zWq~o2y9822Rae@M09?4l=h^fqTfV#~SQM2iAa1}; z6~JZo+`L#VKnGrZ9{Bu>9$2nHowkecpR zW-4T&V9QHm`I3EJT_EMPPX7t$Ht_5YjU!>MwIA?uCYeBSkO5f9|M0o`rg_qu9?Fn2&O1%;Y=-clSS@zF;#Z!r4ws13uN<(YWE{uuc(ZlJnoosROmCW9|rE$p}iNvpD61oNd@mu9kw#3iP!0A2cEKu<^jLB zg2v`iAXv*fDfq3D;6ChP&|A*}S!A{i|FN9U;<}fM=5gAlQW>W5N-~FDbgVs7O<8Ey3sM~F>unfP8D51Sfnm!rvfCLSb`P@?H;q7%vdb#6bWpf;wOi6C^KQ!U% zS0?8455}Jai;T1WPN`9}{i>+?PS;Iq^*X5}*cB?7t--N{FQ{bHZ2eB>Bez#|LqKn%1qsxohMONxVIUVPvmf_O;#_ zI^|*-H!fx%1b6S@>vcC|1Q$#G|W%8sDr zO%P)1!BcZ2y3g0y&zc((h@MS|8R1qyF!+7?I*UJma4`q2pVku8lsPF+lBZjkK-rLM zDWza$-$aBQ$zIrzC9c+Mr=+;w0*q}1YxT2os%yQ*@~|(2nLkCkaCfjOq$++fCm7dC z%62qRZnKD=Tb*P#sjDidd>mo&b&>y6KB&IPZSr2B|IJJ_1koV4`aGMivS|YOZ0>Cv z!>3s3FvNh8uEShB}OoO#8Je?CO^QU=? 
zV?A~QFO|mKWan7SrB+M{)jvX}N)T1BKYkb58~;NywwutoJXdv(tJxopu?(7aj*z`uGJWF3 zR@s_8RUYhHeQ1{^l$YE$7W=P#WBz3>^NlWTR`2|;-s4mE(PE-&4wgejYV`Syk@sBseHYi2n#=|O zu*6W+y<);u(Dr}Dqf}uO$;gA|(^$HGrvhy9gf_T2Sty|ZTs%q|hmlMi4*K2?x>8o< zn3Y*cVQ>3+6(P=$P*y2ZS_Yn{3AVCmU=Z8x5FI?yOO^w`;d5oA!%9WFHJ-y}x`qCV zdXl*MK2%j+^W(GJjCrS5oDe|uJRi!AB^4;5{5@R{(AAAREeGV75jIOkHdAqZhi)cd|uze zT}fs5?uUvYnE)^k-9|JMCEJvry|Y_#HwymmV47G%dcFl2*N2eZp1QRoRj z2sb&zez<@3axA+n>~)R8jtohAH{FF9vWjXCk$m8^Zorb%w^QEA**N~j#$v2kd3MeF zv#cZ-NI80~-gFmd!@a+7nc0pM&8_QpG26K9&pwrqRdC(FA6>4opE)aw^mAXy_+~Nv zt!XWsozQq=M#XF>15uhj9MSI570F>zc-EI@6?pj$#Jc?&mgfCae{mb!q2Sve_VZ4Sexy-bkRbK zyGzjGQmklk_aMc+K=5J>UfiX)7A?WuJ-8Jp5WEB{?ruNd-g~XR_J5QdB)M{MzjMtq z&)hRVX`9|#LQWw;d=YYqOgF3WUy_VxSzZdDK9jAb%uJXxVo60DxVoI5z z0ebBeEzb9D8o{>vLWXxfaxLJLa~9Pwc6(I%34um7EE>7 zvGN2NDg+DS3yk{J>Ag8@K4`_b8LrCI()MvikJ4E6k9UI0p3gb|SiNV6JSNlaPu`vy$P zv}?Cg9wWH=Sv*SV+aHzMXifg*#3CbEaX?>C+%(DW9@knQSk?7ZG~cWiaeqjG{<>nq~~mw zo*6#h^|f_ERud)VKDe)XvO5s8TZ)lrWs&^%dSv&rE|S_yuhH#(7x6|I=&Eq9 z`^?!j;H9BgpC5mbV~uMwS7A+lK6Z9PV!QK%jG)vX*fG3A_`*8Bl`H4bly5V>`1kMM za0bx|WvtzyKlHC$=FUWj5NB#9 zPI2e4c2o&`LjQG%Z9$8H&}zf!#E_jMOxKLOm5HeAFhqY9}^`n<)Yf)$EEQJJK*>J%
%y_5=J&s{N+pC&Bs!f^z0UG`V2`!g5UT?BWh$UJlhyS{wAqSwOaOy2Y z>KIVryI|a~(h<`beqtXvxs2s0kxMC$s*JsY*?xgS_*#GkLI@TJ%N%+3cGnA*xO8}> zM<^nkg~F~hfHP6jJaQp+doV6oTgSo!slSuI}f7!+JI+f{o?{1BDG zK15^%LdZ+L_mcJjcPl5=r;Hx&uvbNov`a{5Y6ibkv3kmR`y{@62desQG67MEt}lvm zDFH3Wje|5}NHZ^fls#eo} zFzG^a4PBYV|o4BP&(`4WsyHv;GTz{Tsv6>C~{}Kw3M3DWHZuPY!k+H zd;f#;gMsfUY){8Ao2QxcapDn6wOV}F3>wgBY0xQGUc+~K@Jm(r-F`YxMG1HsLBYf% zk=Iww%g=r74)mC1x|(`bf6VQ=&2}b|BfeW#tSJF5a}wpAvZ?qRwWUNP_u|UKpb}}m zUEPnOTf3RB*&Wh2-Kz4XTU=Uvi3V-_I?#1q_UeC?kbmAiC!xMX`2sM_}%|H z`@C;J97f%46HU6TXo=nVNF?RjU_q=IE=}y0LZ7hN^k{Rh;1UCAmj^y@i&08q`D@5f z%|>XND=mfE89nH_7}~alfkJ*mv9|;O%a#M8^k&;`tHb3s zX`4223%R8kk>6RqI|~odoc*hku{Xiu>1p*6xdH>zYlSO4C}aEBcXsK;9O0H;QDiPn zoG0+&ZPDN$H3}Tza#5M1#7CJ6{`^MhTXjVVX zg?XDTKV;fr+U;mqm~`Y z@`_EsS%;Mg=_{S*OAQ+wD7e2@5Cb*&Saaa1LA$o;=ZM-WofKN>mqyJM>?=U`@>ZoG zc2ez9idr=jzBz=GQxy%hSmwaV@6F}(iU|ci*8b(Yqo-u)B9n8_Dm+t1|M#Qxs^(7N zb|7fZJHqAzI`GmONWFoBo@ZZ*5ge5RwYSWe(uQhfj~TX)o`dKT;AGK3?Of66O5|rm zh^3LMaW4_ia0T!H>?*3IR&5QliDPJAI!C%3l!%)kGfvnt3jV{r)i&hEOoG1jgBZ;6gF`{@bixV{?OD4Gjnar~I|2L#R&vl1fn~+g=o#p0o z4U4CfT8YH=U{NEu5B2B)*Y~NyciqP#mOLqCKA=f{3 zd~fs)(0uv6(kmNXITOUp$4(m~6G%|7Gf&S`UI=9onADODk7- z+R(xTrO5kG>YVC{T|S0}B_}dTqdT9+TxCfmp7*sWX1wn>uv;35SAxsaCFI0DAP9ng zO`;qiKi!iufK>Vhco_jXEhUYNIVzi-s2J{j8@!%TsnhO|>lr@76X7|{j^z(S7&cQP!mtit( z!^m?th7!QB(0yyl_{`d%V!P?=9DJg?`GfrW11x_-lf7a{d5$OS_g+oH+o7ya$>oPJ ze7+9ezROD^_hsd7rM)_6IaY@LnVc%u<@QA#%8QuyjH78+IGRk67H@~^Xtnu*r)qT; zzMLN`W~TCZ-Y5E<`>}zU{2SLtP<1bC3e8PcdsZJ^Rh;Su6UBH*lMMSi7ng9 zx~v>4w2c6|1@`^b9J#a|TRS?JjppidWwz%FDLIcD)5e`k-p2vt>ZJy`Jh;VrzN&g@O= zJ6n0fIeI=Cx7#LqqC#SHhE>`|i6^d0S6)OGpmgs?9v0wm z1$%EIJq*YOY=_O6g zCkHJ3SvnFXJ1(F{uvhvxWDI6CANKLtXvq{}@hplxGAVC`?z9!&+K#yuwPqmG zAD;_lN(2NAv)LyWz*KEdTkd}vo<>nY-)$$N#VoS&i_@u=3QklG4H++>wJ7>%v*fjf zDWiG%h5Zc8+j>#~VmU#I^~DS2kbf5L_j{|1-lgT{;tSQ%h7gJb9O{$b$FL3Q%neig z&0wz%j6Qx(FLd8%dmkRH=y4yHobR1>3LOX$1PQjemZ{GwSRP`+7b31B+ zl^p0;E2z~_)j*nu(uiblwT6g^Q2yynYe1^*_(=De-v)2qAtG!<5!DW|xycozEMaBJ zrH-YK$;H%1EQn@F|CVOyUTKaO2ZxIl|0T}#0EwVAY`M09QX;F4VI(yyKp% zS@A_LSDo#iODN+EOi*7=AHBz8W>wNbZXq-eZ`Ql zN>Y5W)O9x9-F`_tzu@QjS3Iij7S^S#O9tZV)vR61)e+@4g-O3x>qV&v5bp3_{>bHL zj#XRHmu-7GkITwPl`xF4*SIQ0rPFYz4c2SJfjTh`dS03i?q#hZ;Pw_zm9m|^e8i!* zDrjg{C7`%-&9d+S|0)8Q`5Xwb}ppZ;q4ljbHwr*{3EEZ%;z zTjy(Rx&7TI1*QSjfZIJ~2+v@$y&Hpg_wAnvox5mdmR;|zd)Y4y(>+?o1v-#{WLA!P zNny;__+>g3vB?z(yT;&`w%&5bqHoCdyngbGnnLxNZTZPxiawyY=zM6SCVbVOnJi&WnT ztKVlZUX2i~7D7~V8G%ImI_at|7YIG4a*ee$AuEuJixueDRXNQ*_OW^NN4h_#Qv=lw zGr9~jq>3y(+5bHI&y6sVFg|B^1k-@GxQIBGFg(p`{YVR5-M5zYkh><5==?@_Hk)xG-9i0guWB-3Y?63JpSAVk{69YlIL z+cG9i)iwX}8Dip4m9G1qmuw-r+_^35uR`V?NL?1Dr1=>DbQx`y(L2#4(Vj-f@yTbI13VPJb@i zNcw<>X91-`oe22^lJ`(kYCnCv6RJX^y`hrCC8{(4FksPnGIH^Ci7Vc(B=B#wzT$= zLrY-NOGUi7GVSzUa9x+$zgL-syPTR0l>I+?hxlB#TFE2N(-CA++${8Y2U`8_@X8vp z{|vAE2dZ=0$nicZ(aY8TaoMPY6F%Aw-`g{JDhN44TlJnILgYv_BvBzDt!MUYl zytS@_P-&l6p#ZB_!&Y*LAqcJAcp9%FG#@ zpNyL}0fF&ek`jVIHeufW-r?s+h8QUpDHJaTnD+K4hvyuOA3K|M_)i)KBM3nY&n zi;^u?uQd5M=>siRIFe331P#Dkmq(MQ{Dv7-o(2caQq|G5>*-32RiNfZLL5WEG zDf2p~F6`NOXZ+LiqlrC}ceb1{9+0uDj|IhikwY$pNY?UmHr<&nzmVqq4p^l^+C378vo6NV0j&f_Wc`T- zHM*>OipeeOtZOq!xx~y7+=4{pHQ?=u@0Ka6OdrQ0!m&c-RX0N7yh)NSYf$Cg9rT@o zS%S!aa9WT}v5uxvCXZ(Yehn`0GYw+C5;@Imj7|=~B#o-~5Z7E?SgV0HAVQZY zANiM-xi~jwY$C7XVb!vNOZC<7%y|1V48T_zk^*j)eT&m!n56sie{5GCX(2BbyD;AU z;y_O!6rZ{6RxoRXDh7o8IQCllS=xB}jWB;O@^y9qYz=I;@~W0pn_nUnm}NO+v$nlQ zc}bz%!MnZbnzbY2Id@#_TReZ@xBu;d>CHZ-^npOfVuYWmCeIdSgd&Mx{?%)z<5-oq zxwKd40}r4WMZO^PS=4n5rv1-#{S@mTEM{6)b&cGC?I$6e(b)bseW!QxU4FCIkH+9b z!i|BLs2599%ec1FPzV4?~%z6F$3MBx)+*^aEK?`MT+CH$-+c8@?fsZ7w*D& 
z&#HJ|_iF|R{QVxqH&&ic8@yI%?+N{L*Pv?S(qw)@w6Rc9mv1EBs3!!N$nXc{0j>|9 zbVxP?lPyUQF@ga3Nn3nk8@Z=FXmTS7-f%U<(j)mMO@$E`_r5koxmfT0DnHUL_32)g z(}~Evok;r&nV$@@>$-P_W1l&sK^%Vh%@LD>B@@BS!zlWPsmIL5TW?ct(wrl_EM=f> z{AH8&sd2}~E(PKPlOk#m)7(6%{1+UY4g=$ptX#>^RqE zANu22$Q0s(*b`#Y=pIeYd9GguqK6LUlGB4u(e+DFnnHBCONQ9E#sduzAurBexWb!Q zyyyX)HBnY_@V1avnbWwVqKx3_VLi(<9=Omnu3CL*07IDjPO}E&sr@rjXM04|(64cB zU>uvcuTCwuPuY-$BjFdI@qLvmg{eQ&7tM&4Z7ss(5!`RSe16{=8hEMsN2wP0g{M;? zR`V03ZlOhjEBEn;ELRLqTut?~+}{gi`jwMDe57o1CM^?`d-IPc^n>|3gV!4tquV9=WDbHq`Fnxo%9AgN-1>gs2MBayx0JMpL99H1%?qzE zpr2xkgOi2u-D=h~S3opyKgq_xJ6}4~5TKCG7lkcsn6r-g3(R0Y_GF5ZL=(|&*WCu zs-nK*GT7LV7i@bWF32Q{dD!ovW^lcB?O*=vOr=V4)sX7L)0lm6A+AGQksT@_mG@tB z{GfsB)m%V7`8>*NkrdOqhtf5_RPYiZ{j7@>^*VLJ=H`2*>-@s+Bv}7QJ*qFp(AUeXy2<$|gAJH2>D=#N) zO8Z?-RGYsXtBW7e{bAPs z$qLBnuw~eZUyzpy1|Z@nMEyp#n^ z5J))E3E@5t0o|C`q%L}qPfIb+U9fo;+8cBAA|!j82<~xea1u#`CCP9zkwvF<@3$qP zAl{VJ zH$UJh53wc0o2KS%plH2yIbE#L{FsIjByq2{CPrr4Q|A$oCdox-JU%ZH6~MZ82awhJ ztdkS3}n{@QYjSSb<8#^|ZW|wr;Zi zPB?WOk-t=pT==Bs)Ld-nGjdC#*7rj8{XLS0{d**i{UDW$2IO;Cb{4NFwK3YI*-hkfzL4u zQjwA!b=B{QGQ9`>wn=5Kx@`Pzye1NI#5l5vJGrV;A$6-qKoa&qV}fo(S=O#~ECId3WCU_^hI^U?)%=WdQd z{`8v=cH!=9>fQX%7h(xveNa3ldd}>&Mwx<*>9tq7I_Ox9q|Gcx ziF0V!%Me;b7}WKWaL3v#W7`A?!M;bTt(*p!>vCu4Y54mrYud7**sV;3&J5`MNVQp) zY(~P zR*D_L)-OH{boTHLo?L=l!p_1C%n_IJqc>u>&p%%Rm2oFOypLVmP;o%{hn)P!laLdD_K%6Mzsc|M5y*R=bZP)?!oFYrDm-H~ zn$lmWkd)umB`zCBMfrTar2CTKsK&XOYw>!d<}E8LdIX8|PdZq9jJ%sA``M@q`jK2A z0Dksa2tYolOcQOTDV+a}1_L+x7o*Pb+u@QIw|BUebbb`hpe%yxG%h}xKD~LO=Md0$ ziC%&Y&zYt$p)6bj&g_t4r#s5(9rm7&su$m~U@a3LK)h@Ft@r#+_F`C!k*aj?olc^j z@Q}7NL*j3*vA3tqgTEDf=~x7)jZj32^(KRyJSbkD^L3UC~_tnBNP)xk+1xoX_VxV-1#wSAeGwJBQp3a@f2SttBzMt@i-1^@exHZ0%^_E5 zqmz;8q3DwTdF$PsC2Ms+l&7Hqzgv6iL1aL(`ozvmu}T!B2aMM5Y zYnO4?-Hth6`%x_a_K!}FeE-F!L%UJKx>KEQbh0jH)GE1;>`m_b+{NN3Jr{+x$6A;i z?=GF-bNvO8@lk3_#rKEpWaQ(!5Q_63x5ef^MQ~%BArvTTpdWywf+jg&LY!ClvvPG0 z_V(miL_f&`hu9WTL75sm(I0^`ncF8HM*x+>Syca5I74UI6%b|P88T5w`2-o!gd_V^ zPu?a!cvzerwtsG6CdTPWtpV+4 z!s1}bc@K1XbEf7qCkAxfH@L#MuOkYgleh*y;dbF3+$?=n?42rZIqe$Nh+GBO}xv1XIoGV|=^Alh0UN!);;q*1MhVNiw-CqR&2=<2ZB8 z-he_rd`EQ7QApC|3!%ncHeXNy$-=S-r?p3yUx!@CTas|th)Wdq>{SKk(g0_V0E*|_ z*grBlI>S?Nz%1gCIT;hKWhgqJN4E((H9I!XO z$kJOD$05hukIJ3+MCFS;5o;zD28~rSiNP`0S$l}{eFe@wP1LLRak2eeH9k&R`#BW=9)5@w zwc+W%@hMBvzV$zjm@g=9YDDYYTSOh>w8rBFg3(!#m6y3xt{$HbQd2YK`nVpn@A#cA ze>28A7J7n7MsI6;4Htt_1)}%jn}tGU1nD*n!{rdEQVSN|M6?#Zv9E>W!nZx0DAlmE7oRff*=$a=Z0&o0>B^`E|Jp_s0aY%m!^Bp_y zk}N6W(i975F|E=w%jS1WUvEgjwQ&zNf@3+_5%n5yelx_NQa@%AoZlf*Ou(@?8yoZH ze75mw1cu7;uF8bm`ZlMS>u`GE^5!kP)HPVve>~6To}V>*`p+NpHLDHVFXM|g#2n;u z;^{_*6Fz2rDdm}bDGUyVM@8GS%0w@^iGmeY!F-!vamq7JBjv;5HnCySH(( zV2Ev(j-?cTc-_pRwx4oypQRrLyW06GJMZ>jwk6p*&vw3=Pd-}*_VDKOHTo{zDlV!WPuG2TiLQuY z6EizqLxoHxn}fyPtr;423vA=7njG!`w)fy1+!W8cY~3dKCODW8*AETpuNyP@22OPje^+?fcDk&R07uz^APj9%c*|q754XgAsEBaA$trs6_b;2;iAmn z0gnWiiu9XzIRyzbI@hAsBP*+g_mJq8VEPb%4%jL`vZ9)A{xa6*?QHP7Id`U`!58lhr^ceA2spj#NE3;>;$?K( zUaCu~=|fe-pgSTQOtI@VYWRtlO7Pvz1T?rjk{d|I+VWCmK;0@Bia`xz{f=FFWpS_( zcC82+@>sZ9?(bj`>;3j6xm6Zxcb1-AQl=SeH~GnMHL#6%j{gtSYUt8t%vTgG56cQ- zwqwF}QwQU%=AO>Q%FYH8SH$zB4HndKpo+y?$LL#}IHV!H=@bqw54%pzjvS8|@W{kFpb;{RIuOk8Iye9unPMPTW&t(HkX}!YqTB4^h-=tdikhrtPc8R(fW991gy;x?FkT{ z{B9wZI?o-|G_ICap4cw5=4=9W7k{d{S>YodoOs>Al5?F!TI0Mh0&`o8PyBwnzjPca z!x}bFjXpEtIgG#6>N+>M0dC5srVEP=m&$qSg}I#Z<}z#9pZ`k&43!XJF@x=~*rrjo ztgN4Uwo%XgvH#b@@ZTzn7b4&!M@cR6c=;dv4yD1!6!vsf&JhtZzS*e?4?F4<<>-1i zlk6;2mR4UMdXJp>*7ob|fg-<{=lMrk^q!#xH^5ilZ{{SDSnv!2Q<~rLZSod_55v{c z$aEPA-a}T1?=2~M4Xqgg#62;e>3S1zvk6<`gE&Y_y}tuAGOlpSf^~+hN%MI86NZuX zpn^kN1TDCTq+JIZPdH<{&&k06nM~^WjXqZm9!V^_`xeWYD6*I 
z8P*$ng1$b6N>OKqB$vC*C`??c(!Fu5yQbwbD^Ly+)5pKTZ>3GB_%Y__#|S2vKZ}mjXw)y@1rPK*h!q@XaatU*kXzRi;Df~Alil7zzg%IO zfH7Er3o@ph#?j7TH1OpNy!X2XZTby?%`rp6T@46G*V`_|(xLo$phnn1K$BQSgG;T< z{x{R6E5FH!1{8}Ri9xVy67dtYL?cX_tlvKLa{eC`a> z5pTVkUSp5k5qDFhxSN)Omzg~7;h*@|KRDVg(shNWNpu*==Z{GVsUp}!c>^dY7@hFQ~cFnHe2^etxZIOrO z5FU85e6(SYCw)3d#6}%H)v;AW{}YTFDgMD}S0Z(+dn1O#IoQxC5A8k0tgLBa+!m`2 zRN|t)x|8KA;1^)|R8G9;ZaR`hl4UkfF|l9bR=-^%&b?~m@bU!{`F`z3t#*%);33IS zZ0r%DPn!mX-TUx+xj*l*S_h|)4HY1A*bbq{A z7M&ZKoCE1GeTIn7gK1~x%ghVQzhpaXHqV%-vDJ=05wcqR>~RNHFUScM*OY5H@b(** z4J-m<@pvP2T`BGWd**cm6+i`bq>0@Fa?Pc^ZJqg|MSOhr;g^z!Q0!gS8okPdH5<(s zFR4sW@7j^s%o9fiVP?R-M1&H?atCQs^I&_-!g18k%DTQFbAiT(wnf<(t(nM3-%7me zT?Z@NFw&C7vpK`Cul}Dl@*0Yb2$kD}TF$QzLCND4JeRW$VoPO>xmQX1-;Y%ta&a+Z zR!5%rWH(x9eLF~|_X+P6Kdy!U=umCfUpg{bD4WfC4GH{NEmxo7p}EF+s5i>uOujnL zIyjEE-D7ibVk}Z$9(gzr_t%}UfA5&+>N@hq)z+W$3%6PD=>~OTH?9M; zrZx<@()u60`X3o&pAIojk)&}a6)TqEks-UlP|o4`%xkI6q)Tj~B&PKLlIK6%fRoA8 zEP}NTcXbkehq2<*$JE?Y5(A_AIJGpv8m<(r64e-dH4*NdhG(TH=hq3L0c+M9ii~BI z!S%{`SNhrUA$gKTjOGb|KYwE}=~ojDS>1@I6ewiSB-4T}=*e2na#)mImQ54Of*ByJ zv^414@e%xV@fpaJM@+$hJ6f20qQ;90&?1PJ*eqgg={rp@!1&_{-p?%;5^remDGj03 zB7~)oGoQ*FKbTr0{f)VhjvVTXIGsU56#)*n{i74BrKf(+H$gu?K6#up#=sMVz8oH} z`4C*MlQ1kB#ZOUy;Fen?Kj{??w+zAWv}NT)|zxdfz`3LAMxj_ZaM7%^xFdF^YT$orRRoRwbt@=^|8> zrv{x#Oa1fV0(+(n@AgJVedwLQl}My!1K$-?RuR?RGYgPFQfnx_Ev5dms>IPy#I40YD|Bx}b@!q7 z`8{P&OXq?z-Mt*%fBh0oDa;I-)_0JzQqdhk`C%kTL(t>QoGL<@V!)u z6PTQGk7rM^=tGxp%5r9@o@Z~qJ}Oz*I$07@;#Oi*CAS;S3|>X%jvPXHV))AO zkVM$uC_lunpUOPTHOq>a+LDktLiL1Ub0*gd3KMbSxE8Ayq@o}J<`DXa-Wepzq*Pult`K~6hJVoznUD$3A1ED0w9PFg5so5qs3 zoz@2aC7+IpxzmB>KXvvNMN@oGPuxqXpkNM?$1X!V^t-`6Cf8&5doDb?7ahfp^QY+N z)P1A56=7C!v-)wFv~PCgw3lhW2Wpq+9Kmm^{+8Jg*9Emmsn;^j$S-8FCFi(4nbo3+ zvMHv_|F-Ppn7vpfLWS|mRzLNYfJ0lwe}JfaSa$(lDCNm)pT%n8Yu%W$m^KQ+!GniA zcY(b*xBtN97smScAOpLR%u{Wfl< zi<$^)kX@t0>En_m?C?I&ZYTn)7?Sho-Y98c_aM`v1tt_o7!FxV;z0!hU-M*;Z0cTn zl9p%3v$2xmhTn8xoJiIJBHC&nhMXnoq#={&{S-~%%k=!TL@viinL0ILqf=hjoUyRb z8;G#+^l|(a1We10{;OVPOe;2dKX!|Y^E!hQPWdD9Z*ZU=PJV3KFL7M59u+F=z*nB{ zsd#|N^vJwC6r@Vu^COe(ZRnLFqz^B&JdvBy@)j1-^|ou>*TQ zdtK`>KCU*ml1emhlOl_r#zuKf4n*<2MJr|(1FXSPOru395+yTU! 
z&xHIn)>xz6(3oWxz`QVu^(EDiiST|vgHusgVwHX4KjPcOXQ+#FCjQTdr{`e57Q2hd zoK`m>)mo$fxD|O&Ll$#8Bt4C=?`$66oO^AS7HQ@y6$%w~qh=c>fa8fJH%n%9WAEIb zF5OJ{$UGTPNeww^Ue5VG%xLO+8^+Mycz8?au}pg_7m_F+)|4xWz~}efS|IX9nnBG$>I|$d-YH#j)gMhIqv>_fhn<(BEvgPl3EUyWYm znm#fR8|c4q9HX>MABsPJvdd8OcPV+cYJFcHofxi&E}tk|PFQY~7(`#7r#9Fj+&Y;R zo$jv^Ek!`DiU$stx-f#9%GqL(EBj5yZW7y<8F6!Fo={}s=zie;b!Sf(=P{oGrqG`} zPHC>)C?uoP6L)dYx#8D6Jm2Uq;NoHO6Do)Dvd~6dnqoHr0`{cEoG&R1T z-g-16?t6FJ5l+SAirR_e|A~&tZXN>~vJ6Y(VT*O6NS!x$oLo;-V|!Top0JqHpIgtw z${5v`FSd8WDmLkkL8UqU$LRD7?&~%e==k>6^)AW&FU}MEC^*fD zz??#+w1K(MMw9V7YXb5<+S@m2CSP?a$(Q@*x^i@vdPtkwzs2TTnXK|HCyjp6)0$il zj$UIZCL)gPUkKqQe`~;SGqV%Ce(9zD5JG1~?Harr{y~Q%JZgdbem2+rg3{%sWgX8% z{ISlZ7o7=OW_$_Jt1*{(@ymU|?14Q$nEdda{$Ddi_g&;aAlq@DkH3+%_V9W3C3fLv zqvmnbVSZky!R3GMNB(8n5J5$LI)!5ozIczSAtCGos!_*Oos0|5BpD|AaXEf>yZ^Pl zas)mk*EQDRg(ldusMB^_r5i^$5C_f{)v?k>$x9>B`A1mbBoA3bGv0iZ7Y!mRte;Hq zgi?l2XyUT5l!m1-oJkbZ`X^+Cf7+x7B7c`V&)_*r$BbJrB%6wfw%OnZ==eDuf%St`KEN*uFnC;;q0g)>d_=`s)` zQ$1nyvVbVcPfq{AK>}xohAAP>S;Y*?JpBgq9joy21I`ZQCv8tew8$|jJ)pbzwptpLO1cI4sxQ9(hI6u;Dn(v1 zcj@6~cm+2zt`d>HH(Ni7<=wHKto_ckKo(jU&cJ|O1#UA}4OZ~SGV1vRNzqJv5JQ}z zwAT_z^%XxteIRqFc%}vDBw*_MgsQ`);I!uboy@Dg1oer#g9UVom|0m_9YkKamvcLU z_W@~PHOS?v#GjlJgs5|(Yoy%9-*J&j&JDs39rASJeNvH|1z+~%z|vJPz3>-mME5Z~ zGr13PvMn#U&aJKo(CDt>(^o<+jP>d|aIYZKyKDEa#M4?{BN#(SUcC1sr&*)>#Ub21 zebg2ghExc9k0)7nNc^Ivhr;v%=rGMU-G8j>#s<*aW>UfHV~iJ8S_wv0D13UK=i5OS z3x7sO5I_UPDKXTU`02yrjpIFSAV2;a5Do5bk9nI4L~l%nULJYd&thEBcg}U~5j#Ay z>QUeD>u~?eT5EK}Q%5_k`$?QPSeP4zoULAX7&lZ^MuAj858aT&0P9oFjySjo^ZQ zWl`dc5#f^}7Bh>XV5pdn$rD5m4-wAGovvia5PIr;`YcfCqTl$^L**z`c{dNCP2G5+ zr~6VvT|=B3r)PiJoY=)qq^=S{qv*rHzpjO6f(yp^aT3#SG{B5H$CMBO8<-O>DXm1n`KZab#Le!81m`Gy! z#woebY{!io*JOzoD&oheGt#X0Ah(IAjwqZ@l+RNWiHr^>XJYRiS=MrXeJ%_K^4~qC;QHymHlJ3_&U$Fc%Gg(qA_X!&H>PI>1!4FS`Vjpxb;`5XqytOz? zjkF=3C6Qk?KfF2d@i;B1i5fn^-9}dP^VL}*l2gD66n?0P@|sQfd4l7nm#c|juYah7 z(Yx(?0s88S1Y}a*qQST%B>-F)nHbv}~r|+~gIiG=#Ya6V;&CU33zrhA z|1gMn=Po1hdL$0okwrCg*}F$jca%vdA6-86I1zffJ2jv<)B%rMul1)hybKKv!YDJM zg!GUsG)dXZwsEg+W1%5E9;4j2)DJIxp6NC@q;A_|5ei9+GXDPl807rr=zVir{*D#L z|Moe&z#>&L*Foj2=iom^hvvr~VR!(0E`FHEV@TYO(W}q@A6LDL?mlKh0dYHi^0~Gm zkLAK>)HQUGdDzW;vIVtlrMUE<5JMmPiV5<|Eh9OrpzJ}EyD}JX8>mnZjyaKS)NvuSCd%OlOW8m_A5E14;;#H$=|%a3C0xkb%>+BzT2u1Q(52=i)EWxFzKJ z$=U&?6a3Sy_J`1eb9Qk2-oYik$igrK37W^!GzVi$XNeN=7NV+uhm~&%@62B%45IZC ztkVdbGox-+b&etRyd_`Q`GX9dRRV+!}vHyrPf3vja0F)lkrq7V5<> zZIesAt#BY`L$gkA)lVOJ6q6IA+mDifR5`X2{7_3Bc9cS@Ne2$cmk%8za(C6gNyf>W zBNM{Fb_;B1q5TN_QM+z>j)ZFj5o9mV=aA+`T&0#2zcx@pcBi5%rA}{<*RzkyvQ`UL zpTmr(x(gUBjycgUNb#?5d&B@12uo!P{OiTQ4B=>J2D=j!1 zs|@7);a|wn_#58*sQhX$FJNfM{}YX7L98dyuRN6%-GiKHE-5CC$~eT5>vveAl^OXr z&;Q5Od55$8w*9|Ui`I(0M<_+@icJwht=iO%*@|7Oc4-@X)vDF0MU2>chS;iFVv{JX z6`_dz^L?J@zMuQ~{hQC??-nO4he6hvPQ-eY}cHWLD~qWxy$Xd zmYCjG;j~>#FFhLL9Vh9y2cA1O8lym7Sj?8OagQSZ#;DMZw_rbb@DepF?F!!Y(w`y5 z@V6pW`(2YdtAXF4>76g^Z7xr}xXr3@jvD%iNtw_cforyd3 z!ulWMo2~nvqq!PV!`zqHSnNZLM=-8m4vsrN21+-499L2O2mUdSz_@B2@{|t!?PXj}fCYpqQ>^)%f>Wh@YsM(Y@ZrHC+6q^hED@_!FtSOPNt3iwwQHMtz=8 z{g7R)1rDQ2f(4TjowoYV&LaY87_`mR*exWPEEA{^?iR+<%mH`f#WHWETC(7p z{(k=6c(4XZbYFh5bp;P}atBGqer~pADfXFHl*+0U}tI(lz=ed(GUTLM4a7__i?cU`O+p zhbjC{J}_h0Cfm`|oc55{BYOEw4Dpht@S_$D^psrrinULV;-E3i=q%ZDV90ZkO)CA! 
zC`mkjfVaNp2b zM`j}gHeVgXxY5Vc1z(On2v~WUDbO}0#6%o*4V@6@3(jDRL~5@@2B&oM*%_U^hv27E z>PoZI;yW+486G~jOm;Bgs(dX8!&mh8b(8aOIvfvK4i)5x*)a2KCwcT#ADpYLfja(O zO3z6CDW!<_)BoF|{?mN!uBtCDuteM)J04B7G$0D&K&h9{XV&H9ukEzrJ20W^g}9=y zo%pEmrFcSR@L2?(USgALq)O%WN zZ}a*AT2(bwfu(?MctAeg#Q?8%<)&UINOEW`nhFDmg(yGOQ;L(IFpVhHaJ>h*4iBSi zC=2c$dU;O^0TeJ_(hvYDq)*S9iVZfc7<#-M0E2rfU49*!^L?+A3N)m6wyHw^Q}FdD zEr-Ow!*O7o-mkI_b;L;DTj23R>eo?zD#~bzCy39~Bu796rEcSn-+`hRM{Z7^g7cB!-inqYz-nQkkQ?UG8KM{*21!gTIUK9Kw zV+6@zmr2j7zfW5{!&%ekN}Y~LqPjV|UvRJs=_^+TWkozmOwOzNXqaT&33tm^0I4tU zK=nYS^ZBdw3TM7<+bqYU;&!tSa%+PR5k9P4LyD(cGp0aJy$u---bIG|+aTTL!NaqW zG0ySFDR;T-`)ok(m7P;k#{H_2*4i)zGw;}p;k5$ zL~hIq66=n;cZQ`t^lqN6ay_W&jpnW5JXNfk6+8PIlNOBh!PU>C{k-9iU>Qxw#Zwds z2eyX`b7nQbhYk*{D zSfu~2EWh8~FBVnS=7UjLwN=(H`^HQg5+&M*tFH|H z<5s7Q{YOHr4BZZ$5S9KI7|iwTn>wMF=R(p@X}eB-uPR041Eq#&{5?Yn@;1dnaFVxqL^^H=iz_xZoSV!oT=Ol=#P&qbbQ$<>dkkbZ*9FQUMN1EV$e1` zPoK#IJh3$gTR#&6G(j62Ewtn@rn*|La|_ zPtFiQY{^@G&jn@OXYjcihh_Wnim#CGTdsU`AZ1e(Izxe=EK)Je2_Ng_LS0bTt=YUh zymBrh@`t<_HahLj@IYT<>qe!X30r|yx(YZGN(;)@)ABrb*=FyGE3kvmxp=#+#I} zj1Apg>x;q-q&@-0B=dl*(0RlGN&=wOf+}KJGM5UF$oD-yO2s20mhV#>)CJ?N1CA0s zU;hZHHH){gSD^E2?=Z`;IH0zKo;5Kc>+m=Hlx;DSd0z_Y(cEFMdH-QCp8=5h}+=+0$HNyr3>H z{NPTp)o^=d;oACWzNIuL{t9kqm3Gnj$6AXp`*F!i$2N`=pnv7R3v+Yj#R`Bw>6;4V zQ9X>~#YSx8KFkJ60^m9ts}*6CQuB%LG$V)Wv^MZcUXMK1R9`TC!Y)qUq%`aiA>+$n zF8%EeWCpV46{Eldb}rJ`pZ(Dayfj{PysrRZYi~aB`g0z?{wlDhZp(i{S30YeQYpUS zueL}3{-Mq%JC`T)2Nr{VLehHwy#KN6ajHEN5n#}Ow@b%G9XYK$pwzWrk(9Y)_A(Qh zWh+>jk+%`xQXVMD>>?-4F14Za2qpgB(p$vkKz95q@eMkJ*TQ^S1Y4q;m1|-5)UZ)1 zb{AavILwMZi!Uhs5+2dR>2e=g@KLIkXWJ@Do+Ehh{tl0F{{*M`UCmV=P#M(APR5i~ z8}!D7lfP&bJ^wfHQv>CSPd>BsmJ`I09EZBU9lJ{@mGn-~-ye`V*fR~WxI1}qEK?5u z6Fpj9(Q->WczTq6GZmrD%fL_qf7Bq4Ncwrx+-`Lv;*s6KF$cu$famO0HxM(5XyTd&#m)hJth zXGK)`J+{H^jddx>Z7qAL!lty->ct?%&(N@^!|R|&2wY$7MysogaCvI;bV-J4(w$Uc zB%jB}UiPnXZXGF5^9h5_4o}Bo^rN?(etdiTxtyEPb&cu)!`B08`WsJ-H9+^nd8!r6 z$!&eY_m}#=sQNurUXNx$1&=HE{9HrN90l8dzUZz?TM!8BYmXePLz%pQH5Ud#3rX2}|h-?E<&3c(;1T$UB#rxe1S6 z;lmi8fH~U7v1ff^eeIvThgB#im;W z!EZv0&o0uLXr*3WB=?Tb-g%C-VSi0brX4;Sy&CunsjdX)@b7)-?^@IP#DDV;VP#p+ zWFcr!!Zux;HwK)_I1O|?4L+CT0TC4Hy#iB_^P=XNE}zLZ4-1H~_7+?v*wd7P!4 zIVe(6%>njgXUDPvgSX4s&ttKOQYhbl%mP};z-0W)8`bN{zBS%oJopf6Ny-=5=qzcz zJC!Ow<9Y@a)8(QMK=7y+9`2H)Jo(|STV`$R{D^QaZdyj&T|nKzDrz6g!I2(*Lk}pDa}b>stn~8fqD_zi;)W?+GeK-7n>ZYNtyM-lz_E($Bhm(_ zx?N2hPMgwwbVHqe1WvkuZ{Kgmc)eQv;FJ~CC7Nq<%+2AY`TD!zvFL|y3^ATF;<8y{Yf4y8HeTvrX%k%xCcHoXs=eMyA-L#y)M}Z}@Ot{Ac3%#I zp#ELk*_@8QpCPC9b36ZmM>%rw`YwGq#X)2w5`{Uu=*ROqJd;?zb4lz;IzQ>D4y=Aq zG5qhQ^uK2RGNhJcC!w7CvB8z>*a;;rfY0KA?B;$Nj~`5$t8>2)k7Blg8H1r!ifc_F z=($v}7bODgfJpGYE7Nu;GtcXLqt0Lo3%Ve%uyB*F8Xt`Y4HEtUtVsWvcMh(7^3hFp zPUpb}|5DmPDB?4fJ()aq3J-`8{-OJQMk*t;?M6%r^)2Yza(ro=$lgRc`+zEo$z)TK zYw?q;v_?J+$Cvj5?~$1RgmrUY{D7$DB-84zJcw|^etp<8U!Tkl3y`kI7% zm=$k-#pfu7wd3|=_J(CPm+E>B)w+~U#Gf=pZMD{AaJ9<9aj$2&! 
zRS)%_JsG>eP&{f**=|WYn_`ZH`!`ifafr|Q{&l}~i^0+*T!F^*nb*?V>nn09Jh}T- z?)r**K#bjSK_TzBc~qK|&t&Y^F>N@ZcQh=ULsSw;_(HoRvnYX?p>RS^Ew;g=`H0D4 zGJRepxuT>zg3Ss~OuJ6WW$=h(de+&tGS##+y9VTZw$?_Np3PT2mWpA#6ffW3n?X5Yo0XZrornL<7?AvM@*8`-nC*2~*mc<1_n>;%10;7_09?30)qJa2szJ=M20nW~g6 zG9=?elGL^WUVPOTz$TW*HJteU%}9XTz(4;Lo?1RlI3Uigg?#xWG$?Mj`G8FD2x_bK&%rM7wk8FBSOIYLP{w~w&&1UfZti&-~0 zEQhDi%^!_KtD0oin}!+knCL5~4a6tyYPX`P2UOVvlZ23@m~o>dvp6r~X{#;u2ZGFN zc--}*cp}9PR%IOU{E2Al!N`y~R01g1b4T|ctmCiBmxMk<1h+FjVDL^xila|N8w*Tp zF7$^m2VK=42DiMwK)2UQ`pFA#6DAYydU_X|iDA{P#Z$!b_xSNWzIR^1R6dm^RgDLyq^ z=Aq?6e9%Y3jf&258CWE+OzBYx(F6d3*jSvt;wcFQth2Ti!l#3*MdzMq#n4L%%bDyA z|Nc*2eihB#*{qY}B7~LDWsAl1%Cg)eZK|EZ7wN%oys)*)t`|_foG86(ZO%>=;USnr z2fC}1V>gmF>FQUp@ADV!bB*{7zYr9e#I7h!6FIx@j20e$ex~T7^6!bza#VcRcP2gO zVwnr&?5es`M#7oejNHX`GyXWX;=U8+l72ZT>_47J)Gz8BAP{u@GgC3oqyb+Ok7z8s*u!!Mu83#KYterXjx z755O!zr%U_DjM7ZBuU?d+gs;SJbS?e7ALLe^pq>5?;_)!1-Hy4>AW{}E9Do~ldTpH zn*qEERp$C2jsC@|u&N9+=YZ>j`K9OjNq`J|9tgO81V3wf%K!=ic2%vM|NY5)VIsJHh9K;*=u0FmEd}}>jw7-~X!lg(8{M6@uZiSb<&Lc8$DCdv< z)H9*q(ve<&dfWT)Ukx~f*PPDcvp&ID z1+J0;Htgr7A#5K}{LTst7l*2k^Hl+soBe*d=hIKU+4b7_{xl3^*N|t0riE-?w_#}; z2E5-d;{ue;{Ls}{2zhv;?pM~eaf6pNCA3qwu2(}~>wby@UZ8DOZmTZC&YHrRYdJh>Eyebx2;=~wVqmbx}&=vJLPET1K-bNb+*GPqq-^`O+K?@CeP?mQm#!JgxzqS|)9S!UxV z_4>ZrSKLBKXT&ihUuUu&qp{S$TRBZTk7wcbOi;QRI#=5e_k&h*K`a{3usbOLh1(E= z!4h$I!y8P@89e2Yjdqur>MzbGH?dmZjf$Plp+ z_F%AlTP!~=51}d7eQ;)sWa$LqfSANPwvlN3xYdAipKG}Pc==QTQ^xTg)R~n10}sy9 zz~-K(aNn<4CbWmUe(>KfR?)c+seo?FYP}KGQFZ}R^{o1~LX|(5qh<>bwFmcwjuM11 zH`JL|I5-AErUFo^W#;ihDC#NHN#X?jjN%J%limtL{@zS?{;dO=)+QHF9M2 zOr)TJDoM3IeLOYV$BYup+d{oU6k-;qk2;J0jZ@Hq)fgON&W2KDMv7 zEDXo$C@T2!3rc36uAJd+j|Bp@vs1qV8`t_IWM6X;=iYBr=W2ke(fSMO92@O4PuXw8 z4CSKgtKGv?OHn)maEygGu8rgoXfJnZ{kH3;`zIhg zTyhJ~Os~8FO9?}rk!u+N0yAT1?meTqnf8`B~Y_F8V?) z^+$DR^fL%|*dmcDFvjsPv^3$bBNMn=OG`Y8|Gb`rfw+-5KvP=g@G0n4a#Fy|15{mQ zO(v558*d?C);jMglT=3?HXbuxW}K2=XAh#GL&Jm6N|%Oy=f9-e%SG)0@1;8Ir zURyC}67$NSOFGI6V%c3TclVkMt@9AS@n2`tl!&sLcU;?HUl6xXV^WVM1(Sm_d>_4( z@=VV+oEmjau^t>(epuh!aiZeLV5$`7(;u?T92?Y?T_K~SxUR>ON?WvRG_iSrjpaFW zY`ZFei_4v@JmZuF;XmDRw>y;s%yT3nXJZK6=Zgm+H17|7{;x#e z=`ry^el<-KdzJ0%P!M%-jNDutxA~P%Xr}N(Jp(gdz|niG4>Xq@2v6c4Q1}SM@4oNT zz*6`_g%Pl%}K3 zuWSBfH{$s4#5`uYUGgBWV}C$RLxhw^io}VrJGtzK@Ee?r-nYSrK5JjpHW6Y#HuI|e zra}CYz4MFqlArR%w&>y<#!p;14KuN<7<{6m2e?AS;QeZ@$_%#T)1G){ai$BCf{^67dzx(;IGU-t81US8&%Dhk$=z z);7Em^>fQOFd&FJsvw~OQ)nfME*eyi52BDy;hz>M2xNii>JlRNT~SE=({+ zALcef7PjaVls5i`-~Qt*#`@Pbk_`wmH}0*zw!hm8{$6s(NFfhXn`-7_WU8LqPfW^J z@$CtDf&tWdyV^nd+c+0m5z!%M9kURzCZfB!(|3h0jdyEp?JS5Aw!Xea^hbuTDUlVc z--GaGXZ877ZV4ar1|3(JURJ<8QkC}x+eRh#%n@7wANog zF!$iuv2Rzh=!}dxHfW>gs?$@Zu@IQ@rio1W%Smz{ZzcY8C2tb zufxqCg&H5TZQ8-7&V6TfaHLvWObu2#I&%w!YkvQ>_8ei=#Hj+WF zMv;}Gy56WN1BMWWbGlf*uPbX-dK}FCG525H0GL46gF#`o19q3%oUKc*$dmy+(goX! zpIFDwDdt7pElC%w0w&{C-m>iFpCe^KZn)lgC-p7Y2&V^aX1>2sfm^o@YgSe~GzZ-n z^kMY8Wq@@GU(hxjEvB^P(!+4jd^LJC^%bCW%A+QQ*0-e{=v;hCb1QYb-ia#ZL*%eprG}QtM7G8-&vjTyO=x zZ6{t{UMFFaxm+vgOg$cEe!~w|4O*r|uGg_$@`%+#{=N6~jm~PCq9AxNRBQFYmhZeD zx{sy9FGwGA(cFi}CbkgwYg?SV`^2zPQValwCFAhSi*nWnwh$QV2#4Ma{^h0Y9BzPG zA%Dn9SGzkcogTY1n1l04XeNG7L0I|;J400OUd{&yhYSvlpM^`kci}?D2mR$sxw;?A zQS9NmxM%OEV&L*RJ)}|T#TBy>OH1ci<;cpVe3?$0&e_q8r}SnhP|H@AGCJ;3l|p$9@BRm%gR9ZN3>7k&cy2bw|{$zSs37##11T%`Byl` z>^BpxY2Jr=#64Af@dvrXVgU9o5{@iW)R%9xHHR_9ptgrMPZGvgJiW_a?H||fHLx;_ z!AK6pkshUHZA})>Wb6a@mI0UWQi@ANg_THV`d>>CzhDCUh}rKKD#c5-@IR=^JGyS6 z_2fZ9s#iJkp?`fMp}ry2S2Dw5II2Gs!F$#>6Q>UL4voKrH=&gKubT!LHGNUR>mZ%c zFjX_rmx;}&2$-O=vwWxz$`!iO69F^xGP_3;MufM^Od(Y>K1+AgNc5;;1oTnni$Y#! 
zaqkl4zZ+LOlZ(F0uNsP4Y*sF+F*CP$j6;}sJrDh5`cT8(^3Uj5T^K%t-3S@WXj0?g ziS`~!&%F-FS+c{k2)TWhi3ovUsPrkWjrKiv2sP@XwTEgntcGr5Ws{f$eO>t1L@kcm zT}Io<$s$5{IvX#bBN>P(ryfI8Og}g+2NKmXK2A^DF0mtL7(}TF(R!LcitI&f92H*@ zVu-84mvTWEr`Ju(FK$mASRN_HvWfJNP|F^l+p$OkD zxlEvSK$hg5_E<*9fVic0oOryhx=W3^^N#=9T`p55c0trmCWyPQ)i|$E`Y3LE__`8K z)ZKy6J>pjYa`pWwF0d-h?pUVLM+4yQax?f=GrR{5A0ni;KWrL&V?dH>K=w%i3V6Rk zaU+gTI!Ch4_%&Ec*MouS%B1VgZP$r4LI~Ud!VO$ZZyL~tGPu;jpyxmgEl!}n@&VR5q_BfX(!)6Ko1Sr@!?(9Qx<$-X*$1Py?kR(Ia?aNl? z`D3|5=($%eKDe?X9DWmEB0%_FEOo4{|s{u#+TQfIW zT}&6faP>hp!TFIV4U~R_!ob*#8#X_rS}`fXa;ds}BG0O*+0JJKYKs=RuGCIx@qkP= z8l_E`O3f_}Sol)40*3`Eq>`hu?e1dOSw8mA#yjQwa89D*%H9xVT2%8k;ehg#x+6#^ zXmZ{Lo0r*O-&$BV_3vYORUJ1e!hdV>B=DRqlM_zl+V9w(Cv?ImJI<~gD|%@pI#`z8 zENkg&v1*}uB!sYZw8}QFzYQN~oBguKvPW_jXZj4TT^r6cn=B->?5cIYkcyER`@S# zsoU=Xgd};L5vKyE#zljbooyCV>ZopkkUIJVzKyzec0hu}p}Jl8oxi7^)aIqEW5LwqY%PG$NbG6JJ!-p?b$j4b z10yP)1M|#c`s4|VW~g}T$JY_1FLv&ph=X3eCr*bvq#t{sD7QZtjAGXnkQrxGaO>OF zzc3MotqUJ0NU`xTpQhix%Ap_TY!ea*OT;6Z%Un=as6-<+2d^KBiGzJesP79g0>{;Q z)e{Zpi2T<_I$DxPtZGRsJ#GXt*eM9xds<#$^+lg|IM*nOn2Y#L!$R5>V~;Xz9UqOc ztG4v?TtKCg*-Yg;G6{mlLTYa^+gW`&9K_e5_qqwSs-V4RrMvJE_jbJip4$ZG+)YlzNu-q=e9EzC2s0n5v5e+ zz;&fgp44-D-TxnPeRqH&{Gh}K-DywY@thBtDDPmxL6RacuX(PZ&PLirfcyE$sZ>~Z zK}dJ;7K=pq#zm@fMiAuxXq*4IrCfjDxY!m-K&D0@&O-Y{?EyYfr*qC@1qy^diAW^H zJ$qseKdu7Z2BJ~)z07b*4uK#_S>uwl-!a7VP0{)5!t+~B0n5LdP{LVX^_?=->EcA# z07%TE2Ohxg^xTx|`ZZI=nL5EPA{oq-3DMe9K<@h3ADZF72?f7WB#Ajd^|IrgXNKzdpOMXcl09;67v**a$es#mBWg)ddMqJWFfJ zg$VeR?#ezI)H@$elT*zRdAjClIVCziaZ$ZhLk6c%*o4OR3$a}n|MJQ zdW!bX$z)hrMKxtyRYQfJ0D#TW) zL`q9{wlOd6@7O5scy}Ru5XsvQzV8-`dUaEZ*@TVKi%R5csV}IeJo42y3%}^(kg*aP z>9^5Cvt5a!J-w}tVl#sv22pOwVS|_z&CS!;AHiEf6AP93Q~KUH>j#H* zs@Lm%f?x|2SUB;p*nS;%MN8GI)?JF;LHeaM*&Q-nsH`7bXR4a+8`Kg%mR?k+XV>S$8-&27yl;U zui`zxaetgexTeOvj=GP8x|O>Mms4^vNvx?=gKA8*L^-v$c;OCUHn0xpvC?@`@Xlp$ zZ&v8i>H~ns%2t>;3z7bUe^}`7AdQ#j#Xg zN*|DBO8gca#y6DDU<57bO^?K^sGx>_sy0nI-xr?CRYkas zr27^7T`g69NDF*MCdi4pU-Vjz$057`9ZbKa!z43MTec=Eun=W7rNA(PI>XYQ zkKn5yXV!WVR+lE~dNo7UBG-m-HXv?Pn9I=PvEj&zt6Hh44rWrjPqh zG_o&g@_fD4$yp*&Mi z-re8azy~4SdFM?OBJJ*4^Hb)&!#K?u8uib)6Y}lBiw5{lP4xmAflG3#`*wGjE!f6e zf3q#wO`(or@iHM)JXYL3YHL;4Y>p^^s09KZFs2|>bn@ei>e$lQrcv-e#=>tBO)RQ z8~mwBf5_Vu`}K28;*4|_G_VUc@tU9by7d%Hx{(?CBEkl7FU%sQ1Od}amQi>s5*HDA z+8OIOXzxaU^oWUz7%8ok=p1`|!UOOWusPn~jN2Oid+L(gRCK~2WzELwr8Zn!19-Ip zeoB{ua-*;%6Py^2NI`I#>KcK)yrFbocq;&n;y5rjgT^0@nX91ejH;P&>Lc^3{ExJO z;3HreWgcBAh2Cn_?QZeOsqkf28k7Q=Z;@8?8uHG7l|We-pnF#8X8v8cdB8YT=nDm0 z@QkB+1OJa?*?3(WbTL%YN~sv+#qOcC#Td}Tg`;e9Mn{t8!@YCG>5 zr2r$?If4tPDOAWQ5pl6-l#9oL(*F;g;dz||qHBLVS=>jidX`SaKn#o} zU{><(2+}+6wi7>>`RHQ!DtT{Vhxh7_qt7&fj34d*StBJKzPX3P;z{TYO>zYkUM#6T z)2Xnqp?}U<)%2;~OoUsZ<c&rVOX{nIR zrI+cpCN;9~{Z7+Ojm&1%wtWo!77D24q+YrTfz(2LR+Ew{a6&Dy@HEqo^+Rl3R8T@T zDdhyRFg~5CQy&I(vtx{k(Brs zn5lIwMo`&o^cdxvM zjJLILkWoF;<*t2;E_8#rafdL2D8tNW9C;1ps(@w%rb{;X7g49yoaKA0-PwK&pHB+a zwkkQFl=jR#NDjW^6FPRJqwp}u>X%Lu6M3{HFIjJG(|A*xyEVyTu9@n{!-GZRfY#%a zO{_CM#4d?-Hu2)(g19Ev$!6nK|8thOvEVoh>#3%8E1iAtuL7sJJS3fQF^Z7A6X>bE8e-i8bB)Zff)Ig0kjAY=_u;`+nJd;uG?$Qz#|P3G5L z8IW__1aZOKh%+VdWI9kc2=m2nQ|+a{j)&LUL_USvv{o>60#$s4?ZA@{s?Jc_exH#m zC1lR!+ZRHOO#ZcgN)FFKn)zN;SF^b-UQV#dT&@%O8Z1+bM-u!j41{*c>}hp$?sDxN z5sc>tC)?d@I36-NqM5z>0KxGZ*({eDgfy0Po)jsFQ?t>iyz)7l8d-}S1>jSuS@R=P zA_I@-Pp?h3f1#eW>ktApIi}~|s;(t*yWW$VtJa+#D2yGquXlK%zySn>VlpG^)kB)L z#yGG_?%|56VY5$={(T9UgPAatF*@olje;+JPqnf9|5Iwx0U>zuOIgEXwM%Zi%5kmV zuvjAsy>yupa(TMM6BRJunus@I1pmkyC*iNLMcRmEslQ!6))pc0PPk^gIPs1F63_5}7Ei>d-mkR?$)RBDWW8 zZYW`ukk^V$+e#9ccyg-rbod4N*;nuh90z^mEMMq@35haOQrgIzkYr*qkI}at%b$Y! 
zD6fn=D5a~;&%r{SUX;!)uMsn5)faXrlFI}K9bZT0udNI`BV_6I#ItJX=R$a&CZ>u5 zG+|FT!vjN(3o!=8{T@4aEClE|q;sQxMH1pLQ{(;4g?+!y0;S$H=`Zc*0kV-jjFC|0 z{YLFjgwU4L5I6DnR6H-_<<*grz+R)&OgRpScNl?i-Z6f8O*aWIvkB}lB4ICB7P{cm zaS6j}3@TnQTjnuvqI6+>kE@vo9mIzm{hQpm$0~Er0J6vW$};Xw$*GCEeja_=%A@uV zKd{2=?2?rRaGA&A7APqJ!CES!Y?DoRXk@j78l)@O|0x z6q~{+K*hhRq5WZcAxDYLfeY=|^IY7na8Ie<4II;${C2E7d>@lj-@_9^jwuQ;N?I$F zZB*|0$^I2;_~VJ!yyscJm52syX?UE1+g>)SIVXMrdNd`DMvWCUh8D&S*Jiw}6Q&f_ zdIEif&l?H_$B7@}TZe6Z#i7JOyQ~M>2`}4kXHYx!3XUu62N1@bm*`rz+5!|n^k~^9 zCezFOQOT-Kcp_nB80*DSP~Cslaq-;zrjkQYh)eay^YvU#gYEW zdA5femkATSEwF^8d>p?8l}%H)@m55?-gL+oK1fY4WqxN@=$ft9t$!14|1!2yONvGe zP8uqSRkrT5gp7LHGeEck&Gd^^RXPsqtNNzLlw~SgHp(#gz9<0t?B_5}OG3W-e%#ix zBpwsL;wQ0Q!y#hkeIMwv<79PotPvm@csgB)v^}LR-6yzuoS-y-E%oo;dstfIgaU!L z8dLm!@x|T|?=pTw=4c3GW#Y;X7uGe1L$`zs)wAihn$og#T*#!b+)iaaOwDu|YtcWn zg0c-V2!Tlye&EJ)D%9Qgi3$231G?26Du{k}%-FPmB*>5Ype*|TbB4xN3bzM5U;3_zZ`W%)w99YEy?kuE=azOldA z@LDhy)Gk0N;nY@0=pfF4%ye)n1$8sGZvu0@x%+8aUFZCsT0 z9M&2>^0>Co)eQ}^&)tm%AS$f6$vq1bG2WRii=ao21KzM%|az>1e78qhAp5QS?~l7iC`G92Hp-T zhcZ~6ycc=&Bwyzx9%3b+x_o;L5YYeLE>ck~&u3If5F|rmUPSB_!MfG+sW$4FKSg@n z%B(qIo4B=)=S_~D(&UlNQ=SLG7BO9Svh z*xwl9M5(8=Hwx}9k0G_k$M;Aff}%I25MdVnwSmdj5JMpCa)azibhK~bBxIe`40 zyfoil6JvCY?z!64i@6PLxMQ)(J!ZB*@4s!byP*~h0dL-^h8wPa0v0MHEcLyBY1Cvk zSyH)+jl4H2-vN71^?v@IrGPZmHP5>ycr!aKPRKEH?Oygo@kdJ)(-Z?Pi|IjpyM*uS zvuPx3bASw>(DuZu$0ec;qiXvAoKeB%E%2Z{2tt zUy*zRO?umzr8WN7-qn6w7`|Nz6))a-RB(a%RTOi*ZKRhtS}3+*NtzvY<+`U(k8l4% z!%dmc3t++WU*T{#Gf$!8)X`tY=~IhJ;+0J6J)|_kAQ_9SwwXfwN0an+o3JGS)C6CA z1h)&tQ&2m}Rn`P93ehIx;aGuTKJdsb9%U;R1(pFP7FR9;{ZY#TotzX(2E3 zxZ3>~95O7k!U848p3hu?Dxn zwL=dn48dX=wPq(9V1Ez~1=7s0776XWiQKC3drzHDrYdLgLOE-iu0F51a*qWu3Mmn5 zTk$dN{-wj1OCPcFQli*A-Wqd-7}bKUC@jIEq-;p<_6k{g>sITE;k7g*Ul8CsD4VVm z6(Iaj6#az0Iq-wXCX505-TGi=-OG6Wi6KSk`K;A9X8MKx*b!JHgHr zt87wm-~=B5e`sL&H&yV~Ue*lqz?MvboO&Pqsp+<}#gk9bw{P}8HKjC!d*@qIocbrN z?!xUrY|53r&uxZH&h^)Vt7bX;Ic8||aE_KIj%pNM0V5w3FbY&NO-awA_y;Puu=D4O z0NjrtZ&f;<6E5T@WDS$E>??CfwS1Yo-@-ko@yJ7RRnRN5ht=_}Dk?IiW!nJ1_?)jPj**m!!$JCAoqnOmrg2K-?9VD=mx|wpx7oQV{CmPV40s63x z(1`fuG~l?<@R4u6`+rE(n?j^Z?FVg5zf(k^!+w*vZ|2F9^fEbxbp`|Y|+mM3%~9uhBEw+#IH*?b0_RwtR%WCQR=TyxAr}uN?h%ZY`viD zZ^&n9wA#a><=Dh0C;_musUm?U>a;^kR`_*peAI9!^L4^>JzwRroF_UL4u4p`vGOyA zX^F5|FGjqtJ$UEL*Q-l+J{`*JmFkzIL^Aw4;dO{HRf*U>Q&@sAg_pAX2uhyJB;4&N z!E+y&Eqzl8cdQ&->o^)>=UG z5h8a4oo@FFF#o))GGpC@8v5Oxi*3yPHg7w{;EiPpfd`;AtX1YZBgFl*FGbwKrk-18 zUZNKxqGv+Q*Rc*V?19P8B`lCG)-|@P-nd1tE=$vDxziSMpUe$gQ{#6;U;S5)E8@rE{>Duh0Nf+bzIrmh7w5lUhrBH{otafNr?RZh09Fb=Vrf1c}vVLHf7HIS;fbFfHK{ClYMbpc8(=Z3 zbQtZ{c{cg;lA-u?wHTEvR^Ee-u}c$2Zd9=Rj~$z}0FDhL^P*6~M5eU5pX+cyv0-+?*6Y2ep&+b0E z|Mw!7zpI?je9xIPbH=GSDvXIi3!(t{8Ksc(#wc^~#{sAWl-~`#ubQ}#ip)g5#^6tu z=8@LozU{Pq`bHxuTFGLrO*dMGMf*mEaf8`w8j%HQ-(t$5R6qp~I%YTrKqLoJI`q=v z7byL{EdUDXZPQ}B4I-z{>T7qC3^eXI*EO(wl!`DDuzy>x#+AnK7{2a>@*1noS^0?uA*9z&`CfmPQLfK}g

|RGo~q z^9iN)`1wbX!FvU}KDON^`iQlTk+P0l5iCcRn^ZWh5x@voJ<)BQyn?g}kK`&uE^ZFQ z@W6iXtWV_4?W2O}7m=C+2V+OLQBi?*^MW7TQ9KriXEF$RUvR?2-YJ5Y(vvN=5)ZD= zv(-DnSJD+il&$xVV#}WEE~V2)m42qYIKFw>qM&#Y6{*Uvh=8WPDcnqfRp``pqit7Aej8Ih^d;oO>tosJHfm_6`_nsuQ!I8 zR7C`o-$0WZ65As99lLiyTjn#{B?P1VYs0nOVkigI@j$iSMKAUM#Q-vs40H@qM~JH0 zvtMLJ?Zf*e9J}DKt13|!zHWQqZP3&?2oqhS?rWkr&C1sp*(R)M#+{5H1KU?^5YBU=Zp+VLuv0bi7)|b6p3V z4fs95FX%3BBBV3t-B1galC~B#oJ;C97=uMwrDoA!%6l%BvYr$t+9_1Mm3St@tI=se zylKhA+OeH>Gm26z5y0NnIRDVtN7}r;NobQ0Ffo5C;!QH!tIE2~YMI4Y*jPO#gly}P zuw>V<6VUfipD(HG{0Qg1Xyz^(nr9lWx$ENjmK8Nb9Y{T_=sDj0b;?3HI6JZ>oWKm6 zs_#yTf{hKmC9D(5hvs;;`J?#RY6`&)*!CgnGuqIcm|*v|KSsZK?z&eBl{j@5JN=H1 zY!}ls*o${tPr9#SVj-}gl`qlZ)Sz8@%x#lX{bgD1^%!=axiMOFiAf- zZl`~?@s*v1WO95FXby|_9aEBwO*x%BkAvm~$Tce7?A|FTG(N{1Dph<5X5@vPMJn%~ zp071!hC+ZF>g5{7WLWzzcsykahmH@W_?{E@m7nLR5Wk_%B#)mlOivRtuSGwfZBW2A zZ^j1;9ml1rgSL=fDU(t)vDXKb&`f%R>JLvhNiPX1CV~Tgch1?+sEk!EB9_ZwY_B# zC8ud#5d}?t9HRiJrsM)TZ;xt!KwPO&&Aj`wp7Np_trCD{tWPtJlEQ6>G+1bP^gTSR z>)P=!2gAIvA@1}CUt*V}n`2O3SC|&h9uiOW5-7NC9yrJstAJ(yVoArj<{E5Q5W1|C z_VZOsk}C2KN9mm^QJSzb=upmbm2${oEX5@(UwRqX7=!_UgGZ2o6b^e&gW~ z>9>_q&w0>!A^NnC)4rRCt8z1S`#Cz=V-dRe3*LQbidimtxF(V3#c;;lIA^eOcs0Sn zz0FWvzTcxQPeZoy--@faNMRFfe7k{jniD< zmlKE@A@-ZEy$QQ#3*@#)f0sE~0-@ba+aDGGEs*{L+D)2b*< zh1E(TT3sg>O945hlA3wdQDcLx48#cW3Hpe)gL8o%>UMss3GKZz6R}#0%&=|;xZuR1 z>)4ca)p+bG&6Hz!jZLW&o-@i2@+j_t!8w}U@U_3Fyh}>8bcO1=7sG%{;EmH7?)4$| zEE4;cam3f?Zpsq5LWOWX{W~9YCSEGxy!Pm&tew(l-^!72%Q5{Sqo}C(BeqF#C;5S^ z!fD}G3h}P=&=aBza?o#&qj^0}EO38b5LiQxv$dJjLCXbN4ul+23PX$hmlCaJdrxuC zhimfG(mp*@Zz%n^<$$j}xjcq^2>CjUbR5MuRT>~%^!)IHwjtHy9OgMu=it@ivD_t7|d?d7q9_SSpSbg$)g zD3{mjFcSaZa#zTSx|Ztcp(szXFUUH|4zP$9Mf}^5{_OyWP4%bdD%F4b^mz;Eb6CbL z|1?kx4r|(y&lnol$uJV=SfvvGYSl>?B_Y0vsam(i3=5BcOp>F+@>sBdzWRQ?-&&4z zG3Z6jxR4*O2oB;@IxJM;`2 zPX|IU5Yggvt%Ee2=w(xZvxd++lKer=UVAym%n$o$2i^}e_NyhkD8h=xdOGa%8G1~T zAB{sC=ka}fVTD66>IYqVb&9g6v*s&B5?VHCO^r7R4`yagxxnBx#h>WJGU z3hB%?<;l<&YCFm~!@IqXICA9YG%u~+Nv~hOKeCPQTB(3%C7U9$qmkl?1bQDju>x+qsi@AdsNOqp9Qk~tatH)T(9gTtaXAmWF%KjT<2YH1mMakRo?xv7~ z_Y+?n5DEuH!zbF7Dmdi$>?=9@|DIp2Q31&$J$S8cF`=fdF6Cp*Txjn_+e3r56EicQ z^FPw(k6Rq0&c5oXG1aRbxm$(~u@;n-NvaV$=91J;iGUJL9_w@ziN;NZ2ah)&MN~jN zhsRL{q#MJ%sF!iKsBi~X82;gURE}yr(^Y?r;0{Sodis?HRfIUdgn5rjOuh^H)!oBK9r=En_mAr0V1ARovKpDb_<}tuYThSR*ZE7o_fF^`>gv`{3n; zD>tsi*%A%IZ^h8-?J+9)<1_mDw@{g?bAe~rWuB76CX4SAa`CuQRPUwRjES`14kr@V zN#ZMN9l`PuqGX2&h7lY|Ij~b|Gtqt_v)hRV3!}OyxgMo38#6brei|rP{<4Pl4{zF! 
zVu^81CMh%>i7mu-BQ|&^p^dQKc_7^yoyj)Lu@GP0)kK0JsiC$d-*?7l17Y&Ml3SdX z{Zq^Cj_cK!beSrtIO?*h(=eNcxz^P1VzJCgl#Gxof7+sHJAB<>tz-_lGOZTgR_Z!- zU!c^1NAH);1?SMp2XlL=yNEp5`M?9~aMv*`cir5)=1J5dnq{xsI_eFEKWW`79Eq5gTEDsRSB-3p*rMRXQU9@${CJG(v^Vb zu2&&Pb0)|Mw5`Fb$J9T>zPg#NPIJtEDNlzy4c((MT+%9eoVVGPJ(*YYx@&x@gQ6T= z70RZjp|d%-9kgnc7jGR5z`RW#SXd0* zBar&qPPQrk%q@>kIiD8=hjx^WG`UrvS7Ylj*JG?sp5{Eli2pl@mqfRdv2m>4JrQwE z;5ud_vKF0aHlGreT3qj@C)53%*8gYdzD8xOPNn9z*1JeDu2H@U_L+E1UJ0HZ?#lnZ z!0As~=z(t!DVg4`oc3rcyY=L+*mE(0PU_S5-N3EAY0nC4E-o(k<{_b%uQ!(mnj2`1 z#3X?F-e>1dA>UL>F@oS6UfPHkUd~J?n8rYCcU9l}4MaX4Klg1helg&4Cnhih+HCSFl$MAPf+SIcFCzTB^d3q^4AK*1SHsrGty`+RI-EU$2QaIl3`r!C_t| zOjK2QM%jxQu%wzNhdMw53WbsSe#6T2N6Fi{&i4?ivZ=C;Bi}*QunUMY{64sX>nBW` zll22ea1Zz7<#l(O=s_;)sN2YjZfE}UJ8B4f=8mgdsrNDMCGo4}TzAcQ_E)Dh6DawB z&k&_RZXaPnJ>?Z(5I`8{4E9VKa`DtR7VUaVGtfcT+YrgKX6{rpmAhHR-K_%}qIyz^ zFr!d*nC5ZQ*7nrJ1_i@W(+@yB#Bb1KYkW|MHg!evlfuqsg=AMBcqgLY!r&@5G3FsQ z9Sl*Qwm8BwliCu?nsEFj8TKZ&<<@?zkD%)?q8(Vy~29*0YG<#($MEcILH2JtEbVLGrk9OAz zOt;kF>yw`GzLsZ&e#bWm+_vZaTr>EP)5$H2x>}v^#*wZ-VQ;AQ4n1K1!@&B#%nnhU zpSV?1o+LeV7C`vrZOB?P8+u$ZF?|M~u6sVcpnkTfUOfIjhMR*dO83+W-nWMTsy)9> z#$JPudM6nr1TLc{4v?o`L8`z1K>BQiy}1iG4GJ=^_%DO%6ub^_SA~ENEWqE+G)MMm&eb92buB;PX3B^W%4MrB<^_z*c}w9rG((BU%)AJ4~RmPhRDFcgPHla1`U2tL1i; z6h;sz=R3Sb<$S=ZiQ46#>073*IT*avk^LLc*3DkRTN2%E)dDa&Dek}5EClCssLo5G zx(9wWyc6kWCg5P|q}sIJrwt9V>^a7_o1A86%BDF+G1?gd7pe@_MT4j5w4?VdUbbDC zQ587)SsiG;g*3z-E!;g);F+Om3vc+7=qEM^_ikIXrn(oU-|aWrI-M5rE+`rBv}ACx z_=?9vTDOS>s}t5B$~`5{8_+uI%_`|2+8sOHGU4Qvz@=6(dFxZu*}A9Odf;NfZB?iMQkC^f1g;GV-7ZF z&Hh#F?p?jIn{@DJbi8{s@3_f1cF(Nl*~1g8&Y8&B9|LukNb1dWj^OhHXq}nizaw`< z`&!sPIQV>yfnarx4c0pzFgll+y>*yW5!vAM_5Wbt-)c4G44-RUPFd(1HSh%@W{3p~ zZBl1NrhyZY)#s8Hbx$ZNl{e7^{S03M_cz4^zD+{&xS+Y-CN~&JYNXh4crO=gQLeXs zq6!}AH*g7Re488i2xx(|0N%#xW3{&iG(I|__QASb957xnt#=Za;IA;*5|r7cCj9ph zCKAA)WJ`T}dmf-OV>7Y^gv2mcmHy<`X`6$%LAD`{5Q8L_AmIH|r(UnRIKPj(!$opCQgM7;9E*BcP7Yn6Q$ z0lg5YDxsl}N8igQ_LI-2{A^B@!IW*3!?;3zC~b$AcGLcj|9SsX7^~{~t4Q9yIpjgQ zNI8pn*<9n(rAErDXfib}2LJQaW}zy`b{^Kzk^sQW57HlbRk+S}rmbQOwqS^N4KOIi=>41!GW zLZ!0`AnnOz5Kav#@$7<%-Q`y=4+f9GB31kAE^96=pgj@Ug;ynF#?=C}36RTY zY-hI++B>FHYF%|^lpXF$D;!K34Svx7x`cP(#=|Eu-pc19DbMI`x3r#$T`Ol`)w)%r9=`rY4<)N z1zDs&_SB6KUigUI9rH#RbJ7^O-WG_E8m>jvi@!IW>rPN{ppzMKoRb42O{{iY7kx!F z+-ab4Ean{-L&Nq*B|+NHx>w7|iDNE{>MptM-Q1T*7HBjH1A2V*2@3xmL!;ZZP189M zQ-TvPX79DqebHsDa@i(a2m2FKcx)o*PUW>!{pNrcp5tGcYu0Z`Qf1E$m#COFJ zfe7ALZ)Bg=*{$*M0(`?D7O*SL?aj2yJ1-5GvT5h}O1)GAC$cUDgFh;a(5od=x}dKc zTC#k_Euz(19i-5FiH=L1*)T2#-;6iz573-8=Lea)eUmAdX6CoG?^ec?>{gjhxTuqu z=RZO~-CKB&7K(eZweOTARC+7Qv_EVb<)d^FQ+GzRu*Cp@hS8Z!SKe*M-@o3gnW;eH7 z+e+(AnK5nuLj^kPK^MZwf6x*kJs?{d${S+#5Jc?E0A=cmR(+7)CvuI9~9P`+&wEWA|0N5y`MXyXxO@WF+7ykqo?ykt6 zt4N_>dlwZ)?LGdTP5yb1+odDg0U;nTfxL(j<{_q)peTJJgMK}b3e`1K2 ze>V8ZS}O*a^us{CB_P+UpPU3clE1pGyxFRR%2usmO3C%dXOC>f$~*l0VpJ0QO=5L{ z25)xFNjxGGk=0J*b!v*fWX zgnH)tuV5!lP!r8+DRWGl8;?aQmE-*gd}KsV4*#=0*Rd?*!RVByuA;doKnbX*t#5j} zBt(~r`a#V7>JR1?;|D4gIs0CB6~}LVfrmXwFLu{6#N(aRVadv%et={veZ$Tbz3r!g z!_8bbe@|OB6WV*vY$HR!t5$#q$w4t(>pp&kFA}#Rk3k%h6+of8KcB}I@r257`~U7Z zz@>O3`6EzqIDzQi&E({XN;@s2Uq7e)$&H z!wrsw5V}1r{f&R>=veyx@`fL8_;!Ex3^29a+nbA>3qAH9l|+@O>M#|}>uC#<$vBJQ z29&TW<)Ryy0!NQpl2>Bf^s8faUpSm3lvixJ84MRiiK5z)_#V85hF%oM2*CI=_`Y>G zoD3p|5fE&+{QMJR_O`vWkF)^djFr>z(>#2KN2m1of-pLY5S-6@mFEq-A1x>AuP`P2%e7?cRn*5NwHHo?fj&^^DrrFd9 z>rzAfRF}d}azV?`&&CCB$`wh-Fm*3w^Mq(WJj2uz8P}qjYdCHClada~8<@S>>FYnq zJ*)kAxKK_N;HOyix{5>>L=E5<>*RDwmSedBvsklJgm_O5(~Md| zhgUguuuhNSSQJ2J#J3I8o{&mv;b3R)?!B=u!!0R+<_5vCr=R#lVV5ySB=!^9y?OU@ zGta=$!k7}hp;=-6?WWqQahf$4Bv7O4crwbAM?qwR(#a<7DBw8x%FH74bM?=%2O%j+ 
ziT8pm7#_0#o~n0tN5!Ze$owCn*toqSU^IADQ%D?KrWJ3xekgJWSS)u7z^Z(JlEk6+ymd^W?n zotML^gYpYl&N|6eKlt%Ga6%Nm@SEE>P2?ggL285%%GY7Y;t8Mi5+dkXA!nlOL9eh4Q7j}nF$8ff?7c182ImYA)r&TeLfnLJ*c1Ts0bHx1JwehK@<$)abUs|A-07R zydFV&l$9XcyrTrIpv**)`VZC@EeR?Fn&D-^K~5}QUj<`M7i!)BDkP7-?-2KtpYV<` z`>QwlHf9*1CW`yNEkC-`&~Ggo$RXk7BfrgLw{fAXBa)H@?B$A3V=l{f^Z82X;(omV z`@$Xd>?rkm(x)rb$bRJU=J4bN%f(>8{rm;U@$T!I%M)7b^Og8!p}K7)fNB+Q(1!Tl z(r<4xd8$0i;U9e!anN<+*jBm&aeJF!G~jOpu^l#Z`ok zq)rZ==v3y>Fw+8X97S=Ja>-R@KEhsnBVh-u1ifPrK2if)=7|1jzCpXcetqLJ1hM#F zu2lgQG+%CZ%h$x+tW7FlclEkOPL9Lxud;BAi)ZW)eTb+&7#9Uz`H*V=%*e{AsjK!o z5JYu0CtwrniEVoS!?rc$c=q&^5=znY{ePRcXquA`GNo$d)~#;*Tg14}r(9)U6Z=Zf zknJ27MK-&Od5yxB0{21#2~LEVF&c|=IB*nEqGS`Fh-#4GFmGLcLT5H;v?1w97UB|+>E7oDt?CJ=C z8G32Kgx6Qu%hzRuDWp#DQSU#0$*)F%6V*y-HPZ@b3f73?Ct+exKa5`4Mn-NLYNW z!%fS$Vr`&8(*Aozd~~g{3z(3*!ByqDT#Q~ji&>l$<}U6QEM5W7j3pIyzh2bw zc?K&+4&BEq2-`i7vgvH-pByqTKLO~7#fVltP7Vkq;jk?*z16bM&AU~kFDD8*1f-sT|2ahGCXYJPU=r0+=Y6>h8pKobAJ_JiJK zsEpxbmXGkUTctP+Aq@mQpfx0J8uWspn^7aXNQ*Ou1=Q&7-R8FQz|84;zs7HMx6Wg|5CaPj;LOi@%cHKdBTU)jq~BwqZi`+$Ol1Le)Z-(A_=g+bEF^>WwT8xsx|}eIr3TqLfV2a)o~`X!`Zns zm5#&w$^7dbNwH62{fTjs$^vg)hkK}O{a8?f35@W^_`4X>h!oqeHPuj9&;YT)|z@=XDC#O~}eUneEEBDJ{ zw4l^wdY^3NoWZ%8<~%v}f!K^Ys$h1hm%ahg(CYqW9EsO5C5-m0+X8>dv*QN1tX;hM zipPOKP_b>QhiIw=wmEu$Ju)noZIA#ii~g}7j{Y2$`uw?Q4rkS?XdRq{ni8w!)sTp! ziTF7t5bg&i{5t6|7XOl-nXBqYpkGeu^D(-uo&>K8MnG3N)8;7@!=sm}xWxuR;dMtj z3yUs(=xE)(n5?PJLNr++6;(jmNikx+PaeYevojAFwWj2aA7z!ADd^Klhfp9{xFd?! z=RnX3e$4(jHKr=4l&guHeG%QvCnyiDWEYo8(p z>egwCYcg#O-d#E$e26gd3VDfS%$?XMUv>!|5(gBKYTET3Quhx{f_f+HxYq3uRpapB zIj3#e!c!Nu`q16Hg|_oGbq6wOZ|o!U@wxh+*C|iTe?RzlFYwPkG=Y~$M-C)@o!qbR zAN|#33IKp&t6`;!r1t}nn+oy%Ng{2r1|7|3&0JKZTfRun%w>A5<~CO9w>JrOm@@1o zOJ7nFF&K=qi%Z-M>1H-EG&x@3xrG+f)s;{@%Q7lh`zjzNTtMRxb0JDjPK$8>w%UKM zhh4Y1=kTlO;OC8CpeNRdLNEz-zfd-L#38!yiWx`Mx0Zp&+BJ{qfHXI;-lq(!uxdo( zs@9h(8u&PQjnKcA$rf9a2fHUIL#U#pQSAG(WDT_Q@lQcyP#(Td@1w=SB@x+o)9$!y z-%O*Y1ee66Ya^Smnz}3i_TG!s9fm$;>UFmuZ)nrJ99tU@3tsOK-3V7g*Cn8}pK?G|eUtT;l1|@$TR>>BeO&uVf_C(;5iS0sffvRDUro{{XVO8x@q>y0 z5sZ&5P8AH1_=*iRI~WY29Uc5Nhs4QUMswpEqKejNCqOffClhzfE>W6O3=T|~Kf#z7 zE)y$JN6YK1anIWrb08WpGg}@e)_%+Ok>{acSfBZ3X^s}Z{*epikM~Kzl+ucmKOo8% zNLh1S-zaj~;fYS(I^Z?1WF9GFj5&NueYBNmgCFbU%0 z-OHZ-2$1YD@xIJn%hnuQPW|(g?O8v6q~F;xeA2(jWH>cS2F!-V9bVTV4Q?Hpx5wUn&%Qk)ayc*^XH{ejOn1FoaF1jL{7Nhu6_zL!wqv!=xrFGpb~qB! zil#!+LHB17af_rIf`(!Js9y0BV zuWwP!H_hyRxY2u}N^ClXK6iY%$7Sorl4vf_DdFZ3bUd1M!UP$}zT$|_rfjtZNY}CD zJbL`B8mXBB+cNwp<;LE8z(Mo-fVOxLzZ;jiI6x$F*)rQu&qW1CXL3kGrG)J*t(k8! zMJ=y~tk>0HdSik;$pZ9UjOt{u-C=q~h-9nxP+MS7Xm#K0NPaNmxAO>C=B{ zg1@Ab4NExxm18GH@$vrhK7EC;?^{w4 z88UGqW$^(O>F4uHL-st$Cdq-pv6r#Q9^(m1%hy;9hed}sd&lm6sr&dRO4}V&+-VQHkwdde(mZ?7y+1M4sO+Tp)f@o2J!FOgny1kY4Ryp`aYn8|lk~Tyw4$sr zoZkf&vg~v%pXnVi%TH+m9joVuI8-5o!%uV7{4A2zO!5TDxZH;i77qP~F$*^yI2_mE ze@~U^e5u`tkCxh%wd;%muYM|6*!f|Q!aUq8Q)rO=s4Ol+{#W0#R|9(>@VWi92chOv zYWoM73saD}&E_&4t$n{GRi)GFUukRqE~SZX;q9Xaq4H-NeHAaTwT@GXSxTPPGeO%! 
zqynB(5$=!!oL^T1Yww4?7@hsS4fuZIvBT)90{rMwCI-T4!_jum$!a{oxTW$dP0oe3 zjwDgq$GdF)G(!M%PoTeZptw4oGL=TJOUd!^pP+}85xz9f49j1!^#oIiqBV^5E2ZKd z?Pz5Qeku@4QsPZ7ujp5(6KvJK_R%KkLhQT_i@FxBoLkJ_aBK&c^{gT(I{2M*BQn-mTNw^W>89 zM%Uq`e$W4CUR-HPXUDF|TL*#fDgaY{+|4^cz(qSNtDpb7hW=|XAU|N5^KYT$DWSm& ztJwXs)vG)S;=m^KyDxbHm5zOH#B$6+Ag{8NeLZ$oeh44!Z}E70dsn$^cV|8_U<9fN6fB3(L+^H9isrb$XdIx0EZekKKUe==)ERrY^(~<05*R9ox z7n|}%Fs(AsIWZ=Mfa6QNf&=5iz#0&fxGb=j0?-lbq3;azu;-x=*<#O&R9AMpLv)QY z#NcD`Kk<5a=W!)rW#O9yUlwuyxt61a6YqQT8XAEx$fH&W#CizAo2_3P||^omz6kwgr|svGOhtpH@B}f zxC6C4xVRXFuNe42z;NP^0yU?le)9$GKTR7Tc{_ z0N@|XO`!z(jZ5rX6Skg%HVu$Qcgt&>8ItkmXgsIviwWX=sydIJH)dy@Z`fpPHOPQ3 zKDE|#cRG2as@&yaBDdq#rDMfIiGjJPjeeaF(u%w~9QN>! z)8Mm>sai5qUEj;)JHtpmBMR*q!01DSeAYc;t=w2m+279{V3aWItj(g)xx~WX>b4p; zg8np1p|#qh3BhfP98~%TNl>?OMr!i*%smCF$AeT8C@zk&Eff7FfS_9t*k_<&<~Hq70x2MGQdy=^pgu~SsJ^C0b8 zQ8(3C!?ZOb-Hfvf+&YBupKv$aUp-`_{{}0_ei#^~fcin3?)VZjMB@uHy_(mJ6FZx@ zLE>IMjxD{rqZ^E~j_}#BvqU~v2{@r9JThTY60m-=e8nTm*-u8zlGOI}PzUs=GWB@S z>9to1R~$V)EEgF*7~uGx$K#Gh40E63C9T_{&0hm6I~44qiw5EoOI&1Xd46}qeuPzu znK@fkNBW5Z6mU5PWd^6YpMKgs42o@A$@F`O2d~8)|BhrB2!eZ`dbmEE?DDE>knk8V z|3Etcdz!_(Iw_BS9~gJ`1$lLFE9mM`gnW7Won&-NZEq^=4TU<9miV=2@A=DDl;ytY zbVrW~vIv&Tsm;5t4#QvnYTGd`-_(|ROTZ9&+sB<-2z-`L;3YJDvgS30w99aP$U%<(e#x6QF;rc&IR z;ecB0k4;$lPYOt>uP+>|DFv-VldYocD*CF^H2(vLKwuibcPE=CIVE0idVzT?o;fFl zzb7Bj(UW^0Ly59~E|+AfhlUW=8VINoDhEy_kJ9Qy@AX+Oi+cAsKCS2{uh#|W)zzdR z=Y!z=r4?D%pP%~8hhGm=HT#-tcJjth%nsZDL7qYGLY}8*%wHGUPT!lKl>AM0+)OZi9{W(7C#z-fefj5E9JT#bd`g{e0zA6>+b4r1(^2wOe5b|siigKCoQiqx0>W# zgSRY%+?$ex4|7atZ;;386BR18^QF)pQ`A{ruvGTxnet`l|MM)JT%q!?gjvkgufJv+ z_tdk5Puwv665yiF72eB3R?WBU&!Ue}?4Osowy1^S19n9cuR6?oqHypA!;{E84jrYuZ|leVSCV zpGQuqDUaq+ox_^mHIYRQySG(TzI_)Y=VNlw@A4obbme}va1z-`;T+RNmW6f&%X1&d z*=Pc1nW!##hCi$J%C7jH434CAnvIF(QB)lpr7yn!FjfUWL6|7(f4URP zHm1yF$-oZhAK=Bv_BZs)_J?nR!gvSlD9J*iV4tupD~g3Z*2g^E*7xr@gL)c3OS1gG znuK)P?s;O}0F0n{t2KVquUFa+L3Q~ro#X`Ki|<$O=(Zs~Aj}Y#Ij6%ZVgj&=m=%em zoSMAK_<|gfwV!W|vV*A}fqsB`fp;w32E{B)H|Ad!Y@vB96VL6$UGr#oX0l(@L(=s+!(vBz?K)K8yIoY_&Qe#&4}-}$@Unbi!?k#= zOMp9|Q@MAL3k;-n1`7Sz$ckD7dhtSs-8Iczt;@^z(!P8tclUaS;|-5QJwz_t&+OE2 z^Ylz{gR1Z(unwl6IPtytm$nxV33(|E!$T=!V^*>*3-4v^4=V5f9BrV_!9H|b z==qpX;5J?^>(rmNs^IZG4ZQj3v#j^(Pd3?nmZxWK2fw6@Li}lFst+~~x)Tk2eY~8{ z;iO*!Z6|+jN&5cBoO~ckqe5(_B+W5f2}jxBe_4a4|_E2Na9*JbCAqNiN@4v{mV3NjVA4Hj;7ST99TXjUhc8 z_&(K^9;Ep$IV8@8$lAs%jAr$e5EA}TU+`w^<#(RB0R>O05C*jTpcPYE;t&(9SG zp@j8E?xS+!UBijG6AnS3*Wftbm=^p!-VZapiB$dbwjqtw(l$W&>#cI6NS&KOxW2f&9j;p1~f@wE1?|X&f(PQ$^0F1cPy;NGPN|TZx zh!HEQsF3*#vp+NPRo-i3tAQa)uXQjVGp#wcg6EDMe#Y__{^B`{^s_y5 zARYUj^r#ii93^{p6KA#d0o(Cc|9xc+XsOCWRvQoMv&YxB&r;j`-ClgIxMO=V!Sxpu z4F7*nAp2pMX(X%~Z$85!*;CYxoCmYzh0}^H$zn?P$?A_xe~4fAHb+au#+8B&h|VBI zMIlgtGxFwh`zxG^64Y0@ITCI~-yJ^FT97n4dXnsz^i9`C68L;s9$ctkFN!q*+BjW` zO9qGg*4>Dy9geL5!yr9z72veEO!Dt-Y$G@@spCC1drgLKYd6yAf3yHPxrnC&?H}XB z@OtC1)!^^6`Ci#fTM4b!O_KI{4!6SQfBSrDACYi1^N|`)_-Ycr@SCp>H`t*1*``~b z^Ub!^%|M?*gy2Bp4Sw^`&E*Mvknn{807JP_%8J&$q@CY*^$QL7)F;(tb#N(UovRf* zC}YF;O|AqY$k;O+cU2f0=#$eV3qr%bH{})Bh-Uh8rWVZNE(o7jqFnShmx+qQLGN(H zcHK@*-QDAIxPztMW~i~2i5q=3su*?4$@iT*ll5Bq$|mcl z@nUUEQt|oR^^&pDr01Mt-E+!@ zYn*uNxkXig#bf)BlkRh_EwzPsQ?J6QE4Y7mdIyMc=e z#b5QF;h;Hc(T#;sg7Foj3eJ+l!;luv!DPVusGow}vabbA*>Tl+X)@HkO;E6;zuQ-^ zY(!{>YPX z0&~kEFMuxU_@#MAP#z^oiK3H(@A9TZ2cPQf)IOK@xJS1`Q%&p z<8q@y@};pb-LT|C)n+lLQH(mmT-Q8j%Od;eF$*K~2Tcx4hXSl=yGJd%5avM(Jx-v( zs>Ig9+=3St)?+-e#2&>6a8`!^fX+C(+ATt&!z4YcDgzRLi5IMUbAuCmz2eE)3kRmk z1```h?XwdqblDnwy+8EPfTG7Q+cI^tBejVXywZEm{+m#4zpCmywVE@0ru=$#h2Go| zzNtM|;pEp-=0?VF7fb^bH|BSMvRHVj<5Aw)8zlBu>xXJN?tZPKm-N@aJA1n6+q=}4 
z^mtzNlR$S*qO4$N?@nR`!aN-toZjBV#`N_CWgtT`PP8U)y2P9Zq1sF?;y_o@3kXYL ztFTk0JVp>Ab5<_C17K|42}^Zv>W2V;Fr4g?Me0X*>m)CxNWacRYu5v_!V-2c$suDP z@W2c*CbI%}X!jjl}r7i*%(v|I?)x0wk1=bTMIufJJ zhgG+-fiHnc$zB)FWsue)*<{#GNCL|^fCqxj13ax{0?~a}b<39RMc-Q+yO^wt$JiJ! z&g$9%2#polbD<`8t0YDZ*ghnQ87O5~KK%Tp@H^S)znYsY9MvCcQ0ch;L8jOLA!H*M z3W7O|+e?|>Wlbc-iPCz(#o)%XCRaHvDRmJ*HpO)d3?vNky*lF5xo*a%-P6gM&t6yY z5*Y*xR4fF*v>Lg33!)m}l0|+B!!0A45I_UsBMwrX>)hp=v*O17!H&hnab=BdcHIbUybi|76j5EqX%Y* zYbn*P4ZSJ=K!E(#+SIzd^13@Tl&|wlOY+TF3kXFgpg#TFse&tjkYVD-KT$xjln|~i zd%aXKD@wARksQFbS8%p5UrP!anvn5T`;>$(&UlXpXJh&X4JT!1SCic^gp&& z+Xguz1lMw5kmT@oK5#7%3$_nSh!!1qyRl>FwqPR}3`3Eg;O0 z@%)CX*k%M*CO!_et-5vPsU9~boO|kgMacZ&gucQ)pq6yHTwB91Q0Vr>k{n6@>oXm% z64JC9olCJEYL*L=r?8MoG(=dZ_h6*o&`t;zmQm5Bs`g2^+6p`BZBK-e05pL}5kcxt zJx9^o8$bkple3A*(%%~ey%NtICIjyXNuEibTwis`0s{wwPzGVXl{ToOJ~QI(Wvu0a zFdK8kdJ7<_hjkOjT9ue%rzQYVbfa|sV9RMBpJ5+)T@Tbq8upO@2H`|?sv|C2CEGe82#D+u6?$ZFG)^8}jXZ1+j6Nlf8}g(AU`5mkzy2sXOAxr9F9E X&+FC3YT^A1{ZqZHrIfFD!~cH(b|Z=* literal 0 HcmV?d00001 diff --git a/docs/source/serving/deploying_with_helm.rst b/docs/source/serving/deploying_with_helm.rst new file mode 100644 index 0000000000000..21b17e881b945 --- /dev/null +++ b/docs/source/serving/deploying_with_helm.rst @@ -0,0 +1,253 @@ +.. _deploying_with_helm: + +Deploying with Helm +=================== + +A Helm chart to deploy vLLM for Kubernetes + +Helm is a package manager for Kubernetes. It will help you to deploy vLLM on k8s and automate the deployment of vLLMm Kubernetes applications. With Helm, you can deploy the same framework architecture with different configurations to multiple namespaces by overriding variables values. + +This guide will walk you through the process of deploying vLLM with Helm, including the necessary prerequisites, steps for helm install and documentation on architecture and values file. + +Prerequisites +------------- +Before you begin, ensure that you have the following: + +- A running Kubernetes cluster +- NVIDIA Kubernetes Device Plugin (``k8s-device-plugin``): This can be found at `https://github.com/NVIDIA/k8s-device-plugin `__ +- Available GPU resources in your cluster +- S3 with the model which will be deployed + +Installing the chart +-------------------- + +To install the chart with the release name ``test-vllm``: + +.. code-block:: console + + helm upgrade --install --create-namespace --namespace=ns-vllm test-vllm . -f values.yaml --set secrets.s3endpoint=$ACCESS_POINT --set secrets.s3buckername=$BUCKET --set secrets.s3accesskeyid=$ACCESS_KEY --set secrets.s3accesskey=$SECRET_KEY + +Uninstalling the Chart +---------------------- + +To uninstall the ``test-vllm`` deployment: + +.. code-block:: console + + helm uninstall test-vllm --namespace=ns-vllm + +The command removes all the Kubernetes components associated with the +chart **including persistent volumes** and deletes the release. + +Architecture +------------ + +.. image:: architecture_helm_deployment.png + +Values +------ + +.. 
list-table:: Values + :widths: 25 25 25 25 + :header-rows: 1 + + * - Key + - Type + - Default + - Description + * - autoscaling + - object + - {"enabled":false,"maxReplicas":100,"minReplicas":1,"targetCPUUtilizationPercentage":80} + - Autoscaling configuration + * - autoscaling.enabled + - bool + - false + - Enable autoscaling + * - autoscaling.maxReplicas + - int + - 100 + - Maximum replicas + * - autoscaling.minReplicas + - int + - 1 + - Minimum replicas + * - autoscaling.targetCPUUtilizationPercentage + - int + - 80 + - Target CPU utilization for autoscaling + * - configs + - object + - {} + - Configmap + * - containerPort + - int + - 8000 + - Container port + * - customObjects + - list + - [] + - Custom Objects configuration + * - deploymentStrategy + - object + - {} + - Deployment strategy configuration + * - externalConfigs + - list + - [] + - External configuration + * - extraContainers + - list + - [] + - Additional containers configuration + * - extraInit + - object + - {"pvcStorage":"1Gi","s3modelpath":"relative_s3_model_path/opt-125m", "awsEc2MetadataDisabled": true} + - Additional configuration for the init container + * - extraInit.pvcStorage + - string + - "50Gi" + - Storage size of the s3 + * - extraInit.s3modelpath + - string + - "relative_s3_model_path/opt-125m" + - Path of the model on the s3 which hosts model weights and config files + * - extraInit.awsEc2MetadataDisabled + - boolean + - true + - Disables the use of the Amazon EC2 instance metadata service + * - extraPorts + - list + - [] + - Additional ports configuration + * - gpuModels + - list + - ["TYPE_GPU_USED"] + - Type of gpu used + * - image + - object + - {"command":["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"],"repository":"vllm/vllm-openai","tag":"latest"} + - Image configuration + * - image.command + - list + - ["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"] + - Container launch command + * - image.repository + - string + - "vllm/vllm-openai" + - Image repository + * - image.tag + - string + - "latest" + - Image tag + * - livenessProbe + - object + - {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":15,"periodSeconds":10} + - Liveness probe configuration + * - livenessProbe.failureThreshold + - int + - 3 + - Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not alive + * - livenessProbe.httpGet + - object + - {"path":"/health","port":8000} + - Configuration of the Kubelet http request on the server + * - livenessProbe.httpGet.path + - string + - "/health" + - Path to access on the HTTP server + * - livenessProbe.httpGet.port + - int + - 8000 + - Name or number of the port to access on the container, on which the server is listening + * - livenessProbe.initialDelaySeconds + - int + - 15 + - Number of seconds after the container has started before liveness probe is initiated + * - livenessProbe.periodSeconds + - int + - 10 + - How often (in seconds) to perform the liveness probe + * - maxUnavailablePodDisruptionBudget + - string + - "" + - Disruption Budget Configuration + * - readinessProbe + - object + - {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":5,"periodSeconds":5} + - Readiness probe configuration + * - readinessProbe.failureThreshold + - int + - 3 + - Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has 
failed: the container is not ready + * - readinessProbe.httpGet + - object + - {"path":"/health","port":8000} + - Configuration of the Kubelet http request on the server + * - readinessProbe.httpGet.path + - string + - "/health" + - Path to access on the HTTP server + * - readinessProbe.httpGet.port + - int + - 8000 + - Name or number of the port to access on the container, on which the server is listening + * - readinessProbe.initialDelaySeconds + - int + - 5 + - Number of seconds after the container has started before readiness probe is initiated + * - readinessProbe.periodSeconds + - int + - 5 + - How often (in seconds) to perform the readiness probe + * - replicaCount + - int + - 1 + - Number of replicas + * - resources + - object + - {"limits":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1},"requests":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1}} + - Resource configuration + * - resources.limits."nvidia.com/gpu" + - int + - 1 + - Number of gpus used + * - resources.limits.cpu + - int + - 4 + - Number of CPUs + * - resources.limits.memory + - string + - "16Gi" + - CPU memory configuration + * - resources.requests."nvidia.com/gpu" + - int + - 1 + - Number of gpus used + * - resources.requests.cpu + - int + - 4 + - Number of CPUs + * - resources.requests.memory + - string + - "16Gi" + - CPU memory configuration + * - secrets + - object + - {} + - Secrets configuration + * - serviceName + - string + - + - Service name + * - servicePort + - int + - 80 + - Service port + * - labels.environment + - string + - test + - Environment name + * - labels.release + - string + - test + - Release name diff --git a/examples/chart-helm/.helmignore b/examples/chart-helm/.helmignore new file mode 100644 index 0000000000000..2d1303b784cb8 --- /dev/null +++ b/examples/chart-helm/.helmignore @@ -0,0 +1,6 @@ +*.png +.git/ +ct.yaml +lintconf.yaml +values.schema.json +/workflows \ No newline at end of file diff --git a/examples/chart-helm/Chart.yaml b/examples/chart-helm/Chart.yaml new file mode 100644 index 0000000000000..fb0f06f6d2701 --- /dev/null +++ b/examples/chart-helm/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: chart-vllm +description: Chart vllm + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.0.1 + +maintainers: + - name: mfournioux diff --git a/examples/chart-helm/ct.yaml b/examples/chart-helm/ct.yaml new file mode 100644 index 0000000000000..d273e118203ad --- /dev/null +++ b/examples/chart-helm/ct.yaml @@ -0,0 +1,3 @@ +chart-dirs: + - charts +validate-maintainers: false \ No newline at end of file diff --git a/examples/chart-helm/lintconf.yaml b/examples/chart-helm/lintconf.yaml new file mode 100644 index 0000000000000..c8e8c5d7d9767 --- /dev/null +++ b/examples/chart-helm/lintconf.yaml @@ -0,0 +1,42 @@ +--- +rules: + braces: + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: -1 + max-spaces-inside-empty: -1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + min-spaces-inside-empty: -1 + max-spaces-inside-empty: -1 + colons: + max-spaces-before: 0 + max-spaces-after: 1 + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + comments: + require-starting-space: true + min-spaces-from-content: 2 + document-end: disable + document-start: disable # No --- to start a file + empty-lines: + max: 2 + max-start: 0 + max-end: 0 + hyphens: + max-spaces-after: 1 + indentation: + spaces: consistent + indent-sequences: whatever # - list indentation will handle both indentation and without + check-multi-line-strings: false + key-duplicates: enable + line-length: disable # Lines can be any length + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: enable + truthy: + level: warning \ No newline at end of file diff --git a/examples/chart-helm/templates/_helpers.tpl b/examples/chart-helm/templates/_helpers.tpl new file mode 100644 index 0000000000000..a9690bad3c945 --- /dev/null +++ b/examples/chart-helm/templates/_helpers.tpl @@ -0,0 +1,164 @@ +{{/* +Define ports for the pods +*/}} +{{- define "chart.container-port" -}} +{{- default "8000" .Values.containerPort }} +{{- end }} + +{{/* +Define service name +*/}} +{{- define "chart.service-name" -}} +{{- if .Values.serviceName }} +{{- .Values.serviceName | lower | trim }} +{{- else }} +"{{ .Release.Name }}-service" +{{- end }} +{{- end }} + +{{/* +Define service port +*/}} +{{- define "chart.service-port" -}} +{{- if .Values.servicePort }} +{{- .Values.servicePort }} +{{- else }} +{{- include "chart.container-port" . }} +{{- end }} +{{- end }} + +{{/* +Define service port name +*/}} +{{- define "chart.service-port-name" -}} +"service-port" +{{- end }} + +{{/* +Define container port name +*/}} +{{- define "chart.container-port-name" -}} +"container-port" +{{- end }} + +{{/* +Define deployment strategy +*/}} +{{- define "chart.strategy" -}} +strategy: +{{- if not .Values.deploymentStrategy }} + rollingUpdate: + maxSurge: 100% + maxUnavailable: 0 +{{- else }} +{{ toYaml .Values.deploymentStrategy | indent 2 }} +{{- end }} +{{- end }} + +{{/* +Define additional ports +*/}} +{{- define "chart.extraPorts" }} +{{- with .Values.extraPorts }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Define chart external ConfigMaps and Secrets +*/}} +{{- define "chart.externalConfigs" -}} +{{- with .Values.externalConfigs -}} +{{ toYaml . }} +{{- end }} +{{- end }} + + +{{/* +Define liveness et readiness probes +*/}} +{{- define "chart.probes" -}} +{{- if .Values.readinessProbe }} +readinessProbe: +{{- with .Values.readinessProbe }} +{{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- if .Values.livenessProbe }} +livenessProbe: +{{- with .Values.livenessProbe }} +{{- toYaml . 
| nindent 2 }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Define resources +*/}} +{{- define "chart.resources" -}} +requests: + memory: {{ required "Value 'resources.requests.memory' must be defined !" .Values.resources.requests.memory | quote }} + cpu: {{ required "Value 'resources.requests.cpu' must be defined !" .Values.resources.requests.cpu | quote }} + {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} + nvidia.com/gpu: {{ required "Value 'resources.requests.nvidia.com/gpu' must be defined !" (index .Values.resources.requests "nvidia.com/gpu") | quote }} + {{- end }} +limits: + memory: {{ required "Value 'resources.limits.memory' must be defined !" .Values.resources.limits.memory | quote }} + cpu: {{ required "Value 'resources.limits.cpu' must be defined !" .Values.resources.limits.cpu | quote }} + {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} + nvidia.com/gpu: {{ required "Value 'resources.limits.nvidia.com/gpu' must be defined !" (index .Values.resources.limits "nvidia.com/gpu") | quote }} + {{- end }} +{{- end }} + + +{{/* +Define User used for the main container +*/}} +{{- define "chart.user" }} +{{- if .Values.image.runAsUser }} +runAsUser: +{{- with .Values.runAsUser }} +{{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "chart.extraInitImage" -}} +"amazon/aws-cli:2.6.4" +{{- end }} + +{{- define "chart.extraInitEnv" -}} +- name: S3_ENDPOINT_URL + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-secrets + key: s3endpoint +- name: S3_BUCKET_NAME + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-secrets + key: s3bucketname +- name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-secrets + key: s3accesskeyid +- name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-secrets + key: s3accesskey +- name: S3_PATH + value: "{{ .Values.extraInit.s3modelpath }}" +- name: AWS_EC2_METADATA_DISABLED + value: "{{ .Values.extraInit.awsEc2MetadataDisabled }}" +{{- end }} + +{{/* + Define chart labels +*/}} +{{- define "chart.labels" -}} +{{- with .Values.labels -}} +{{ toYaml . }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/configmap.yaml b/examples/chart-helm/templates/configmap.yaml new file mode 100644 index 0000000000000..cc5d03782f878 --- /dev/null +++ b/examples/chart-helm/templates/configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.configs -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Release.Name }}-configs" + namespace: {{ .Release.Namespace }} +data: + {{- with .Values.configs }} + {{- toYaml . | nindent 2 }} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/examples/chart-helm/templates/custom-objects.yaml b/examples/chart-helm/templates/custom-objects.yaml new file mode 100644 index 0000000000000..8a65ffd0e552d --- /dev/null +++ b/examples/chart-helm/templates/custom-objects.yaml @@ -0,0 +1,6 @@ +{{- if .Values.customObjects }} +{{- range .Values.customObjects }} +{{- tpl (. 
| toYaml) $ }} +--- +{{- end }} +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/deployment.yaml b/examples/chart-helm/templates/deployment.yaml new file mode 100644 index 0000000000000..536983b587be2 --- /dev/null +++ b/examples/chart-helm/templates/deployment.yaml @@ -0,0 +1,122 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Release.Name }}-deployment-vllm" + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + {{- include "chart.strategy" . | nindent 2 }} + selector: + matchLabels: + environment: "test" + release: "test" + progressDeadlineSeconds: 1200 + template: + metadata: + labels: + environment: "test" + release: "test" + spec: + containers: + - name: "vllm" + image: "{{ required "Required value 'image.repository' must be defined !" .Values.image.repository }}:{{ required "Required value 'image.tag' must be defined !" .Values.image.tag }}" + {{- if .Values.image.command }} + command : + {{- with .Values.image.command }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + securityContext: + {{- if .Values.image.securityContext }} + {{- with .Values.image.securityContext }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- else }} + runAsNonRoot: false + {{- include "chart.user" . | indent 12 }} + {{- end }} + imagePullPolicy: IfNotPresent + {{- if .Values.image.env }} + env : + {{- with .Values.image.env }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- else }} + env: [] + {{- end }} + {{- if or .Values.externalConfigs .Values.configs .Values.secrets }} + envFrom: + {{- if .Values.configs }} + - configMapRef: + name: "{{ .Release.Name }}-configs" + {{- end }} + {{- if .Values.secrets}} + - secretRef: + name: "{{ .Release.Name }}-secrets" + {{- end }} + {{- include "chart.externalConfigs" . | nindent 12 }} + {{- end }} + ports: + - name: {{ include "chart.container-port-name" . }} + containerPort: {{ include "chart.container-port" . }} + {{- include "chart.extraPorts" . | nindent 12 }} + {{- include "chart.probes" . | indent 10 }} + resources: {{- include "chart.resources" . | nindent 12 }} + volumeMounts: + - name: {{ .Release.Name }}-storage + mountPath: /data + + {{- with .Values.extraContainers }} + {{ toYaml . | nindent 8 }} + {{- end }} + + {{- if .Values.extraInit }} + initContainers: + - name: wait-download-model + image: {{ include "chart.extraInitImage" . }} + command: + - /bin/bash + args: + - -eucx + - while aws --endpoint-url $S3_ENDPOINT_URL s3 sync --dryrun s3://$S3_BUCKET_NAME/$S3_PATH /data | grep -q download; do sleep 10; done + env: {{- include "chart.extraInitEnv" . | nindent 10 }} + resources: + requests: + cpu: 200m + memory: 1Gi + limits: + cpu: 500m + memory: 2Gi + volumeMounts: + - name: {{ .Release.Name }}-storage + mountPath: /data + {{- end }} + volumes: + - name: {{ .Release.Name }}-storage + persistentVolumeClaim: + claimName: {{ .Release.Name }}-storage-claim + + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} + runtimeClassName: nvidia + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: nvidia.com/gpu.product + operator: In + {{- with .Values.gpuModels }} + values: + {{- toYaml . | nindent 20 }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/hpa.yaml b/examples/chart-helm/templates/hpa.yaml new file mode 100644 index 0000000000000..5ca94c8213541 --- /dev/null +++ b/examples/chart-helm/templates/hpa.yaml @@ -0,0 +1,31 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: "{{ .Release.Name }}-hpa" + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: vllm + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/job.yaml b/examples/chart-helm/templates/job.yaml new file mode 100644 index 0000000000000..f9ea3541e78d2 --- /dev/null +++ b/examples/chart-helm/templates/job.yaml @@ -0,0 +1,37 @@ +{{- if .Values.extraInit }} +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ .Release.Name }}-init-vllm" + namespace: {{ .Release.Namespace }} +spec: + ttlSecondsAfterFinished: 100 + template: + metadata: + name: init-vllm + spec: + containers: + - name: job-download-model + image: {{ include "chart.extraInitImage" . }} + command: + - /bin/bash + args: + - -eucx + - aws --endpoint-url $S3_ENDPOINT_URL s3 sync s3://$S3_BUCKET_NAME/$S3_PATH /data + env: {{- include "chart.extraInitEnv" . 
| nindent 8 }} + volumeMounts: + - name: {{ .Release.Name }}-storage + mountPath: /data + resources: + requests: + cpu: 200m + memory: 1Gi + limits: + cpu: 500m + memory: 2Gi + restartPolicy: OnFailure + volumes: + - name: {{ .Release.Name }}-storage + persistentVolumeClaim: + claimName: "{{ .Release.Name }}-storage-claim" +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/poddisruptionbudget.yaml b/examples/chart-helm/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000000000..512bac727da87 --- /dev/null +++ b/examples/chart-helm/templates/poddisruptionbudget.yaml @@ -0,0 +1,7 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: "{{ .Release.Name }}-pdb" + namespace: {{ .Release.Namespace }} +spec: + maxUnavailable: {{ default 1 .Values.maxUnavailablePodDisruptionBudget }} \ No newline at end of file diff --git a/examples/chart-helm/templates/pvc.yaml b/examples/chart-helm/templates/pvc.yaml new file mode 100644 index 0000000000000..e8d203a7a5ace --- /dev/null +++ b/examples/chart-helm/templates/pvc.yaml @@ -0,0 +1,13 @@ +{{- if .Values.extraInit }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Release.Name }}-storage-claim" + namespace: {{ .Release.Namespace }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.extraInit.pvcStorage }} +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/secrets.yaml b/examples/chart-helm/templates/secrets.yaml new file mode 100644 index 0000000000000..4e88e747b616a --- /dev/null +++ b/examples/chart-helm/templates/secrets.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: "{{ .Release.Name }}-secrets" + namespace: {{ .Release.Namespace }} +type: Opaque +data: + {{- range $key, $val := .Values.secrets }} + {{ $key }}: {{ $val | b64enc | quote }} + {{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/service.yaml b/examples/chart-helm/templates/service.yaml new file mode 100644 index 0000000000000..12d0f68b03a35 --- /dev/null +++ b/examples/chart-helm/templates/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Release.Name }}-service" + namespace: {{ .Release.Namespace }} +spec: + type: ClusterIP + ports: + - name: {{ include "chart.service-port-name" . }} + port: {{ include "chart.service-port" . }} + targetPort: {{ include "chart.container-port-name" . }} + protocol: TCP + selector: + {{- include "chart.labels" . 
| nindent 4 }} \ No newline at end of file diff --git a/examples/chart-helm/values.schema.json b/examples/chart-helm/values.schema.json new file mode 100644 index 0000000000000..812d54bde1397 --- /dev/null +++ b/examples/chart-helm/values.schema.json @@ -0,0 +1,265 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "image": { + "type": "object", + "properties": { + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "command": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "command", + "repository", + "tag" + ] + }, + "containerPort": { + "type": "integer" + }, + "serviceName": { + "type": "null" + }, + "servicePort": { + "type": "integer" + }, + "extraPorts": { + "type": "array" + }, + "replicaCount": { + "type": "integer" + }, + "deploymentStrategy": { + "type": "object" + }, + "resources": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "integer" + }, + "memory": { + "type": "string" + }, + "nvidia.com/gpu": { + "type": "integer" + } + }, + "required": [ + "cpu", + "memory", + "nvidia.com/gpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "integer" + }, + "memory": { + "type": "string" + }, + "nvidia.com/gpu": { + "type": "integer" + } + }, + "required": [ + "cpu", + "memory", + "nvidia.com/gpu" + ] + } + }, + "required": [ + "limits", + "requests" + ] + }, + "gpuModels": { + "type": "array", + "items": { + "type": "string" + } + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "targetCPUUtilizationPercentage": { + "type": "integer" + } + }, + "required": [ + "enabled", + "maxReplicas", + "minReplicas", + "targetCPUUtilizationPercentage" + ] + }, + "configs": { + "type": "object" + }, + "secrets": { + "type": "object" + }, + "externalConfigs": { + "type": "array" + }, + "customObjects": { + "type": "array" + }, + "maxUnavailablePodDisruptionBudget": { + "type": "string" + }, + "extraInit": { + "type": "object", + "properties": { + "s3modelpath": { + "type": "string" + }, + "pvcStorage": { + "type": "string" + }, + "awsEc2MetadataDisabled": { + "type": "boolean" + } + }, + "required": [ + "pvcStorage", + "s3modelpath", + "awsEc2MetadataDisabled" + ] + }, + "extraContainers": { + "type": "array" + }, + "readinessProbe": { + "type": "object", + "properties": { + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "failureThreshold": { + "type": "integer" + }, + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "port": { + "type": "integer" + } + }, + "required": [ + "path", + "port" + ] + } + }, + "required": [ + "failureThreshold", + "httpGet", + "initialDelaySeconds", + "periodSeconds" + ] + }, + "livenessProbe": { + "type": "object", + "properties": { + "initialDelaySeconds": { + "type": "integer" + }, + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "port": { + "type": "integer" + } + }, + "required": [ + "path", + "port" + ] + } + }, + "required": [ + "failureThreshold", + "httpGet", + "initialDelaySeconds", + "periodSeconds" + ] + }, + "labels": { + "type": "object", + "properties": { + "environment": { + "type": "string" + 
}, + "release": { + "type": "string" + } + }, + "required": [ + "environment", + "release" + ] + } + }, + "required": [ + "autoscaling", + "configs", + "containerPort", + "customObjects", + "deploymentStrategy", + "externalConfigs", + "extraContainers", + "extraInit", + "extraPorts", + "gpuModels", + "image", + "labels", + "livenessProbe", + "maxUnavailablePodDisruptionBudget", + "readinessProbe", + "replicaCount", + "resources", + "secrets", + "servicePort" + ] +} \ No newline at end of file diff --git a/examples/chart-helm/values.yaml b/examples/chart-helm/values.yaml new file mode 100644 index 0000000000000..9c48e7d061bf7 --- /dev/null +++ b/examples/chart-helm/values.yaml @@ -0,0 +1,119 @@ +# -- Default values for chart vllm +# -- Declare variables to be passed into your templates. + +# -- Image configuration +image: + # -- Image repository + repository: "vllm/vllm-openai" + # -- Image tag + tag: "latest" + # -- Container launch command + command: ["vllm", "serve", "/data/", "--served-model-name", "opt-125m", "--dtype", "bfloat16", "--host", "0.0.0.0", "--port", "8000"] + +# -- Container port +containerPort: 8000 +# -- Service name +serviceName: +# -- Service port +servicePort: 80 +# -- Additional ports configuration +extraPorts: [] + +# -- Number of replicas +replicaCount: 1 + +# -- Deployment strategy configuration +deploymentStrategy: {} + +# -- Resource configuration +resources: + requests: + # -- Number of CPUs + cpu: 4 + # -- CPU memory configuration + memory: 16Gi + # -- Number of gpus used + nvidia.com/gpu: 1 + limits: + # -- Number of CPUs + cpu: 4 + # -- CPU memory configuration + memory: 16Gi + # -- Number of gpus used + nvidia.com/gpu: 1 + +# -- Type of gpu used +gpuModels: + - "TYPE_GPU_USED" + +# -- Autoscaling configuration +autoscaling: + # -- Enable autoscaling + enabled: false + # -- Minimum replicas + minReplicas: 1 + # -- Maximum replicas + maxReplicas: 100 + # -- Target CPU utilization for autoscaling + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Configmap +configs: {} + +# -- Secrets configuration +secrets: {} + +# -- External configuration +externalConfigs: [] + +# -- Custom Objects configuration +customObjects: [] + +# -- Disruption Budget Configuration +maxUnavailablePodDisruptionBudget: "" + +# -- Additional configuration for the init container +extraInit: + # -- Path of the model on the s3 which hosts model weights and config files + s3modelpath: "relative_s3_model_path/opt-125m" + # -- Storage size of the s3 + pvcStorage: "1Gi" + awsEc2MetadataDisabled: true + +# -- Additional containers configuration +extraContainers: [] + +# -- Readiness probe configuration +readinessProbe: + # -- Number of seconds after the container has started before readiness probe is initiated + initialDelaySeconds: 5 + # -- How often (in seconds) to perform the readiness probe + periodSeconds: 5 + # -- Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not ready + failureThreshold: 3 + # -- Configuration of the Kubelet http request on the server + httpGet: + # -- Path to access on the HTTP server + path: /health + # -- Name or number of the port to access on the container, on which the server is listening + port: 8000 + +# -- Liveness probe configuration +livenessProbe: + # -- Number of seconds after the container has started before liveness probe is initiated + initialDelaySeconds: 15 + # -- Number of times after which if a probe fails in a row, Kubernetes considers 
that the overall check has failed: the container is not alive + failureThreshold: 3 + # -- How often (in seconds) to perform the liveness probe + periodSeconds: 10 + # -- Configuration of the Kubelet http request on the server + httpGet: + # -- Path to access on the HTTP server + path: /health + # -- Name or number of the port to access on the container, on which the server is listening + port: 8000 + +labels: + environment: "test" + release: "test" From beb16b2c810a87b28e7b8a7aa29d26f842f654b9 Mon Sep 17 00:00:00 2001 From: Travis Johnson Date: Tue, 10 Dec 2024 03:27:11 -0700 Subject: [PATCH 298/397] [Bugfix] Handle <|tool_call|> token in granite tool parser (#11039) Signed-off-by: Travis Johnson --- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py index b5854ca39ab47..00917c866e496 100644 --- a/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py @@ -35,11 +35,13 @@ class GraniteToolParser(ToolParser): def __init__(self, tokenizer: AnyTokenizer): super().__init__(tokenizer) + self.bot_token = "<|tool_call|>" def extract_tool_calls( self, model_output: str, request: ChatCompletionRequest) -> ExtractedToolCallInformation: - stripped = model_output.strip() + # remove whitespace and the BOT token if it exists + stripped = model_output.strip().removeprefix(self.bot_token).lstrip() if not stripped or stripped[0] != '[': return ExtractedToolCallInformation(tools_called=False, tool_calls=[], From d05f88679bedd73939251a17c3d785a354b2946c Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Tue, 10 Dec 2024 19:12:01 +0800 Subject: [PATCH 299/397] [Misc][LoRA] Add PEFTHelper for LoRA (#11003) Signed-off-by: Jee Jee Li --- tests/lora/test_lora_manager.py | 58 +++++++++++++++++++++++++-- vllm/lora/lora.py | 18 +++++++++ vllm/lora/models.py | 42 ++++++++------------ vllm/lora/peft_helper.py | 70 +++++++++++++++++++++++++++++++++ 4 files changed, 160 insertions(+), 28 deletions(-) create mode 100644 vllm/lora/peft_helper.py diff --git a/tests/lora/test_lora_manager.py b/tests/lora/test_lora_manager.py index 8d109b2c81503..0b76f466702fc 100644 --- a/tests/lora/test_lora_manager.py +++ b/tests/lora/test_lora_manager.py @@ -1,3 +1,4 @@ +import json import os from typing import Dict, List @@ -13,6 +14,7 @@ from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights from vllm.lora.models import (LoRAMapping, LoRAModel, LoRAModelManager, LRUCacheLoRAModelManager) +from vllm.lora.peft_helper import PEFTHelper from vllm.lora.request import LoRARequest from vllm.lora.worker_manager import (LRUCacheWorkerLoRAManager, WorkerLoRAManager) @@ -30,18 +32,68 @@ ] +def test_peft_helper(sql_lora_files): + lora_config_path = os.path.join(sql_lora_files, "adapter_config.json") + with open(lora_config_path) as f: + config = json.load(f) + peft_helper = PEFTHelper.from_dict(config) + assert peft_helper.r == 8 + assert peft_helper.lora_alpha == 16 + assert peft_helper.target_modules == [ + "q_proj", + "v_proj", + "k_proj", + "o_proj", + "gate_proj", + "up_proj", + "down_proj", + "embed_tokens", + "lm_head", + ] + + expected_error = "vLLM only supports modules_to_save being None." 
+ with pytest.raises(ValueError, match=expected_error): + config = dict( + r=8, + lora_alpha=16, + target_modules=["gate_proj"], + modules_to_save=["lm_head"], + ) + PEFTHelper.from_dict(config) + expected_error = "vLLM does not yet support RSLoRA." + with pytest.raises(ValueError, match=expected_error): + config = dict(r=8, + lora_alpha=16, + target_modules=["gate_proj"], + use_rslora=True) + PEFTHelper.from_dict(config) + + expected_error = "vLLM does not yet support DoRA." + with pytest.raises(ValueError, match=expected_error): + config = dict(r=8, + lora_alpha=16, + target_modules=["gate_proj"], + use_dora=True) + PEFTHelper.from_dict(config) + + @pytest.mark.parametrize("device", CUDA_DEVICES) def test_from_lora_tensors(sql_lora_files, device): tensors = load_file( os.path.join(sql_lora_files, "adapter_model.safetensors")) new_embeddings = load_file( os.path.join(sql_lora_files, "new_embeddings.safetensors")) + + lora_config_path = os.path.join(sql_lora_files, "adapter_config.json") + with open(lora_config_path) as f: + config = json.load(f) + + peft_helper = PEFTHelper.from_dict(config) lora_model = LoRAModel.from_lora_tensors( 1, - 8, - 16, tensors, - device, + peft_helper=peft_helper, + device=device, embeddings=new_embeddings, embedding_modules=EMBEDDING_MODULES, embedding_padding_modules=EMBEDDING_PADDING_MODULES) diff --git a/vllm/lora/lora.py b/vllm/lora/lora.py index b648312ba76ec..dde347b78bf81 100644 --- a/vllm/lora/lora.py +++ b/vllm/lora/lora.py @@ -4,6 +4,7 @@ import torch import torch.types +from vllm.lora.peft_helper import PEFTHelper from vllm.utils import is_pin_memory_available @@ -59,6 +60,23 @@ def extra_vocab_size(self) -> int: return self.embeddings_tensor.shape[ 0] if self.embeddings_tensor is not None else 0 + @classmethod + def from_config( + cls, + module_name: str, + peft_helper: PEFTHelper, + embeddings_tensor: Optional[torch.Tensor] = None, + ) -> "LoRALayerWeights": + return cls( + module_name, + peft_helper.r, + peft_helper.lora_alpha, + None, + None, + None, + embeddings_tensor, + ) + @classmethod def create_dummy_lora_weights( cls, diff --git a/vllm/lora/models.py b/vllm/lora/models.py index 49cd9f0c236ad..70806a77b9fff 100644 --- a/vllm/lora/models.py +++ b/vllm/lora/models.py @@ -21,6 +21,7 @@ LinearScalingRotaryEmbeddingWithLora, LoRAMapping) from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights +from vllm.lora.peft_helper import PEFTHelper from vllm.lora.punica_wrapper import get_punica_wrapper from vllm.lora.utils import (from_layer, from_layer_logits_processor, is_regex_target_modules, @@ -104,14 +105,12 @@ def get_lora(self, module_name: str) -> Optional[LoRALayerWeights]: def from_lora_tensors( cls, lora_model_id: int, - rank: int, - lora_alpha: int, tensors: Dict[str, torch.Tensor], + peft_helper: PEFTHelper, device: str = "cuda", dtype: Optional[torch.dtype] = None, embeddings: Optional[Dict[str, torch.Tensor]] = None, target_embedding_padding: Optional[int] = None, - scaling_factor: Optional[float] = None, embedding_modules: Optional[Dict[str, str]] = None, embedding_padding_modules: Optional[List[str]] = None, ) -> "LoRAModel": @@ -135,10 +134,9 @@ def from_lora_tensors( if pin_memory: lora_embeddings_tensor = ( lora_embeddings_tensor.pin_memory()) - loras[module_name] = LoRALayerWeights(module_name, rank, - lora_alpha, None, None, - None, - lora_embeddings_tensor) + loras[module_name] = LoRALayerWeights.from_config( + module_name, peft_helper, lora_embeddings_tensor) + if is_bias: loras[module_name].bias = 
tensor.to(device=device, dtype=dtype).t() @@ -170,7 +168,11 @@ def from_lora_tensors( for lora in loras.values(): lora.optimize() - return cls(lora_model_id, rank, loras, scaling_factor=scaling_factor) + + return cls(lora_model_id, + peft_helper.r, + loras, + scaling_factor=peft_helper.vllm_scaling_factor) @classmethod def from_local_checkpoint( @@ -212,6 +214,9 @@ def from_local_checkpoint( "new_embeddings.bin") with open(lora_config_path) as f: config = json.load(f) + + config["vllm_max_position_embeddings"] = max_position_embeddings + peft_helper = PEFTHelper.from_dict(config) if os.path.isfile(lora_tensor_path): tensors: Dict[str, torch.Tensor] = {} # Find unexpected modules. @@ -242,7 +247,7 @@ def from_local_checkpoint( # When a bin file is provided, we rely on config to find unexpected # modules. unexpected_modules = [] - target_modules = config["target_modules"] + target_modules = peft_helper.target_modules if not isinstance(target_modules, list): target_modules = [target_modules] for module in target_modules: @@ -256,7 +261,7 @@ def from_local_checkpoint( # https://github.com/vllm-project/vllm/pull/5909. But there's no # other better mechanism. if unexpected_modules and not is_regex_target_modules( - config["target_modules"], expected_lora_modules): + peft_helper.target_modules, expected_lora_modules): raise ValueError( f"While loading {lora_dir}, expected" f" target modules in {expected_lora_modules}" @@ -274,30 +279,17 @@ def from_local_checkpoint( embeddings = torch.load(new_embeddings_bin_file_path, map_location=device) - rank = config["r"] - lora_alpha = config["lora_alpha"] - context_length = config.get("context_length", None) - scaling_factor = None - if context_length: - if max_position_embeddings is None: - max_position_embeddings = context_length - scaling_factor = float( - math.ceil(context_length / max_position_embeddings)) - return cls.from_lora_tensors( lora_model_id=get_lora_id() if lora_model_id is None else lora_model_id, - rank=rank, - lora_alpha=lora_alpha, tensors=tensors, + peft_helper=peft_helper, device=device, dtype=dtype, embeddings=embeddings, target_embedding_padding=target_embedding_padding, - scaling_factor=scaling_factor, embedding_modules=embedding_modules, - embedding_padding_modules=embedding_padding_modules, - ) + embedding_padding_modules=embedding_padding_modules) class LoRAModelManager(AdapterModelManager): diff --git a/vllm/lora/peft_helper.py b/vllm/lora/peft_helper.py new file mode 100644 index 0000000000000..edf4ba5659575 --- /dev/null +++ b/vllm/lora/peft_helper.py @@ -0,0 +1,70 @@ +# Adapted from: https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/config.py + +import math +from dataclasses import MISSING, dataclass, field, fields +from typing import Literal, Optional, Union + + +@dataclass +class PEFTHelper: + # Required fields + r: int + lora_alpha: int + target_modules: Union[list[str], str] + + bias: Literal["none", "all", "lora_only"] = field(default="none") + modules_to_save: Optional[list[str]] = field(default=None) + use_rslora: bool = field(default=False) + use_dora: bool = field(default=False) + # long lora field + context_length: int = field(default=0) + # Extra vllm field, start with 'vllm_' to avoid conflict + vllm_max_position_embeddings: Optional[int] = field(default=False) + vllm_scaling_factor: Optional[float] = field(default=None) + + def _validate_features(self): + error_msg = [] + + if self.modules_to_save: + error_msg.append("vLLM only supports modules_to_save being None.") + if self.use_rslora: + 
error_msg.append("vLLM does not yet support RSLoRA.") + + if self.use_dora: + error_msg.append("vLLM does not yet support DoRA.") + + if error_msg: + raise ValueError(f"{', '.join(error_msg)}") + + def __post_init__(self): + self._validate_features() + if self.context_length: + if self.vllm_max_position_embeddings is None: + self.vllm_max_position_embeddings = self.context_length + self.vllm_scaling_factor = float( + math.ceil(self.context_length / + self.vllm_max_position_embeddings)) + + @classmethod + def from_dict(cls, config_dict: dict) -> "PEFTHelper": + # Get all field information from the class + class_fields = {f.name: f for f in fields(cls)} + # Check for required fields + required_fields = { + name + for name, f in class_fields.items() + if f.default is MISSING and f.default_factory is MISSING + } + + # Identify any missing required fields + missing_fields = required_fields - set(config_dict.keys()) + if missing_fields: + raise ValueError( + f"Missing required configuration fields: {missing_fields}") + + # Filter out fields that aren't defined in the class + filtered_dict = { + k: v + for k, v in config_dict.items() if k in class_fields + } + return cls(**filtered_dict) From 9b9cef3145381721fa950c89718fe71849ac2a55 Mon Sep 17 00:00:00 2001 From: Joe Runde Date: Tue, 10 Dec 2024 09:38:23 -0700 Subject: [PATCH 300/397] [Bugfix] Backport request id validation to v0 (#11036) Signed-off-by: Joe Runde --- vllm/engine/multiprocessing/client.py | 4 ++++ vllm/v1/engine/async_llm.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index 32bd83305bb8f..a729023bc00bb 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -576,6 +576,10 @@ async def _process_request( if self._errored_with is not None: raise ENGINE_DEAD_ERROR(self._errored_with) + # Ensure the request id is unique among running requests + if request_id in self.output_queues: + raise ValueError(f"Request {request_id} already exists") + # Constructing guided decoding logits processors is expensive, so we do # it here to avoid contending with cpu resources and the GIL on the # backend process. diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 26fd650aee4b7..24cafeff63d1e 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -152,7 +152,7 @@ async def add_request( """Add new request to the AsyncLLM.""" if self.detokenizer.is_request_active(request_id): - raise KeyError(f"Request {request_id} already exists.") + raise ValueError(f"Request {request_id} already exists.") # 1) Create a new AsyncStream for the request. 
stream = self._add_request_to_streams(request_id) From 250ee65d72a0c7b86ec5cea9cbe9377da21d6439 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fl=C3=A1via=20B=C3=A9o?= <119421251+flaviabeo@users.noreply.github.com> Date: Tue, 10 Dec 2024 14:38:15 -0300 Subject: [PATCH 301/397] [BUG] Remove token param #10921 (#11022) Signed-off-by: Flavia Beo --- vllm/transformers_utils/config.py | 63 ++++++++++++++----------------- 1 file changed, 29 insertions(+), 34 deletions(-) diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 3da99bcbee9ae..4529cf27ef565 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -1,5 +1,6 @@ import enum import json +import os from pathlib import Path from typing import Any, Dict, Optional, Type, Union @@ -41,6 +42,7 @@ from transformers import AutoConfig MISTRAL_CONFIG_NAME = "params.json" +HF_TOKEN = os.getenv('HF_TOKEN', None) logger = init_logger(__name__) @@ -77,8 +79,8 @@ class ConfigFormat(str, enum.Enum): MISTRAL = "mistral" -def file_or_path_exists(model: Union[str, Path], config_name, revision, - token) -> bool: +def file_or_path_exists(model: Union[str, Path], config_name: str, + revision: Optional[str]) -> bool: if Path(model).exists(): return (Path(model) / config_name).is_file() @@ -93,7 +95,10 @@ def file_or_path_exists(model: Union[str, Path], config_name, revision, # NB: file_exists will only check for the existence of the config file on # hf_hub. This will fail in offline mode. try: - return file_exists(model, config_name, revision=revision, token=token) + return file_exists(model, + config_name, + revision=revision, + token=HF_TOKEN) except huggingface_hub.errors.OfflineModeIsEnabled: # Don't raise in offline mode, all we know is that we don't have this # file cached. @@ -161,7 +166,6 @@ def get_config( revision: Optional[str] = None, code_revision: Optional[str] = None, config_format: ConfigFormat = ConfigFormat.AUTO, - token: Optional[str] = None, **kwargs, ) -> PretrainedConfig: # Separate model folder from file path for GGUF models @@ -173,19 +177,20 @@ def get_config( if config_format == ConfigFormat.AUTO: if is_gguf or file_or_path_exists( - model, HF_CONFIG_NAME, revision=revision, token=token): + model, HF_CONFIG_NAME, revision=revision): config_format = ConfigFormat.HF - elif file_or_path_exists(model, - MISTRAL_CONFIG_NAME, - revision=revision, - token=token): + elif file_or_path_exists(model, MISTRAL_CONFIG_NAME, + revision=revision): config_format = ConfigFormat.MISTRAL else: # If we're in offline mode and found no valid config format, then # raise an offline mode error to indicate to the user that they # don't have files cached and may need to go online. # This is conveniently triggered by calling file_exists(). 
- file_exists(model, HF_CONFIG_NAME, revision=revision, token=token) + file_exists(model, + HF_CONFIG_NAME, + revision=revision, + token=HF_TOKEN) raise ValueError(f"No supported config format found in {model}") @@ -194,7 +199,7 @@ def get_config( model, revision=revision, code_revision=code_revision, - token=token, + token=HF_TOKEN, **kwargs, ) @@ -206,7 +211,7 @@ def get_config( model, revision=revision, code_revision=code_revision, - token=token, + token=HF_TOKEN, **kwargs, ) else: @@ -216,7 +221,7 @@ def get_config( trust_remote_code=trust_remote_code, revision=revision, code_revision=code_revision, - token=token, + token=HF_TOKEN, **kwargs, ) except ValueError as e: @@ -234,7 +239,7 @@ def get_config( raise e elif config_format == ConfigFormat.MISTRAL: - config = load_params_config(model, revision, token=token, **kwargs) + config = load_params_config(model, revision, token=HF_TOKEN, **kwargs) else: raise ValueError(f"Unsupported config format: {config_format}") @@ -256,8 +261,7 @@ def get_config( def get_hf_file_to_dict(file_name: str, model: Union[str, Path], - revision: Optional[str] = 'main', - token: Optional[str] = None): + revision: Optional[str] = 'main'): """ Downloads a file from the Hugging Face Hub and returns its contents as a dictionary. @@ -266,7 +270,6 @@ def get_hf_file_to_dict(file_name: str, - file_name (str): The name of the file to download. - model (str): The name of the model on the Hugging Face Hub. - revision (str): The specific version of the model. - - token (str): The Hugging Face authentication token. Returns: - config_dict (dict): A dictionary containing @@ -276,8 +279,7 @@ def get_hf_file_to_dict(file_name: str, if file_or_path_exists(model=model, config_name=file_name, - revision=revision, - token=token): + revision=revision): if not file_path.is_file(): try: @@ -296,9 +298,7 @@ def get_hf_file_to_dict(file_name: str, return None -def get_pooling_config(model: str, - revision: Optional[str] = 'main', - token: Optional[str] = None): +def get_pooling_config(model: str, revision: Optional[str] = 'main'): """ This function gets the pooling and normalize config from the model - only applies to @@ -315,8 +315,7 @@ def get_pooling_config(model: str, """ modules_file_name = "modules.json" - modules_dict = get_hf_file_to_dict(modules_file_name, model, revision, - token) + modules_dict = get_hf_file_to_dict(modules_file_name, model, revision) if modules_dict is None: return None @@ -332,8 +331,7 @@ def get_pooling_config(model: str, if pooling: pooling_file_name = "{}/config.json".format(pooling["path"]) - pooling_dict = get_hf_file_to_dict(pooling_file_name, model, revision, - token) + pooling_dict = get_hf_file_to_dict(pooling_file_name, model, revision) pooling_type_name = next( (item for item, val in pooling_dict.items() if val is True), None) @@ -368,8 +366,8 @@ def get_pooling_config_name(pooling_name: str) -> Union[str, None]: def get_sentence_transformer_tokenizer_config(model: str, - revision: Optional[str] = 'main', - token: Optional[str] = None): + revision: Optional[str] = 'main' + ): """ Returns the tokenization configuration dictionary for a given Sentence Transformer BERT model. @@ -379,7 +377,6 @@ def get_sentence_transformer_tokenizer_config(model: str, BERT model. - revision (str, optional): The revision of the m odel to use. Defaults to 'main'. - - token (str): A Hugging Face access token. 
Returns: - dict: A dictionary containing the configuration parameters @@ -394,7 +391,7 @@ def get_sentence_transformer_tokenizer_config(model: str, "sentence_xlm-roberta_config.json", "sentence_xlnet_config.json", ]: - encoder_dict = get_hf_file_to_dict(config_name, model, revision, token) + encoder_dict = get_hf_file_to_dict(config_name, model, revision) if encoder_dict: break @@ -474,16 +471,14 @@ def _reduce_config(config: VllmConfig): exc_info=e) -def load_params_config(model: Union[str, Path], - revision: Optional[str], - token: Optional[str] = None, +def load_params_config(model: Union[str, Path], revision: Optional[str], **kwargs) -> PretrainedConfig: # This function loads a params.json config which # should be used when loading models in mistral format config_file_name = "params.json" - config_dict = get_hf_file_to_dict(config_file_name, model, revision, token) + config_dict = get_hf_file_to_dict(config_file_name, model, revision) assert isinstance(config_dict, dict) config_mapping = { From e7391949267a4eff3d84f02119f442f46b16d163 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 10 Dec 2024 15:08:16 -0500 Subject: [PATCH 302/397] [Core] Update to outlines >= 0.1.8 (#10576) Signed-off-by: Russell Bryant --- requirements-common.txt | 2 +- .../guided_decoding/outlines_logits_processors.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-common.txt b/requirements-common.txt index 112528880c0ac..c71fc458aca13 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -18,7 +18,7 @@ prometheus_client >= 0.18.0 prometheus-fastapi-instrumentator >= 7.0.0 tiktoken >= 0.6.0 # Required for DBRX tokenizer lm-format-enforcer >= 0.10.9, < 0.11 -outlines >= 0.0.43, < 0.1 +outlines >= 0.1.8 xgrammar >= 0.1.6; platform_machine == "x86_64" typing_extensions >= 4.10 filelock >= 3.16.1 # need to contain https://github.com/tox-dev/filelock/pull/317 diff --git a/vllm/model_executor/guided_decoding/outlines_logits_processors.py b/vllm/model_executor/guided_decoding/outlines_logits_processors.py index e1309c31f77e7..1f0dbe024609d 100644 --- a/vllm/model_executor/guided_decoding/outlines_logits_processors.py +++ b/vllm/model_executor/guided_decoding/outlines_logits_processors.py @@ -99,7 +99,7 @@ class RegexLogitsProcessor(BaseLogitsProcessor): def _get_guide(cls, regex_string: str, tokenizer: PreTrainedTokenizerBase) -> Guide: tokenizer = _adapt_tokenizer(tokenizer) - return RegexGuide(regex_string, tokenizer) + return RegexGuide.from_regex(regex_string, tokenizer) def __init__(self, regex_string: str, tokenizer: PreTrainedTokenizerBase): """Compile the FSM that drives the regex-structured generation. 
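A minimal usage sketch for the guided-decoding path touched above (illustrative, not part of the patch: the server address, model name, and regex are placeholders; it assumes a running vLLM OpenAI-compatible server with outlines >= 0.1.8 installed):

    from openai import OpenAI

    # Point the client at a locally running vLLM OpenAI-compatible server.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

    # guided_regex requests regex-constrained output; with the outlines backend
    # selected, the request is handled by RegexLogitsProcessor, which now builds
    # its FSM via RegexGuide.from_regex().
    completion = client.chat.completions.create(
        model="meta-llama/Llama-3.1-8B-Instruct",
        messages=[{"role": "user", "content": "Reply with an ISO-8601 date only."}],
        extra_body={
            "guided_regex": r"\d{4}-\d{2}-\d{2}",
            "guided_decoding_backend": "outlines",
        },
    )
    print(completion.choices[0].message.content)
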
From 75f89dc44c6e44cc28bae59d5b40a588735b507b Mon Sep 17 00:00:00 2001 From: youkaichao Date: Tue, 10 Dec 2024 12:40:52 -0800 Subject: [PATCH 303/397] [torch.compile] add a flag to track batchsize statistics (#11059) Signed-off-by: youkaichao --- vllm/envs.py | 3 +++ vllm/forward_context.py | 32 +++++++++++++++++++++++- vllm/v1/attention/backends/flash_attn.py | 1 + vllm/v1/worker/gpu_model_runner.py | 2 ++ 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/vllm/envs.py b/vllm/envs.py index ab12a7b48dc53..be5d9985b63a4 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -69,6 +69,7 @@ VLLM_DISABLED_KERNELS: List[str] = [] VLLM_USE_V1: bool = False VLLM_ENABLE_V1_MULTIPROCESSING: bool = False + VLLM_LOG_BATCHSIZE_INTERVAL: float = -1 def get_default_cache_root(): @@ -452,6 +453,8 @@ def get_default_config_root(): # If set, enable multiprocessing in LLM for the V1 code path. "VLLM_ENABLE_V1_MULTIPROCESSING": lambda: bool(int(os.getenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0"))), + "VLLM_LOG_BATCHSIZE_INTERVAL": + lambda: float(os.getenv("VLLM_LOG_BATCHSIZE_INTERVAL", "-1")), } # end-env-vars-definition diff --git a/vllm/forward_context.py b/vllm/forward_context.py index aaa3e4bb3a1e8..cd136f43c0c57 100644 --- a/vllm/forward_context.py +++ b/vllm/forward_context.py @@ -1,8 +1,19 @@ +import time +from collections import Counter from contextlib import contextmanager from dataclasses import dataclass from typing import Any, Dict, Optional +import vllm.envs as envs from vllm.config import VllmConfig +from vllm.logger import init_logger + +logger = init_logger(__name__) + +track_batchsize: bool = envs.VLLM_LOG_BATCHSIZE_INTERVAL >= 0 +batchsize_counter: Counter = Counter() +last_logging_time: float = 0 +batchsize_logging_interval: float = envs.VLLM_LOG_BATCHSIZE_INTERVAL @dataclass @@ -26,7 +37,26 @@ def get_forward_context() -> ForwardContext: @contextmanager def set_forward_context(context: Any, vllm_config: VllmConfig): """A context manager that stores the current forward context, - can be attention metadata, etc.""" + can be attention metadata, etc. + Here we can inject common logic for every model forward pass. + """ + global track_batchsize, batchsize_counter + global last_logging_time, batchsize_logging_interval + if track_batchsize and context is not None: + if hasattr(context, "num_prefill_tokens"): + # for v0 attention backends + batchsize = context.num_prefill_tokens + context.num_decode_tokens + else: + # for v1 attention backends + batchsize = context.num_input_tokens + batchsize_counter[batchsize] += 1 + if time.monotonic() - last_logging_time > batchsize_logging_interval: + last_logging_time = time.monotonic() + sorted_data = sorted(batchsize_counter.items(), + key=lambda x: x[1], + reverse=True) + logger.info("Batchsize distribution (batchsize, count): %s", + sorted_data) global _forward_context prev_context = _forward_context _forward_context = ForwardContext( diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index 251a103e60f06..c9f04ace644c7 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -56,6 +56,7 @@ class FlashAttentionMetadata: seq_start_loc: torch.Tensor block_table: torch.Tensor slot_mapping: torch.Tensor + num_input_tokens: int = 0 # Number of tokens including padding. 
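    # (Annotation, not part of the upstream hunk.) Hedged usage sketch for the new
    # flag: start vLLM with the interval set in seconds, e.g.
    #   VLLM_LOG_BATCHSIZE_INTERVAL=30 vllm serve <model>
    # set_forward_context() then reads num_input_tokens from this metadata on v1
    # backends (num_prefill_tokens + num_decode_tokens on v0) and logs the sorted
    # (batchsize, count) distribution at most once per interval.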
class FlashAttentionImpl(AttentionImpl): diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 0a5adfb28c9bd..a3335fa838352 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -445,6 +445,8 @@ def execute_model( # Eager mode. num_input_tokens = num_scheduled_tokens + attn_metadata.num_input_tokens = num_input_tokens + # Get the inputs embeds. if encoder_outputs: inputs_embeds = self.model.get_input_embeddings( From 134810b3d9a05510622282479f0f9e2114b88017 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Tue, 10 Dec 2024 14:41:23 -0800 Subject: [PATCH 304/397] [V1][Bugfix] Always set enable_chunked_prefill = True for V1 (#11061) Signed-off-by: Woosuk Kwon --- vllm/engine/arg_utils.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 3db069ec64ee4..7b9adc401abcf 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -122,7 +122,7 @@ class EngineArgs: cpu_offload_gb: float = 0 # GiB gpu_memory_utilization: float = 0.90 max_num_batched_tokens: Optional[int] = None - max_num_seqs: int = 256 + max_num_seqs: Optional[int] = None max_logprobs: int = 20 # Default value for OpenAI Chat Completions API disable_log_stats: bool = False revision: Optional[str] = None @@ -205,6 +205,9 @@ def __post_init__(self): # by user. if self.enable_prefix_caching is None: self.enable_prefix_caching = bool(envs.VLLM_USE_V1) + # Override max_num_seqs if it's not set by user. + if self.max_num_seqs is None: + self.max_num_seqs = 256 if not envs.VLLM_USE_V1 else 1024 # support `EngineArgs(compilation_config={...})` # without having to manually construct a @@ -1225,19 +1228,19 @@ def _override_v1_engine_args(self, usage_context: UsageContext) -> None: """ assert envs.VLLM_USE_V1, "V1 is not enabled" + # V1 always uses chunked prefills. + self.enable_chunked_prefill = True + # When no user override, set the default values based on the usage + # context. + # TODO(woosuk): Tune the default values for different hardware. if self.max_num_batched_tokens is None: - # When no user override, set the default values based on the - # usage context. 
if usage_context == UsageContext.LLM_CLASS: - logger.warning("Setting max_num_batched_tokens to 8192 " - "for LLM_CLASS usage context.") - self.max_num_seqs = 1024 self.max_num_batched_tokens = 8192 elif usage_context == UsageContext.OPENAI_API_SERVER: - logger.warning("Setting max_num_batched_tokens to 2048 " - "for OPENAI_API_SERVER usage context.") - self.max_num_seqs = 1024 self.max_num_batched_tokens = 2048 + logger.warning( + "Setting max_num_batched_tokens to %d for %s usage context.", + self.max_num_batched_tokens, usage_context.value) def _override_v1_engine_config(self, engine_config: VllmConfig) -> None: """ From 9a93973708d7f52f1d1439f8f32b8c1514d18b86 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Tue, 10 Dec 2024 19:16:22 -0500 Subject: [PATCH 305/397] [Bugfix] Fix Mamba multistep (#11071) Signed-off-by: Tyler Michael Smith --- vllm/attention/backends/placeholder_attn.py | 64 ++++++++++++++++++++- vllm/worker/multi_step_model_runner.py | 4 +- 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/vllm/attention/backends/placeholder_attn.py b/vllm/attention/backends/placeholder_attn.py index 888adbffb8578..658039bfc3365 100644 --- a/vllm/attention/backends/placeholder_attn.py +++ b/vllm/attention/backends/placeholder_attn.py @@ -11,7 +11,8 @@ from vllm.multimodal import MultiModalPlaceholderMap if TYPE_CHECKING: - from vllm.worker.model_runner import ModelInputForGPUBuilder + from vllm.worker.model_runner import (ModelInputForGPUBuilder, + ModelInputForGPUWithSamplingMetadata) # Placeholder attention backend for models like Mamba and embedding models that # lack attention. @@ -186,6 +187,67 @@ def decode_metadata(self) -> Optional["PlaceholderAttentionMetadata"]: ) return self._cached_decode_metadata + def advance_step(self, + model_input: "ModelInputForGPUWithSamplingMetadata", + sampled_token_ids: Optional[torch.Tensor], + block_size: int, + num_seqs: int, + num_queries: int, + turn_prefills_into_decodes: bool = False): + """ + Update metadata in-place to advance one decode step. + """ + # When using cudagraph, the num_seqs is padded to the next captured + # batch sized, but num_queries tracks the actual number of requests in + # the batch. For --enforce-eager mode, num_seqs == num_queries + if num_seqs != num_queries: + assert num_seqs > num_queries + assert self.use_cuda_graph + + assert not turn_prefills_into_decodes, \ + ("Multi-Step + Chunked-Prefill is not supported for attention-free" + "models. turn_prefills_into_decodes is a " + "Multi-Step + Chunked-Prefill specific parameter.") + + assert self.seq_lens is not None + assert self.max_decode_seq_len == max(self.seq_lens) + + assert self.num_prefills == 0 + assert self.num_prefill_tokens == 0 + assert self.num_decode_tokens == num_seqs + + assert self.seq_lens is not None + assert len(self.seq_lens) == num_seqs + assert self.seq_lens_tensor is not None + assert self.seq_lens_tensor.shape == (num_seqs, ) + assert self.max_query_len == 1 + assert self.max_prefill_seq_len == 0 + + assert self.query_start_loc is not None + assert self.query_start_loc.shape == (num_queries + 1, ) + assert self.seq_start_loc is not None + assert self.seq_start_loc.shape == (num_seqs + 1, ) + + assert self.context_lens_tensor is not None + assert self.context_lens_tensor.shape == (num_queries, ) + + assert self.block_tables is not None + + # Update query lengths. 
Note that we update only queries and not seqs, + # since tensors may be padded due to captured cuda graph batch size + for i in range(num_queries): + self.seq_lens[i] += 1 + self.max_decode_seq_len = max(self.seq_lens) + + # Update sequences, masking off entries greater than num_queries + device = self.seq_lens_tensor.device + mask = torch.arange(self.seq_lens_tensor.size(0), + device=device) < num_queries + self.seq_lens_tensor += mask.to(self.seq_lens_tensor.dtype) + if sampled_token_ids is not None: + model_input.input_tokens.masked_scatter_( + mask, sampled_token_ids[:num_queries]) + class PlaceholderAttentionMetadataBuilder( AttentionMetadataBuilder[PlaceholderAttentionMetadata]): diff --git a/vllm/worker/multi_step_model_runner.py b/vllm/worker/multi_step_model_runner.py index 3ca0d88a42183..e08a61e31fe42 100644 --- a/vllm/worker/multi_step_model_runner.py +++ b/vllm/worker/multi_step_model_runner.py @@ -29,7 +29,9 @@ logger = init_logger(__name__) -MULTI_STEP_ATTENTION_BACKENDS = ["FLASH_ATTN", "ROCM_FLASH", "FLASHINFER"] +MULTI_STEP_ATTENTION_BACKENDS = [ + "FLASH_ATTN", "ROCM_FLASH", "FLASHINFER", "NO_ATTENTION" +] MULTI_STEP_CHUNKED_PREFILL_ATTENTION_BACKENDS = ["FLASH_ATTN"] def _get_supported_attention_backends(chunked_prefill_enabled: bool) \ From d5c5154fcf4c5d65551c98e458cbb027e5f4b672 Mon Sep 17 00:00:00 2001 From: Aurick Qiao Date: Tue, 10 Dec 2024 21:09:20 -0500 Subject: [PATCH 306/397] [Misc] LoRA + Chunked Prefill (#9057) --- tests/lora/test_chatglm3_tp.py | 9 ++++++--- tests/lora/test_gemma.py | 3 ++- tests/lora/test_llama_tp.py | 6 +++++- tests/lora/test_long_context.py | 3 ++- tests/lora/test_minicpmv.py | 3 ++- tests/lora/test_minicpmv_tp.py | 2 ++ tests/lora/test_mixtral.py | 1 + tests/lora/test_phi.py | 3 ++- tests/lora/test_quant_model.py | 9 ++++++--- vllm/config.py | 3 ++- vllm/core/scheduler.py | 15 ++++++++++++--- vllm/worker/model_runner.py | 12 +++++++----- 12 files changed, 49 insertions(+), 20 deletions(-) diff --git a/tests/lora/test_chatglm3_tp.py b/tests/lora/test_chatglm3_tp.py index f17464573459f..49a527b99ac16 100644 --- a/tests/lora/test_chatglm3_tp.py +++ b/tests/lora/test_chatglm3_tp.py @@ -53,7 +53,8 @@ def test_chatglm3_lora(chatglm3_lora_files): max_loras=4, max_lora_rank=64, tensor_parallel_size=1, - trust_remote_code=True) + trust_remote_code=True, + enable_chunked_prefill=True) output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) for i in range(len(EXPECTED_LORA_OUTPUT)): @@ -73,7 +74,8 @@ def test_chatglm3_lora_tp4(chatglm3_lora_files): max_lora_rank=64, tensor_parallel_size=4, trust_remote_code=True, - fully_sharded_loras=False) + fully_sharded_loras=False, + enable_chunked_prefill=True) output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) for i in range(len(EXPECTED_LORA_OUTPUT)): @@ -93,7 +95,8 @@ def test_chatglm3_lora_tp4_fully_sharded_loras(chatglm3_lora_files): max_lora_rank=64, tensor_parallel_size=4, trust_remote_code=True, - fully_sharded_loras=True) + fully_sharded_loras=True, + enable_chunked_prefill=True) output1 = do_sample(llm, chatglm3_lora_files, lora_id=1) for i in range(len(EXPECTED_LORA_OUTPUT)): assert output1[i] == EXPECTED_LORA_OUTPUT[i] diff --git a/tests/lora/test_gemma.py b/tests/lora/test_gemma.py index 15ec66b0f5502..5ae705e474ec6 100644 --- a/tests/lora/test_gemma.py +++ b/tests/lora/test_gemma.py @@ -37,7 +37,8 @@ def test_gemma_lora(gemma_lora_files): llm = vllm.LLM(MODEL_PATH, max_model_len=1024, enable_lora=True, - max_loras=4) + max_loras=4, + enable_chunked_prefill=True) expected_lora_output = [ 
"more important than knowledge.\nAuthor: Albert Einstein\n", diff --git a/tests/lora/test_llama_tp.py b/tests/lora/test_llama_tp.py index d3ca7f878191a..dfeac380951d8 100644 --- a/tests/lora/test_llama_tp.py +++ b/tests/lora/test_llama_tp.py @@ -78,7 +78,8 @@ def test_llama_lora(sql_lora_files): enable_lora=True, max_num_seqs=16, max_loras=4, - tensor_parallel_size=1) + tensor_parallel_size=1, + enable_chunked_prefill=True) generate_and_test(llm, sql_lora_files) @@ -120,6 +121,7 @@ def test_llama_lora_tp4(sql_lora_files): max_num_seqs=16, max_loras=4, tensor_parallel_size=4, + enable_chunked_prefill=True, ) generate_and_test(llm, sql_lora_files) @@ -135,6 +137,7 @@ def test_llama_lora_tp4_fully_sharded_loras(sql_lora_files): max_loras=4, tensor_parallel_size=4, fully_sharded_loras=True, + enable_chunked_prefill=True, ) generate_and_test(llm, sql_lora_files) @@ -151,5 +154,6 @@ def test_llama_lora_tp4_fully_sharded_enable_bias(sql_lora_files): tensor_parallel_size=4, fully_sharded_loras=True, enable_lora_bias=True, + enable_chunked_prefill=True, ) generate_and_test(llm, sql_lora_files) diff --git a/tests/lora/test_long_context.py b/tests/lora/test_long_context.py index eada902c891f7..e7a34f2ced7ed 100644 --- a/tests/lora/test_long_context.py +++ b/tests/lora/test_long_context.py @@ -124,7 +124,8 @@ def lora_llm(long_context_infos): tensor_parallel_size=4, # FIXME enable async output processor disable_async_output_proc=True, - distributed_executor_backend="mp") + distributed_executor_backend="mp", + enable_chunked_prefill=True) yield llm del llm diff --git a/tests/lora/test_minicpmv.py b/tests/lora/test_minicpmv.py index 2c45ce5141f7d..1f3de9edc0d0f 100644 --- a/tests/lora/test_minicpmv.py +++ b/tests/lora/test_minicpmv.py @@ -67,7 +67,8 @@ def test_minicpmv_lora(minicpmv_lora_files): max_loras=4, max_lora_rank=64, trust_remote_code=True, - gpu_memory_utilization=0.97 # This model is pretty big for CI gpus + gpu_memory_utilization=0.97, # This model is pretty big for CI gpus + enable_chunked_prefill=True, ) output1 = do_sample(llm, minicpmv_lora_files, lora_id=1) for i in range(len(EXPECTED_OUTPUT)): diff --git a/tests/lora/test_minicpmv_tp.py b/tests/lora/test_minicpmv_tp.py index ba29e562e58ec..930f177953a5f 100644 --- a/tests/lora/test_minicpmv_tp.py +++ b/tests/lora/test_minicpmv_tp.py @@ -69,6 +69,7 @@ def test_minicpmv_tp2(minicpmv_lora_files, fully_sharded): tensor_parallel_size=2, trust_remote_code=True, fully_sharded_loras=fully_sharded, + enable_chunked_prefill=True, ) output_tp = do_sample(llm, minicpmv_lora_files, lora_id=1) @@ -89,6 +90,7 @@ def test_minicpmv_tp4(minicpmv_lora_files, fully_sharded): tensor_parallel_size=4, trust_remote_code=True, fully_sharded_loras=fully_sharded, + enable_chunked_prefill=True, ) output_tp = do_sample(llm, minicpmv_lora_files, lora_id=1) for i in range(len(EXPECTED_OUTPUT)): diff --git a/tests/lora/test_mixtral.py b/tests/lora/test_mixtral.py index dddc299da446b..150221dfce6ab 100644 --- a/tests/lora/test_mixtral.py +++ b/tests/lora/test_mixtral.py @@ -47,6 +47,7 @@ def test_mixtral_lora(mixtral_lora_files, tp_size): max_loras=4, distributed_executor_backend="ray", tensor_parallel_size=tp_size, + enable_chunked_prefill=True, ) expected_lora_output = [ diff --git a/tests/lora/test_phi.py b/tests/lora/test_phi.py index 733eff48a9bf3..5a3fcb8d690d9 100644 --- a/tests/lora/test_phi.py +++ b/tests/lora/test_phi.py @@ -53,7 +53,8 @@ def test_phi2_lora(phi2_lora_files): max_model_len=1024, enable_lora=True, max_loras=2, - enforce_eager=True) + 
enforce_eager=True, + enable_chunked_prefill=True) expected_lora_output = [ "SELECT catalog_publisher, COUNT(*) as num_catalogs FROM catalogs GROUP BY catalog_publisher ORDER BY num_catalogs DESC LIMIT 1;", # noqa: E501 diff --git a/tests/lora/test_quant_model.py b/tests/lora/test_quant_model.py index 5432fa4ad0d3a..026269667b473 100644 --- a/tests/lora/test_quant_model.py +++ b/tests/lora/test_quant_model.py @@ -84,7 +84,8 @@ def test_quant_model_lora(tinyllama_lora_files, num_gpus_available, model, tensor_parallel_size=tp_size, gpu_memory_utilization=0.2, #avoid OOM quantization=model.quantization, - trust_remote_code=True) + trust_remote_code=True, + enable_chunked_prefill=True) if model.quantization is None: expected_no_lora_output = [ @@ -176,7 +177,8 @@ def test_quant_model_tp_equality(tinyllama_lora_files, num_gpus_available, tensor_parallel_size=1, gpu_memory_utilization=0.2, #avoid OOM quantization=model.quantization, - trust_remote_code=True) + trust_remote_code=True, + enable_chunked_prefill=True) output_tp1 = do_sample(llm_tp1, tinyllama_lora_files, lora_id=1) del llm_tp1 @@ -189,7 +191,8 @@ def test_quant_model_tp_equality(tinyllama_lora_files, num_gpus_available, max_loras=4, tensor_parallel_size=2, gpu_memory_utilization=0.2, #avoid OOM - quantization=model.quantization) + quantization=model.quantization, + enable_chunked_prefill=True) output_tp2 = do_sample(llm_tp2, tinyllama_lora_files, lora_id=1) del llm_tp2 diff --git a/vllm/config.py b/vllm/config.py index 5fb9563fcf3a3..c66ddbb47f22e 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1698,7 +1698,8 @@ def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig): # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if scheduler_config.chunked_prefill_enabled: - raise ValueError("LoRA is not supported with chunked prefill yet.") + logger.warning("LoRA with chunked prefill is still experimental " + "and may be unstable.") @dataclass diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index d23009dae01ee..94c62743883ec 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -166,9 +166,18 @@ def is_empty(self) -> bool: and not self.blocks_to_swap_out and not self.blocks_to_copy) def _sort_by_lora_ids(self): - self.scheduled_seq_groups = sorted( - self.scheduled_seq_groups, - key=lambda g: (g.seq_group.lora_int_id, g.seq_group.request_id)) + assert 0 <= self.num_prefill_groups <= len(self.scheduled_seq_groups) + + def key_fn(group: ScheduledSequenceGroup): + key = (group.seq_group.lora_int_id, group.seq_group.request_id) + if 0 < self.num_prefill_groups < len(self.scheduled_seq_groups): + # Sort sequence groups so that all prefills come before all + # decodes as required by chunked prefill. 
+ return (not group.seq_group.is_prefill(), *key) + return key + + self.scheduled_seq_groups = sorted(self.scheduled_seq_groups, + key=key_fn) @property def lora_requests(self) -> Set[LoRARequest]: diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 1bc5f65c7127f..551b84435fdc0 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -622,11 +622,13 @@ def _compute_lora_input(self, inter_data: InterDataForSeqGroup, inter_data.lora_requests.add(seq_group_metadata.lora_request) query_len = inter_data.query_lens[seq_idx] inter_data.lora_index_mapping.append([lora_id] * query_len) - inter_data.lora_prompt_mapping.append( - [lora_id] * - (query_len if seq_group_metadata.sampling_params - and seq_group_metadata.sampling_params.prompt_logprobs is not None - else 1)) + sampling_params = seq_group_metadata.sampling_params + if sampling_params and sampling_params.prompt_logprobs is not None: + inter_data.lora_prompt_mapping.append([lora_id] * query_len) + elif not self.chunked_prefill_enabled or seq_group_metadata.do_sample: + inter_data.lora_prompt_mapping.append([lora_id]) + else: + inter_data.lora_prompt_mapping.append([]) def _compute_prompt_adapter_input( self, inter_data: InterDataForSeqGroup, From ffa48c9146fda1e8810d1cfa159e1d70aadae6c6 Mon Sep 17 00:00:00 2001 From: Mor Zusman Date: Wed, 11 Dec 2024 04:53:37 +0200 Subject: [PATCH 307/397] [Model] PP support for Mamba-like models (#10992) Signed-off-by: mzusman --- docs/source/models/supported_models.rst | 6 +- tests/distributed/test_pipeline_parallel.py | 6 +- vllm/config.py | 58 +++++++++---- vllm/model_executor/models/interfaces.py | 37 ++++++++ vllm/model_executor/models/jamba.py | 93 ++++++++++++++------- vllm/model_executor/models/mamba.py | 68 ++++++++++----- vllm/model_executor/models/registry.py | 11 ++- vllm/utils.py | 5 ++ vllm/v1/worker/gpu_model_runner.py | 8 +- vllm/v1/worker/gpu_worker.py | 6 +- vllm/worker/cache_engine.py | 12 +-- 11 files changed, 229 insertions(+), 81 deletions(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 4e5b10967e3bb..6540e023c1ab0 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -128,7 +128,7 @@ Text Generation - FalconMamba - :code:`tiiuae/falcon-mamba-7b`, :code:`tiiuae/falcon-mamba-7b-instruct`, etc. - ✅︎ - - + - ✅︎ * - :code:`GemmaForCausalLM` - Gemma - :code:`google/gemma-2b`, :code:`google/gemma-7b`, etc. @@ -193,7 +193,7 @@ Text Generation - Jamba - :code:`ai21labs/AI21-Jamba-1.5-Large`, :code:`ai21labs/AI21-Jamba-1.5-Mini`, :code:`ai21labs/Jamba-v0.1`, etc. - ✅︎ - - + - ✅︎ * - :code:`LlamaForCausalLM` - Llama 3.1, Llama 3, Llama 2, LLaMA, Yi - :code:`meta-llama/Meta-Llama-3.1-405B-Instruct`, :code:`meta-llama/Meta-Llama-3.1-70B`, :code:`meta-llama/Meta-Llama-3-70B-Instruct`, :code:`meta-llama/Llama-2-70b-hf`, :code:`01-ai/Yi-34B`, etc. @@ -203,7 +203,7 @@ Text Generation - Mamba - :code:`state-spaces/mamba-130m-hf`, :code:`state-spaces/mamba-790m-hf`, :code:`state-spaces/mamba-2.8b-hf`, etc. - - - + - ✅︎ * - :code:`MiniCPMForCausalLM` - MiniCPM - :code:`openbmb/MiniCPM-2B-sft-bf16`, :code:`openbmb/MiniCPM-2B-dpo-bf16`, :code:`openbmb/MiniCPM-S-1B-sft`, etc. 
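A hedged sketch of what the table update above enables (illustrative only; the GPU count, executor backend, and sampling settings are assumptions, not part of the patch):

    from vllm import LLM, SamplingParams

    # With PP support for Mamba-like models, attention-free and hybrid
    # checkpoints can be split across pipeline stages like regular transformers.
    llm = LLM(
        model="ai21labs/Jamba-tiny-dev",   # hybrid attention/Mamba checkpoint
        pipeline_parallel_size=2,          # assumes at least 2 GPUs
        distributed_executor_backend="mp",
    )
    outputs = llm.generate(["The capital of France is"],
                           SamplingParams(max_tokens=8))
    print(outputs[0].outputs[0].text)
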
diff --git a/tests/distributed/test_pipeline_parallel.py b/tests/distributed/test_pipeline_parallel.py index b818ca921fcb0..85d408efafe96 100644 --- a/tests/distributed/test_pipeline_parallel.py +++ b/tests/distributed/test_pipeline_parallel.py @@ -156,13 +156,13 @@ def iter_params(self, model_name: str): # "internlm/internlm-chat-7b": PPTestSettings.fast(), "internlm/internlm2-chat-7b": PPTestSettings.fast(trust_remote_code=True), "inceptionai/jais-13b-chat": PPTestSettings.fast(), - # TODO: Implement PP - # "ai21labs/AI21-Jamba-1.5-Mini": PPTestSettings.fast(), + "ai21labs/Jamba-tiny-dev": PPTestSettings.fast(), "meta-llama/Meta-Llama-3-8B": PPTestSettings.detailed(), "openbmb/MiniCPM-2B-sft-bf16": PPTestSettings.fast(trust_remote_code=True), "openbmb/MiniCPM3-4B": PPTestSettings.fast(trust_remote_code=True), # Uses Llama # "mistralai/Mistral-7B-Instruct-v0.1": PPTestSettings.fast(), + "state-spaces/mamba-130m-hf": PPTestSettings.fast(), "mistralai/Mixtral-8x7B-Instruct-v0.1": PPTestSettings.fast(tp_base=4), "mosaicml/mpt-7b": PPTestSettings.fast(), "nvidia/Minitron-8B-Base": PPTestSettings.fast(), @@ -234,6 +234,8 @@ def iter_params(self, model_name: str): "OpenGVLab/InternVL2-1B", "microsoft/Phi-3-vision-128k-instruct", "fixie-ai/ultravox-v0_3", + # [LANGUAGE GENERATION - HYBRID ARCH] + "ai21labs/Jamba-tiny-dev", ] diff --git a/vllm/config.py b/vllm/config.py index c66ddbb47f22e..2a9f0ebae997d 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -27,8 +27,8 @@ ConfigFormat, get_config, get_hf_image_processor_config, get_hf_text_config, get_pooling_config, get_sentence_transformer_tokenizer_config, is_encoder_decoder, uses_mrope) -from vllm.utils import (GiB_bytes, cuda_device_count_stateless, get_cpu_memory, - print_warning_once, random_uuid, +from vllm.utils import (GiB_bytes, LayerBlockType, cuda_device_count_stateless, + get_cpu_memory, print_warning_once, random_uuid, resolve_obj_by_qualname) if TYPE_CHECKING: @@ -284,6 +284,7 @@ def __init__( self._verify_tokenizer_mode() self.is_attention_free = self._init_attention_free() + self.is_hybrid = self._init_is_hybrid() self.has_inner_state = self._init_has_inner_state() if current_platform.is_neuron(): @@ -340,6 +341,10 @@ def _init_attention_free(self) -> bool: architectures = getattr(self.hf_config, "architectures", []) return ModelRegistry.is_attention_free_model(architectures) + def _init_is_hybrid(self) -> bool: + architectures = getattr(self.hf_config, "architectures", []) + return ModelRegistry.is_hybrid_model(architectures) + def _init_has_inner_state(self) -> bool: architectures = getattr(self.hf_config, "architectures", []) return ModelRegistry.model_has_inner_state(architectures) @@ -669,26 +674,51 @@ def get_num_attention_heads(self, num_heads = getattr(self.hf_text_config, "num_attention_heads", 0) return num_heads // parallel_config.tensor_parallel_size - def get_num_layers(self, parallel_config: "ParallelConfig") -> int: + def get_layers_start_end_indices( + self, parallel_config: "ParallelConfig") -> Tuple[int, int]: from vllm.distributed.utils import get_pp_indices total_num_hidden_layers = getattr(self.hf_text_config, "num_hidden_layers", 0) pp_rank = parallel_config.rank // parallel_config.tensor_parallel_size pp_size = parallel_config.pipeline_parallel_size start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size) - return end - start - - def get_num_attention_layers(self, - parallel_config: "ParallelConfig") -> int: - if self.is_attention_free: - return 0 + return start, end - num_layers = 
self.get_num_layers(parallel_config) + def get_num_layers(self, parallel_config: "ParallelConfig") -> int: + start, end = self.get_layers_start_end_indices(parallel_config) + return end - start - # Transformers supports layers_block_type @property - layers = getattr(self.hf_config, "layers_block_type", - ["attention"] * num_layers) - return len([t for t in layers if t == "attention"]) + def get_num_layers_by_block_type( + self, + parallel_config: "ParallelConfig", + block_type: LayerBlockType = LayerBlockType.attention, + ) -> int: + # This function relies on 'layers_block_type' in hf_config, + # for w/o this attribute, we will need to have workarounds like so + attn_block_type = block_type == LayerBlockType.attention + is_transformer = not self.is_hybrid and not self.is_attention_free + start, end = self.get_layers_start_end_indices(parallel_config) + + if is_transformer: + # Handle the basic case first + return end - start if attn_block_type else 0 + elif self.is_attention_free: + # Attention free + # Note that this code assumes there + # is only one type of attention-free block type. + return 0 if attn_block_type else end - start + else: + # Hybrid model + layers_block_type_value = getattr(self.hf_config, + "layers_block_type", None) + if layers_block_type_value is None: + raise ValueError("The model is an hybrid without a" + "layers_block_type in the hf_config," + "cannot determine the num of " + f"{block_type.value} layers") + + return sum(t == block_type.value + for t in layers_block_type_value[start:end]) def get_multimodal_config(self) -> "MultiModalConfig": """ diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index c3979eab905db..70b78fe64f2d8 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -363,6 +363,43 @@ def is_attention_free( return isinstance(model, IsAttentionFree) +@runtime_checkable +class IsHybrid(Protocol): + """The interface required for all models like Jamba that have both + attention and mamba blocks, indicates that + hf_config has 'layers_block_type'""" + + is_hybrid: ClassVar[Literal[True]] = True + """ + A flag that indicates this model has both mamba and attention blocks + , also indicates that the model's hf_config has + 'layers_block_type' """ + + +@runtime_checkable +class _IsHybridType(Protocol): + is_hybrid: ClassVar[Literal[True]] + + +@overload +def is_hybrid(model: object) -> TypeIs[IsHybrid]: + ... + + +@overload +def is_hybrid(model: Type[object]) -> TypeIs[Type[IsHybrid]]: + ... 
+ + +def is_hybrid( + model: Union[Type[object], object] +) -> Union[TypeIs[Type[IsHybrid]], TypeIs[IsHybrid]]: + if isinstance(model, type): + return isinstance(model, _IsHybridType) + + return isinstance(model, IsHybrid) + + @runtime_checkable class SupportsCrossEncoding(Protocol): """The interface required for all models that support cross encoding.""" diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py index 5d5e8ae1ee532..6bb4c13ab35df 100644 --- a/vllm/model_executor/models/jamba.py +++ b/vllm/model_executor/models/jamba.py @@ -9,6 +9,7 @@ from vllm.attention.layer import Attention from vllm.config import _BATCH_SIZES_TO_CAPTURE, CacheConfig, VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.distributed.parallel_state import get_pp_group from vllm.model_executor.layers.fused_moe import FusedMoE from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import (QKVParallelLinear, @@ -25,9 +26,12 @@ MambaCacheParams) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors +from vllm.utils import LayerBlockType -from .interfaces import HasInnerState, SupportsLoRA -from .utils import maybe_prefix +from .interfaces import HasInnerState, IsHybrid, SupportsLoRA, SupportsPP +from .utils import (is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, + maybe_prefix) KVCache = Tuple[torch.Tensor, torch.Tensor] @@ -281,16 +285,24 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): org_num_embeddings=config.vocab_size, ) - decoder_layers = [] - for i in range(config.num_hidden_layers): - layer_class = ALL_DECODER_LAYER_TYPES[config.layers_block_type[i]] - decoder_layers.append( - layer_class(config, - layer_idx=i, - cache_config=cache_config, - quant_config=quant_config, - prefix=f"{prefix}.layers.{i}")) - self.layers = nn.ModuleList(decoder_layers) + def get_layer(prefix: str): + layer_idx = int(prefix.rsplit(".", 1)[1]) + layer_class = ALL_DECODER_LAYER_TYPES[ + config.layers_block_type[layer_idx]] + return layer_class( + config, + layer_idx, + cache_config, + quant_config=quant_config, + prefix=prefix, + ) + + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, get_layer, prefix=f"{prefix}.layers") + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) + self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -304,26 +316,34 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, mamba_cache_params: MambaCacheParams, + intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: - if inputs_embeds is not None: - hidden_states = inputs_embeds + if get_pp_group().is_first_rank: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None else: - hidden_states = self.get_input_embeddings(input_ids) - residual = None - for i in range(len(self.layers)): + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + residual = intermediate_tensors["residual"] + + kv_cache_index = 0 + mamba_cache_index = 0 + for i in range(self.start_layer, self.end_layer): layer = self.layers[i] kv_cache = None layer_mamba_cache_params = None if 
isinstance(layer, JambaAttentionDecoderLayer): - kv_cache = kv_caches[(i - self.config.attn_layer_offset) // - self.config.attn_layer_period] + kv_cache = kv_caches[kv_cache_index] + kv_cache_index += 1 if isinstance(layer, JambaMambaDecoderLayer): - current_state_layer = i - (1 + - (i - self.config.attn_layer_offset) - // self.config.attn_layer_period) + current_state_layer = mamba_cache_index layer_mamba_cache_params = mamba_cache_params.at_layer_idx( current_state_layer) + mamba_cache_index += 1 hidden_states, residual = layer( positions=positions, @@ -332,11 +352,17 @@ def forward( attn_metadata=attn_metadata, residual=residual, mamba_cache_params=layer_mamba_cache_params) + if not get_pp_group().is_last_rank: + return IntermediateTensors({ + "hidden_states": hidden_states, + "residual": residual + }) hidden_states, _ = self.final_layernorm(hidden_states, residual) return hidden_states -class JambaForCausalLM(nn.Module, HasInnerState, SupportsLoRA): +class JambaForCausalLM(nn.Module, HasInnerState, SupportsLoRA, SupportsPP, + IsHybrid): packed_modules_mapping = { "qkv_proj": [ "q_proj", @@ -368,6 +394,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() self.config = config + self.vllm_config = vllm_config + self.model_config = vllm_config.model_config self.scheduler_config = scheduler_config self.model = JambaModel(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) @@ -390,6 +418,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.vocab_size) self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) @@ -406,10 +437,8 @@ def forward(self, self.scheduler_config.max_num_seqs) if self.scheduler_config else max(_BATCH_SIZES_TO_CAPTURE) + 2) - layers_type = self.config.layers_block_type - num_mamba_layers = sum( - [layer_type == "mamba" for layer_type in layers_type]) - + num_mamba_layers = self.model_config.get_num_layers_by_block_type( + self.vllm_config.parallel_config, LayerBlockType.mamba) self.mamba_cache = MambaCacheManager( self.lm_head.weight.dtype, num_mamba_layers, max_batch_size, *self._get_mamba_cache_shape()) @@ -423,7 +452,7 @@ def forward(self, state_indices_tensor) hidden_states = self.model(input_ids, positions, kv_caches, attn_metadata, mamba_cache_params, - inputs_embeds) + intermediate_tensors, inputs_embeds) return hidden_states def copy_inputs_before_cuda_graphs(self, input_buffers, **kwargs): @@ -504,8 +533,12 @@ def load_weights(self, weights: Iterable[Tuple[str, continue name = name.replace(weight_name, param_name) # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: continue + # Skip layers on other devices. + if is_pp_missing_parameter(name, self): + continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) @@ -520,6 +553,8 @@ def load_weights(self, weights: Iterable[Tuple[str, if weight_name not in name: continue + if is_pp_missing_parameter(name, self): + continue name = name.replace(weight_name, param_name) param = params_dict[name] weight_loader = param.weight_loader @@ -533,6 +568,8 @@ def load_weights(self, weights: Iterable[Tuple[str, # Skip loading extra bias for GPTQ models. 
if name.endswith(".bias") and name not in params_dict: continue + if is_pp_missing_parameter(name, self): + continue param = params_dict[name] weight_loader = getattr(param, "weight_loader", diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index 8bdcd2c5aad1f..1f5cd02711899 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -8,6 +8,7 @@ from vllm.attention.backends.abstract import AttentionMetadata from vllm.config import _BATCH_SIZES_TO_CAPTURE, CacheConfig, VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.distributed.parallel_state import get_pp_group from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.mamba.mamba_mixer import MambaMixer @@ -18,13 +19,16 @@ DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.interfaces import (HasInnerState, - IsAttentionFree) + IsAttentionFree, SupportsPP) from vllm.model_executor.models.mamba_cache import (MambaCacheManager, MambaCacheParams) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors +from vllm.utils import LayerBlockType -from .utils import maybe_prefix +from .utils import (is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, + maybe_prefix) KVCache = Tuple[torch.Tensor, torch.Tensor] @@ -95,15 +99,17 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): org_num_embeddings=config.vocab_size, ) - decoder_layers = [] - for i in range(config.num_hidden_layers): - decoder_layers.append( - MambaDecoderLayer(config, - cache_config=cache_config, - quant_config=quant_config)) - self.layers = nn.ModuleList(decoder_layers) + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, + lambda prefix: MambaDecoderLayer( + config, cache_config=cache_config, quant_config=quant_config), + prefix=f"{prefix}.layers") + self.norm_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.embeddings(input_ids) @@ -114,29 +120,40 @@ def forward( positions: torch.Tensor, attn_metadata: AttentionMetadata, mamba_cache_params: MambaCacheParams, + intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: - - if inputs_embeds is not None: - hidden_states = inputs_embeds + if get_pp_group().is_first_rank: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None else: - hidden_states = self.get_input_embeddings(input_ids) - residual = None + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + residual = intermediate_tensors["residual"] - for i in range(len(self.layers)): + for i in range(self.start_layer, self.end_layer): layer = self.layers[i] hidden_states, residual = layer( positions=positions, hidden_states=hidden_states, attn_metadata=attn_metadata, residual=residual, - mamba_cache_params=mamba_cache_params.at_layer_idx(i)) + 
mamba_cache_params=mamba_cache_params.at_layer_idx( + i - self.start_layer)) + if not get_pp_group().is_last_rank: + return IntermediateTensors({ + "hidden_states": hidden_states, + "residual": residual + }) hidden_states, _ = self.norm_f(hidden_states, residual) return hidden_states -class MambaForCausalLM(nn.Module, HasInnerState, IsAttentionFree): +class MambaForCausalLM(nn.Module, HasInnerState, IsAttentionFree, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config @@ -148,7 +165,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() self.config = config + self.vllm_config = vllm_config self.scheduler_config = scheduler_config + self.model_config = vllm_config.model_config self.backbone = MambaModel(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "backbone")) self.unpadded_vocab_size = config.vocab_size @@ -174,6 +193,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.vocab_size) self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( + self.backbone.make_empty_intermediate_tensors) + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.backbone.get_input_embeddings(input_ids) @@ -189,9 +211,12 @@ def forward(self, max_batch_size = (VllmConfig.get_graph_batch_size( self.scheduler_config.max_num_seqs) if self.scheduler_config else max(_BATCH_SIZES_TO_CAPTURE) + 2) + + num_mamba_layers = self.model_config.get_num_layers_by_block_type( + self.vllm_config.parallel_config, LayerBlockType.mamba) self.mamba_cache = MambaCacheManager( - self.lm_head.weight.dtype, self.config.num_hidden_layers, - max_batch_size, *self._get_mamba_cache_shape()) + self.lm_head.weight.dtype, num_mamba_layers, max_batch_size, + *self._get_mamba_cache_shape()) ( mamba_cache_tensors, @@ -204,7 +229,8 @@ def forward(self, state_indices_tensor) hidden_states = self.backbone(input_ids, positions, attn_metadata, - mamba_cache_params, inputs_embeds) + mamba_cache_params, intermediate_tensors, + inputs_embeds) return hidden_states @@ -252,6 +278,8 @@ def load_weights(self, weights: Iterable[Tuple[str, # Skip loading extra bias for GPTQ models. 
if name.endswith(".bias") and name not in params_dict: continue + if is_pp_missing_parameter(name, self): + continue param = params_dict[name] weight_loader = getattr(param, "weight_loader", diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index e69596aa915b5..4beea4641f5ab 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -21,7 +21,7 @@ from vllm.platforms import current_platform from .adapters import as_embedding_model -from .interfaces import (has_inner_state, is_attention_free, +from .interfaces import (has_inner_state, is_attention_free, is_hybrid, supports_cross_encoding, supports_multimodal, supports_pp) from .interfaces_base import is_pooling_model, is_text_generation_model @@ -218,6 +218,7 @@ class _ModelInfo: supports_pp: bool has_inner_state: bool is_attention_free: bool + is_hybrid: bool @staticmethod def from_model_cls(model: Type[nn.Module]) -> "_ModelInfo": @@ -239,6 +240,7 @@ def from_model_cls(model: Type[nn.Module]) -> "_ModelInfo": supports_pp=supports_pp(model), has_inner_state=has_inner_state(model), is_attention_free=is_attention_free(model), + is_hybrid=is_hybrid(model), ) @@ -484,6 +486,13 @@ def is_attention_free_model( model_cls, _ = self.inspect_model_cls(architectures) return model_cls.is_attention_free + def is_hybrid_model( + self, + architectures: Union[str, List[str]], + ) -> bool: + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.is_hybrid + ModelRegistry = _ModelRegistry({ model_arch: _LazyRegisteredModel( diff --git a/vllm/utils.py b/vllm/utils.py index 7cdb2cb320b05..1882264c19775 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -170,6 +170,11 @@ class Device(enum.Enum): CPU = enum.auto() +class LayerBlockType(enum.Enum): + attention = "attention" + mamba = "mamba" + + class Counter: def __init__(self, start: int = 0) -> None: diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index a3335fa838352..8d9976ded7c5e 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -15,8 +15,8 @@ from vllm.model_executor.model_loader import get_model from vllm.multimodal import MultiModalKwargs from vllm.sampling_params import SamplingType -from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, cdiv, - is_pin_memory_available) +from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, + LayerBlockType, cdiv, is_pin_memory_available) from vllm.v1.attention.backends.flash_attn import (FlashAttentionBackend, FlashAttentionMetadata) from vllm.v1.outputs import ModelRunnerOutput @@ -68,8 +68,8 @@ def __init__( self.max_num_tokens = scheduler_config.max_num_batched_tokens # Model-related. 
- self.num_attn_layers = model_config.get_num_attention_layers( - parallel_config) + self.num_attn_layers = model_config.get_num_layers_by_block_type( + parallel_config, LayerBlockType.attention) self.num_kv_heads = model_config.get_num_kv_heads(parallel_config) self.head_size = model_config.get_head_size() self.hidden_size = model_config.get_hidden_size() diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index d32848c3775ae..49e415ab72e0b 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -14,7 +14,7 @@ from vllm.logger import init_logger from vllm.model_executor import set_random_seed from vllm.platforms import current_platform -from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size +from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, LayerBlockType, get_dtype_size from vllm.v1.core.scheduler import SchedulerOutput from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.worker.gpu_model_runner import GPUModelRunner @@ -260,8 +260,8 @@ def _get_cache_block_size( ) -> int: head_size = model_config.get_head_size() num_heads = model_config.get_num_kv_heads(parallel_config) - num_attention_layers = model_config.get_num_attention_layers( - parallel_config) + num_attention_layers = model_config.get_num_layers_by_block_type( + parallel_config, LayerBlockType.attention) key_cache_block = cache_config.block_size * num_heads * head_size value_cache_block = key_cache_block diff --git a/vllm/worker/cache_engine.py b/vllm/worker/cache_engine.py index ac3270d1c9909..7ccd4571b19df 100644 --- a/vllm/worker/cache_engine.py +++ b/vllm/worker/cache_engine.py @@ -6,8 +6,8 @@ from vllm.attention import get_attn_backend from vllm.config import CacheConfig, DeviceConfig, ModelConfig, ParallelConfig from vllm.logger import init_logger -from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size, - is_pin_memory_available) +from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, LayerBlockType, + get_dtype_size, is_pin_memory_available) logger = init_logger(__name__) @@ -34,8 +34,8 @@ def __init__( self.head_size = model_config.get_head_size() # Models like Jamba, have mixed typed layers, E.g Mamba - self.num_attention_layers = model_config.get_num_attention_layers( - parallel_config) + self.num_attention_layers = model_config.get_num_layers_by_block_type( + parallel_config, LayerBlockType.attention) self.num_kv_heads = model_config.get_num_kv_heads(parallel_config) self.block_size = cache_config.block_size @@ -105,8 +105,8 @@ def get_cache_block_size( ) -> int: head_size = model_config.get_head_size() num_heads = model_config.get_num_kv_heads(parallel_config) - num_attention_layers = model_config.get_num_attention_layers( - parallel_config) + num_attention_layers = model_config.get_num_layers_by_block_type( + parallel_config, LayerBlockType.attention) key_cache_block = cache_config.block_size * num_heads * head_size value_cache_block = key_cache_block From e39400a4b60d28ff5c0a1a5194068c928adcaf98 Mon Sep 17 00:00:00 2001 From: Maximilien de Bayser Date: Wed, 11 Dec 2024 01:51:40 -0300 Subject: [PATCH 308/397] Fix streaming for granite tool call when <|tool_call|> is present (#11069) Signed-off-by: Max de Bayser --- vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py index 00917c866e496..dae481a2154a1 100644 --- 
a/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py @@ -88,7 +88,11 @@ def extract_tool_calls_streaming( ) -> Union[DeltaMessage, None]: start_idx = consume_space(0, current_text) - if not current_text or current_text[start_idx] != '[': + if current_text[start_idx:].startswith(self.bot_token): + start_idx = consume_space(start_idx + len(self.bot_token), + current_text) + if not current_text or start_idx >= len(current_text)\ + or current_text[start_idx] != '[': return DeltaMessage(content=delta_text) # bit mask flags for partial JSON parsing. If the name hasn't been From 2e33fe419186c65a18da6668972d61d7bbc31564 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 11 Dec 2024 13:02:02 +0800 Subject: [PATCH 309/397] [CI/Build] Check transformers v4.47 (#10991) Signed-off-by: DarkLight1337 --- requirements-test.txt | 4 ++-- .../vision_language/mm_processor_kwargs/test_idefics3.py | 9 --------- .../models/embedding/vision_language/test_llava_next.py | 2 +- 3 files changed, 3 insertions(+), 12 deletions(-) diff --git a/requirements-test.txt b/requirements-test.txt index 38a064bca449a..8ceb705cdffd7 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -506,7 +506,7 @@ tiktoken==0.7.0 # mistral-common timm==1.0.11 # via -r requirements-test.in -tokenizers==0.20.3 +tokenizers==0.21.0 # via transformers torch==2.5.1 # via @@ -534,7 +534,7 @@ tqdm==4.66.6 # transformers tqdm-multiprocess==0.0.11 # via lm-eval -transformers==4.46.3 +transformers==4.47.0 # via # lm-eval # peft diff --git a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_idefics3.py b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_idefics3.py index 31896bfd13e8c..c71a2d359043d 100644 --- a/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_idefics3.py +++ b/tests/models/decoder_only/vision_language/mm_processor_kwargs/test_idefics3.py @@ -3,7 +3,6 @@ import pytest import torch -import transformers from transformers import AutoImageProcessor, AutoTokenizer from vllm.inputs import InputContext, token_inputs @@ -36,8 +35,6 @@ def get_max_idefics3_image_tokens(): return get_max_idefics3_image_tokens -@pytest.mark.skipif(transformers.__version__ < "4.46.0", - reason="Model introduced in HF >= 4.46.0") @pytest.mark.parametrize("model", models) @pytest.mark.parametrize("longest_edge", [None, 168, 336, 400, 2 * 336]) def test_input_mapper_override(model: str, image_assets: _ImageAssets, @@ -77,8 +74,6 @@ def test_input_mapper_override(model: str, image_assets: _ImageAssets, assert torch.all(hf_result["pixel_values"] == vllm_result["pixel_values"]) -@pytest.mark.skipif(transformers.__version__ < "4.46.0", - reason="Model introduced in HF >= 4.46.0") @pytest.mark.parametrize("model", models) @pytest.mark.parametrize("longest_edge, expected_max_tokens", [ (None, 2873), @@ -107,8 +102,6 @@ def test_max_tokens_override(get_max_idefics3_image_tokens, model: str, assert expected_max_tokens == actual_max_tokens -@pytest.mark.skipif(transformers.__version__ < "4.46.0", - reason="Model introduced in HF >= 4.46.0") @pytest.mark.parametrize("model", models) @pytest.mark.parametrize("longest_edge, toks_per_img, num_imgs", [ (168, 169, 1), @@ -143,8 +136,6 @@ def test_dummy_data_override(dummy_data_for_idefics3, model: str, assert img_tok_count == toks_per_img * num_imgs -@pytest.mark.skipif(transformers.__version__ < "4.46.0", - reason="Model introduced in HF >= 4.46.0") @pytest.mark.parametrize("model", models) 
@pytest.mark.parametrize("longest_edge,expected_toks_per_img,num_imgs", [ (336, 169 * (1**2 + 1), 1), diff --git a/tests/models/embedding/vision_language/test_llava_next.py b/tests/models/embedding/vision_language/test_llava_next.py index 329c6ba279f89..693abd7252d5e 100644 --- a/tests/models/embedding/vision_language/test_llava_next.py +++ b/tests/models/embedding/vision_language/test_llava_next.py @@ -86,7 +86,7 @@ def _run_test( ) -@pytest.mark.skipif(transformers.__version__.startswith("4.46"), +@pytest.mark.skipif(transformers.__version__ >= "4.46", reason="Model broken with changes in transformers 4.46") @pytest.mark.core_model @pytest.mark.parametrize("model", MODELS) From 3fb4b4f1634a896653acc12c72b8e5d6d87a8f82 Mon Sep 17 00:00:00 2001 From: "Kevin H. Luu" Date: Wed, 11 Dec 2024 00:39:53 -0800 Subject: [PATCH 310/397] [ci/build] Fix AMD CI dependencies (#11087) --- requirements-rocm.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements-rocm.txt b/requirements-rocm.txt index 121123611d2da..ccc9062341772 100644 --- a/requirements-rocm.txt +++ b/requirements-rocm.txt @@ -5,7 +5,8 @@ awscli boto3 botocore +datasets ray >= 2.10.0 peft pytest-asyncio -tensorizer>=2.9.0 \ No newline at end of file +tensorizer>=2.9.0 From 9974fca047bb332ec68377be4579ea515a300d69 Mon Sep 17 00:00:00 2001 From: "Kevin H. Luu" Date: Wed, 11 Dec 2024 01:01:53 -0800 Subject: [PATCH 311/397] [ci/build] Fix entrypoints test and pin outlines version (#11088) --- requirements-common.txt | 2 +- .../guided_decoding/outlines_logits_processors.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-common.txt b/requirements-common.txt index c71fc458aca13..792cd58e80669 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -18,7 +18,7 @@ prometheus_client >= 0.18.0 prometheus-fastapi-instrumentator >= 7.0.0 tiktoken >= 0.6.0 # Required for DBRX tokenizer lm-format-enforcer >= 0.10.9, < 0.11 -outlines >= 0.1.8 +outlines == 0.1.9 xgrammar >= 0.1.6; platform_machine == "x86_64" typing_extensions >= 4.10 filelock >= 3.16.1 # need to contain https://github.com/tox-dev/filelock/pull/317 diff --git a/vllm/model_executor/guided_decoding/outlines_logits_processors.py b/vllm/model_executor/guided_decoding/outlines_logits_processors.py index 1f0dbe024609d..b63fed1c8a8c3 100644 --- a/vllm/model_executor/guided_decoding/outlines_logits_processors.py +++ b/vllm/model_executor/guided_decoding/outlines_logits_processors.py @@ -25,7 +25,7 @@ from outlines import grammars from outlines.caching import cache from outlines.fsm.guide import CFGGuide, Generate, Guide, RegexGuide, Write -from outlines.fsm.json_schema import build_regex_from_schema +from outlines_core.fsm.json_schema import build_regex_from_schema from pydantic import BaseModel from transformers import PreTrainedTokenizerBase From 61b1d2f6aef8e29c6a0d795a9c6682d525f4d8cc Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Wed, 11 Dec 2024 04:26:36 -0500 Subject: [PATCH 312/397] [Core] v1: Use atexit to handle engine core client shutdown (#11076) Signed-off-by: Russell Bryant --- vllm/v1/engine/core_client.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index ee89cece73141..4d96b323d1662 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -1,3 +1,4 @@ +import atexit import multiprocessing from typing import List, Union @@ -157,6 +158,7 @@ def __init__( should_shutdown=self.should_shutdown, **kwargs, ) + 
atexit.register(self.shutdown) def shutdown(self): # Send shutdown signal to background process. From 2e32f5d28db3cd79f6a421f640e083be1f9468b7 Mon Sep 17 00:00:00 2001 From: B-201 Date: Wed, 11 Dec 2024 17:27:07 +0800 Subject: [PATCH 313/397] [Bugfix] Fix Idefics3 fails during multi-image inference (#11080) Signed-off-by: B-201 --- vllm/model_executor/models/idefics3.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index e5d2edbd81eb1..17e772e7faa32 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -60,7 +60,8 @@ class Idefics3ImagePixelInputs(TypedDict): type: Literal["pixel_values"] data: torch.Tensor """ - Shape: `(batch_size * num_images, num_channels, height, width)` + Shape: `(batch_size * num_images * num_patches, + num_channels, height, width)` """ pixel_attention_mask: Optional[torch.BoolTensor] @@ -520,13 +521,17 @@ def _parse_and_validate_image_input( raise ValueError("Incorrect type of pixel values. " f"Got type: {type(pixel_values)}") - return Idefics3ImagePixelInputs(type="pixel_values", - data=self._validate_pixel_values( - flatten_bn(pixel_values, - concat=True)), - pixel_attention_mask=flatten_bn( - pixel_attention_mask, - concat=True)) + if isinstance(pixel_values, list): + pixel_values = torch.cat(pixel_values, dim=1) + pixel_attention_mask = torch.cat(pixel_attention_mask, dim=1) + else: + pixel_values = flatten_bn(pixel_values) + pixel_attention_mask = flatten_bn(pixel_attention_mask) + + return Idefics3ImagePixelInputs( + type="pixel_values", + data=self._validate_pixel_values(pixel_values), + pixel_attention_mask=pixel_attention_mask) raise AssertionError("This line should be unreachable.") From 40766ca1b8b0ef92e220595bda96c4336b597e5b Mon Sep 17 00:00:00 2001 From: Rafael Vasquez Date: Wed, 11 Dec 2024 04:27:39 -0500 Subject: [PATCH 314/397] [Bugfix]: Clamp `-inf` logprob values in prompt_logprobs (#11073) Signed-off-by: Rafael Vasquez --- vllm/entrypoints/openai/serving_completion.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index c54d5f07cf58c..ee97d35f2b087 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -392,6 +392,12 @@ def request_output_to_completion_response( prompt_token_ids = final_res.prompt_token_ids assert prompt_token_ids is not None prompt_logprobs = final_res.prompt_logprobs + if prompt_logprobs: + for logprob_dict in prompt_logprobs: + if logprob_dict: + for logprob_values in logprob_dict.values(): + if logprob_values.logprob == float('-inf'): + logprob_values.logprob = -9999.0 prompt_text = final_res.prompt token_ids: GenericSequence[int] From 8f10d5e3930f05c2057a831cd80ba24c52b8ceef Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 11 Dec 2024 17:28:00 +0800 Subject: [PATCH 315/397] [Misc] Split up pooling tasks (#10820) Signed-off-by: DarkLight1337 --- docs/source/index.rst | 2 + docs/source/models/generative_models.rst | 146 ++++++++++++++++ docs/source/models/pooling_models.rst | 99 +++++++++++ docs/source/models/supported_models.rst | 157 ++++++++++++------ docs/source/usage/compatibility_matrix.rst | 12 +- examples/offline_inference_embedding.py | 7 +- ...ine_inference_vision_language_embedding.py | 4 +- tests/compile/test_basic_correctness.py | 4 +- tests/core/test_scheduler_encoder_decoder.py | 2 
+- .../openai/test_vision_embedding.py | 2 +- .../embedding/language/test_embedding.py | 2 +- .../models/embedding/language/test_scoring.py | 12 +- .../vision_language/test_dse_qwen2_vl.py | 2 +- .../vision_language/test_llava_next.py | 2 +- .../embedding/vision_language/test_phi3v.py | 2 +- tests/test_config.py | 17 +- vllm/config.py | 137 ++++++++++----- vllm/core/scheduler.py | 2 +- vllm/engine/arg_utils.py | 7 +- vllm/engine/llm_engine.py | 4 +- vllm/entrypoints/llm.py | 53 +++--- vllm/entrypoints/openai/api_server.py | 8 +- vllm/entrypoints/openai/run_batch.py | 4 +- vllm/model_executor/model_loader/utils.py | 2 +- vllm/v1/engine/core.py | 2 +- vllm/worker/cpu_worker.py | 2 +- vllm/worker/worker.py | 2 +- 27 files changed, 527 insertions(+), 168 deletions(-) create mode 100644 docs/source/models/generative_models.rst create mode 100644 docs/source/models/pooling_models.rst diff --git a/docs/source/index.rst b/docs/source/index.rst index ebf1361976c5e..842013d6d49c4 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -94,6 +94,8 @@ Documentation :caption: Models models/supported_models + models/generative_models + models/pooling_models models/adding_model models/enabling_multimodal_inputs diff --git a/docs/source/models/generative_models.rst b/docs/source/models/generative_models.rst new file mode 100644 index 0000000000000..fb71185600863 --- /dev/null +++ b/docs/source/models/generative_models.rst @@ -0,0 +1,146 @@ +.. _generative_models: + +Generative Models +================= + +vLLM provides first-class support for generative models, which covers most of LLMs. + +In vLLM, generative models implement the :class:`~vllm.model_executor.models.VllmModelForTextGeneration` interface. +Based on the final hidden states of the input, these models output log probabilities of the tokens to generate, +which are then passed through :class:`~vllm.model_executor.layers.Sampler` to obtain the final text. + +Offline Inference +----------------- + +The :class:`~vllm.LLM` class provides various methods for offline inference. +See :ref:`Engine Arguments ` for a list of options when initializing the model. + +For generative models, the only supported :code:`task` option is :code:`"generate"`. +Usually, this is automatically inferred so you don't have to specify it. + +``LLM.generate`` +^^^^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.generate` method is available to all generative models in vLLM. +It is similar to `its counterpart in HF Transformers `__, +except that tokenization and detokenization are also performed automatically. + +.. code-block:: python + + llm = LLM(model="facebook/opt-125m") + outputs = llm.generate("Hello, my name is") + + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +You can optionally control the language generation by passing :class:`~vllm.SamplingParams`. +For example, you can use greedy sampling by setting :code:`temperature=0`: + +.. code-block:: python + + llm = LLM(model="facebook/opt-125m") + params = SamplingParams(temperature=0) + outputs = llm.generate("Hello, my name is", params) + + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +A code example can be found in `examples/offline_inference.py `_. 
+ +``LLM.beam_search`` +^^^^^^^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.beam_search` method implements `beam search `__ on top of :class:`~vllm.LLM.generate`. +For example, to search using 5 beams and output at most 50 tokens: + +.. code-block:: python + + llm = LLM(model="facebook/opt-125m") + params = BeamSearchParams(beam_width=5, max_tokens=50) + outputs = llm.generate("Hello, my name is", params) + + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +``LLM.chat`` +^^^^^^^^^^^^ + +The :class:`~vllm.LLM.chat` method implements chat functionality on top of :class:`~vllm.LLM.generate`. +In particular, it accepts input similar to `OpenAI Chat Completions API `__ +and automatically applies the model's `chat template `__ to format the prompt. + +.. important:: + + In general, only instruction-tuned models have a chat template. + Base models may perform poorly as they are not trained to respond to the chat conversation. + +.. code-block:: python + + llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct") + conversation = [ + { + "role": "system", + "content": "You are a helpful assistant" + }, + { + "role": "user", + "content": "Hello" + }, + { + "role": "assistant", + "content": "Hello! How can I assist you today?" + }, + { + "role": "user", + "content": "Write an essay about the importance of higher education.", + }, + ] + outputs = llm.chat(conversation) + + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + +A code example can be found in `examples/offline_inference_chat.py `_. + +If the model doesn't have a chat template or you want to specify another one, +you can explicitly pass a chat template: + +.. code-block:: python + + from vllm.entrypoints.chat_utils import load_chat_template + + # You can find a list of existing chat templates under `examples/` + custom_template = load_chat_template(chat_template="") + print("Loaded chat template:", custom_template) + + outputs = llm.chat(conversation, chat_template=custom_template) + +Online Inference +---------------- + +Our `OpenAI Compatible Server <../serving/openai_compatible_server>`__ can be used for online inference. +Please click on the above link for more details on how to launch the server. + +Completions API +^^^^^^^^^^^^^^^ + +Our Completions API is similar to ``LLM.generate`` but only accepts text. +It is compatible with `OpenAI Completions API `__ +so that you can use OpenAI client to interact with it. +A code example can be found in `examples/openai_completion_client.py `_. + +Chat API +^^^^^^^^ + +Our Chat API is similar to ``LLM.chat``, accepting both text and :ref:`multi-modal inputs `. +It is compatible with `OpenAI Chat Completions API `__ +so that you can use OpenAI client to interact with it. +A code example can be found in `examples/openai_chat_completion_client.py `_. diff --git a/docs/source/models/pooling_models.rst b/docs/source/models/pooling_models.rst new file mode 100644 index 0000000000000..7fa66274c3c5a --- /dev/null +++ b/docs/source/models/pooling_models.rst @@ -0,0 +1,99 @@ +.. _pooling_models: + +Pooling Models +============== + +vLLM also supports pooling models, including embedding, reranking and reward models. + +In vLLM, pooling models implement the :class:`~vllm.model_executor.models.VllmModelForPooling` interface. 
+These models use a :class:`~vllm.model_executor.layers.Pooler` to aggregate the final hidden states of the input +before returning them. + +.. note:: + + We currently support pooling models primarily as a matter of convenience. + As shown in the :ref:`Compatibility Matrix `, most vLLM features are not applicable to + pooling models as they only work on the generation or decode stage, so performance may not improve as much. + +Offline Inference +----------------- + +The :class:`~vllm.LLM` class provides various methods for offline inference. +See :ref:`Engine Arguments ` for a list of options when initializing the model. + +For pooling models, we support the following :code:`task` options: + +- Embedding (:code:`"embed"` / :code:`"embedding"`) +- Classification (:code:`"classify"`) +- Sentence Pair Scoring (:code:`"score"`) +- Reward Modeling (:code:`"reward"`) + +The selected task determines the default :class:`~vllm.model_executor.layers.Pooler` that is used: + +- Embedding: Extract only the hidden states corresponding to the last token, and apply normalization. +- Classification: Extract only the hidden states corresponding to the last token, and apply softmax. +- Sentence Pair Scoring: Extract only the hidden states corresponding to the last token, and apply softmax. +- Reward Modeling: Extract all of the hidden states and return them directly. + +When loading `Sentence Transformers `__ models, +we attempt to override the default pooler based on its Sentence Transformers configuration file (:code:`modules.json`). + +You can customize the model's pooling method via the :code:`override_pooler_config` option, +which takes priority over both the model's and Sentence Transformers's defaults. + +``LLM.encode`` +^^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.encode` method is available to all pooling models in vLLM. +It returns the aggregated hidden states directly. + +.. code-block:: python + + llm = LLM(model="intfloat/e5-mistral-7b-instruct", task="embed") + outputs = llm.encode("Hello, my name is") + + outputs = model.encode(prompts) + for output in outputs: + embeddings = output.outputs.embedding + print(f"Prompt: {prompt!r}, Embeddings (size={len(embeddings)}: {embeddings!r}") + +A code example can be found in `examples/offline_inference_embedding.py `_. + +``LLM.score`` +^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.score` method outputs similarity scores between sentence pairs. +It is primarily designed for `cross-encoder models `__. +These types of models serve as rerankers between candidate query-document pairs in RAG systems. + +.. note:: + + vLLM can only perform the model inference component (e.g. embedding, reranking) of RAG. + To handle RAG at a higher level, you should use integration frameworks such as `LangChain `_. + +You can use `these tests `_ as reference. + +Online Inference +---------------- + +Our `OpenAI Compatible Server <../serving/openai_compatible_server>`__ can be used for online inference. +Please click on the above link for more details on how to launch the server. + +Embeddings API +^^^^^^^^^^^^^^ + +Our Embeddings API is similar to ``LLM.encode``, accepting both text and :ref:`multi-modal inputs `. + +The text-only API is compatible with `OpenAI Embeddings API `__ +so that you can use OpenAI client to interact with it. +A code example can be found in `examples/openai_embedding_client.py `_. + +The multi-modal API is an extension of the `OpenAI Embeddings API `__ +that incorporates `OpenAI Chat Completions API `__, +so it is not part of the OpenAI standard. 
Please see :ref:`this page ` for more details on how to use it. + +Score API +^^^^^^^^^ + +Our Score API is similar to ``LLM.score``. +Please see `this page <../serving/openai_compatible_server.html#score-api-for-cross-encoder-models>`__ for more details on how to use it. diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 6540e023c1ab0..b9957cf9563b1 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -3,11 +3,21 @@ Supported Models ================ -vLLM supports a variety of generative and embedding models from `HuggingFace (HF) Transformers `_. -This page lists the model architectures that are currently supported by vLLM. +vLLM supports generative and pooling models across various tasks. +If a model supports more than one task, you can set the task via the :code:`--task` argument. + +For each task, we list the model architectures that have been implemented in vLLM. Alongside each architecture, we include some popular models that use it. -For other models, you can check the :code:`config.json` file inside the model repository. +Loading a Model +^^^^^^^^^^^^^^^ + +HuggingFace Hub ++++++++++++++++ + +By default, vLLM loads models from `HuggingFace (HF) Hub `_. + +To determine whether a given model is supported, you can check the :code:`config.json` file inside the HF repository. If the :code:`"architectures"` field contains a model architecture listed below, then it should be supported in theory. .. tip:: @@ -17,38 +27,57 @@ If the :code:`"architectures"` field contains a model architecture listed below, from vllm import LLM - llm = LLM(model=...) # Name or path of your model + # For generative models (task=generate) only + llm = LLM(model=..., task="generate") # Name or path of your model output = llm.generate("Hello, my name is") print(output) - If vLLM successfully generates text, it indicates that your model is supported. + # For pooling models (task={embed,classify,reward}) only + llm = LLM(model=..., task="embed") # Name or path of your model + output = llm.encode("Hello, my name is") + print(output) + + If vLLM successfully returns text (for generative models) or hidden states (for pooling models), it indicates that your model is supported. Otherwise, please refer to :ref:`Adding a New Model ` and :ref:`Enabling Multimodal Inputs ` for instructions on how to implement your model in vLLM. Alternatively, you can `open an issue on GitHub `_ to request vLLM support. -.. note:: - To use models from `ModelScope `_ instead of HuggingFace Hub, set an environment variable: +ModelScope +++++++++++ - .. code-block:: shell +To use models from `ModelScope `_ instead of HuggingFace Hub, set an environment variable: - $ export VLLM_USE_MODELSCOPE=True +.. code-block:: shell - And use with :code:`trust_remote_code=True`. + $ export VLLM_USE_MODELSCOPE=True - .. code-block:: python +And use with :code:`trust_remote_code=True`. - from vllm import LLM +.. 
code-block:: python - llm = LLM(model=..., revision=..., trust_remote_code=True) # Name or path of your model - output = llm.generate("Hello, my name is") - print(output) + from vllm import LLM + + llm = LLM(model=..., revision=..., task=..., trust_remote_code=True) -Text-only Language Models -^^^^^^^^^^^^^^^^^^^^^^^^^ + # For generative models (task=generate) only + output = llm.generate("Hello, my name is") + print(output) -Text Generation ---------------- + # For pooling models (task={embed,classify,reward}) only + output = llm.encode("Hello, my name is") + print(output) + +List of Text-only Language Models +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Generative Models ++++++++++++++++++ + +See :ref:`this page ` for more information on how to use generative models. + +Text Generation (``--task generate``) +------------------------------------- .. list-table:: :widths: 25 25 50 5 5 @@ -328,8 +357,24 @@ Text Generation .. note:: Currently, the ROCm version of vLLM supports Mistral and Mixtral only for context lengths up to 4096. -Text Embedding --------------- +Pooling Models +++++++++++++++ + +See :ref:`this page ` for more information on how to use pooling models. + +.. important:: + Since some model architectures support both generative and pooling tasks, + you should explicitly specify the task type to ensure that the model is used in pooling mode instead of generative mode. + +Text Embedding (``--task embed``) +--------------------------------- + +Any text generation model can be converted into an embedding model by passing :code:`--task embed`. + +.. note:: + To get the best results, you should use pooling models that are specifically trained as such. + +The following table lists those that are tested in vLLM. .. list-table:: :widths: 25 25 50 5 5 @@ -371,13 +416,6 @@ Text Embedding - - -.. important:: - Some model architectures support both generation and embedding tasks. - In this case, you have to pass :code:`--task embedding` to run the model in embedding mode. - -.. tip:: - You can override the model's pooling method by passing :code:`--override-pooler-config`. - .. note:: :code:`ssmits/Qwen2-7B-Instruct-embed-base` has an improperly defined Sentence Transformers config. You should manually set mean pooling by passing :code:`--override-pooler-config '{"pooling_type": "MEAN"}'`. @@ -389,8 +427,8 @@ Text Embedding On the other hand, its 1.5B variant (:code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`) uses causal attention despite being described otherwise on its model card. -Reward Modeling ---------------- +Reward Modeling (``--task reward``) +----------------------------------- .. list-table:: :widths: 25 25 50 5 5 @@ -416,11 +454,8 @@ Reward Modeling For process-supervised reward models such as :code:`peiyi9979/math-shepherd-mistral-7b-prm`, the pooling config should be set explicitly, e.g.: :code:`--override-pooler-config '{"pooling_type": "STEP", "step_tag_id": 123, "returned_token_ids": [456, 789]}'`. -.. note:: - As an interim measure, these models are supported in both offline and online inference via Embeddings API. - -Classification ---------------- +Classification (``--task classify``) +------------------------------------ .. list-table:: :widths: 25 25 50 5 5 @@ -437,11 +472,8 @@ Classification - ✅︎ - ✅︎ -.. note:: - As an interim measure, these models are supported in both offline and online inference via Embeddings API. - -Sentence Pair Scoring ---------------------- +Sentence Pair Scoring (``--task score``) +---------------------------------------- .. 
list-table:: :widths: 25 25 50 5 5 @@ -468,13 +500,10 @@ Sentence Pair Scoring - - -.. note:: - These models are supported in both offline and online inference via Score API. - .. _supported_mm_models: -Multimodal Language Models -^^^^^^^^^^^^^^^^^^^^^^^^^^ +List of Multimodal Language Models +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The following modalities are supported depending on the model: @@ -491,8 +520,15 @@ On the other hand, modalities separated by :code:`/` are mutually exclusive. - e.g.: :code:`T / I` means that the model supports text-only and image-only inputs, but not text-with-image inputs. -Text Generation ---------------- +See :ref:`this page ` on how to pass multi-modal inputs to the model. + +Generative Models ++++++++++++++++++ + +See :ref:`this page ` for more information on how to use generative models. + +Text Generation (``--task generate``) +------------------------------------- .. list-table:: :widths: 25 25 15 20 5 5 5 @@ -696,8 +732,24 @@ Text Generation The official :code:`openbmb/MiniCPM-V-2` doesn't work yet, so we need to use a fork (:code:`HwwwH/MiniCPM-V-2`) for now. For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630 -Multimodal Embedding --------------------- +Pooling Models +++++++++++++++ + +See :ref:`this page ` for more information on how to use pooling models. + +.. important:: + Since some model architectures support both generative and pooling tasks, + you should explicitly specify the task type to ensure that the model is used in pooling mode instead of generative mode. + +Text Embedding (``--task embed``) +--------------------------------- + +Any text generation model can be converted into an embedding model by passing :code:`--task embed`. + +.. note:: + To get the best results, you should use pooling models that are specifically trained as such. + +The following table lists those that are tested in vLLM. .. list-table:: :widths: 25 25 15 25 5 5 @@ -728,12 +780,7 @@ Multimodal Embedding - - ✅︎ -.. important:: - Some model architectures support both generation and embedding tasks. - In this case, you have to pass :code:`--task embedding` to run the model in embedding mode. - -.. tip:: - You can override the model's pooling method by passing :code:`--override-pooler-config`. 
+---- Model Support Policy ===================== diff --git a/docs/source/usage/compatibility_matrix.rst b/docs/source/usage/compatibility_matrix.rst index a93632ff36fb8..04dd72b1e3527 100644 --- a/docs/source/usage/compatibility_matrix.rst +++ b/docs/source/usage/compatibility_matrix.rst @@ -39,13 +39,13 @@ Feature x Feature - :abbr:`prmpt adptr (Prompt Adapter)` - :ref:`SD ` - CUDA graph - - :abbr:`emd (Embedding Models)` + - :abbr:`pooling (Pooling Models)` - :abbr:`enc-dec (Encoder-Decoder Models)` - :abbr:`logP (Logprobs)` - :abbr:`prmpt logP (Prompt Logprobs)` - :abbr:`async output (Async Output Processing)` - multi-step - - :abbr:`mm (Multimodal)` + - :abbr:`mm (Multimodal Inputs)` - best-of - beam-search - :abbr:`guided dec (Guided Decoding)` @@ -151,7 +151,7 @@ Feature x Feature - - - - * - :abbr:`emd (Embedding Models)` + * - :abbr:`pooling (Pooling Models)` - ✗ - ✗ - ✗ @@ -253,7 +253,7 @@ Feature x Feature - - - - * - :abbr:`mm (Multimodal)` + * - :abbr:`mm (Multimodal Inputs)` - ✅ - `✗ `__ - `✗ `__ @@ -386,7 +386,7 @@ Feature x Hardware - ✅ - ✗ - ✅ - * - :abbr:`emd (Embedding Models)` + * - :abbr:`pooling (Pooling Models)` - ✅ - ✅ - ✅ @@ -402,7 +402,7 @@ Feature x Hardware - ✅ - ✅ - ✗ - * - :abbr:`mm (Multimodal)` + * - :abbr:`mm (Multimodal Inputs)` - ✅ - ✅ - ✅ diff --git a/examples/offline_inference_embedding.py b/examples/offline_inference_embedding.py index ae158eef2ca4c..17f6d992073d7 100644 --- a/examples/offline_inference_embedding.py +++ b/examples/offline_inference_embedding.py @@ -9,7 +9,12 @@ ] # Create an LLM. -model = LLM(model="intfloat/e5-mistral-7b-instruct", enforce_eager=True) +model = LLM( + model="intfloat/e5-mistral-7b-instruct", + task="embed", # You should pass task="embed" for embedding models + enforce_eager=True, +) + # Generate embedding. The output is a list of PoolingRequestOutputs. outputs = model.encode(prompts) # Print the outputs. 
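For orientation, a minimal self-contained version of the updated :code:`examples/offline_inference_embedding.py` flow is sketched below; the prompt list is an assumption, since the script's full prompt list is not reproduced in this hunk.

.. code-block:: python

    from vllm import LLM

    # Example prompts (assumed; the actual script defines its own list).
    prompts = ["Hello, my name is", "The capital of France is"]

    # Create an LLM, passing task="embed" as the updated example does.
    model = LLM(
        model="intfloat/e5-mistral-7b-instruct",
        task="embed",
        enforce_eager=True,
    )

    # Generate embeddings. The output is a list of PoolingRequestOutputs.
    outputs = model.encode(prompts)

    # Print the embedding dimensionality for each prompt.
    for prompt, output in zip(prompts, outputs):
        embedding = output.outputs.embedding
        print(f"Prompt: {prompt!r}, embedding size: {len(embedding)}")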
diff --git a/examples/offline_inference_vision_language_embedding.py b/examples/offline_inference_vision_language_embedding.py index e1732d045f949..bf466109f0981 100644 --- a/examples/offline_inference_vision_language_embedding.py +++ b/examples/offline_inference_vision_language_embedding.py @@ -59,7 +59,7 @@ def run_e5_v(query: Query): llm = LLM( model="royokong/e5-v", - task="embedding", + task="embed", max_model_len=4096, ) @@ -88,7 +88,7 @@ def run_vlm2vec(query: Query): llm = LLM( model="TIGER-Lab/VLM2Vec-Full", - task="embedding", + task="embed", trust_remote_code=True, mm_processor_kwargs={"num_crops": 4}, ) diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index 99781c55b672e..87d5aefea6cb4 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -55,7 +55,7 @@ class TestSetting: # embedding model TestSetting( model="BAAI/bge-multilingual-gemma2", - model_args=["--task", "embedding"], + model_args=["--task", "embed"], pp_size=1, tp_size=1, attn_backend="FLASHINFER", @@ -65,7 +65,7 @@ class TestSetting: # encoder-based embedding model (BERT) TestSetting( model="BAAI/bge-base-en-v1.5", - model_args=["--task", "embedding"], + model_args=["--task", "embed"], pp_size=1, tp_size=1, attn_backend="XFORMERS", diff --git a/tests/core/test_scheduler_encoder_decoder.py b/tests/core/test_scheduler_encoder_decoder.py index 7cd0416d321ef..16bea54936bc8 100644 --- a/tests/core/test_scheduler_encoder_decoder.py +++ b/tests/core/test_scheduler_encoder_decoder.py @@ -37,7 +37,7 @@ def test_scheduler_schedule_simple_encoder_decoder(): num_seq_group = 4 max_model_len = 16 scheduler_config = SchedulerConfig( - task="generate", + "generate", max_num_batched_tokens=64, max_num_seqs=num_seq_group, max_model_len=max_model_len, diff --git a/tests/entrypoints/openai/test_vision_embedding.py b/tests/entrypoints/openai/test_vision_embedding.py index 425f2a10ec855..43c63daacb17f 100644 --- a/tests/entrypoints/openai/test_vision_embedding.py +++ b/tests/entrypoints/openai/test_vision_embedding.py @@ -27,7 +27,7 @@ def server(): args = [ "--task", - "embedding", + "embed", "--dtype", "bfloat16", "--max-model-len", diff --git a/tests/models/embedding/language/test_embedding.py b/tests/models/embedding/language/test_embedding.py index 5ef8540265d14..f458ef5ef556d 100644 --- a/tests/models/embedding/language/test_embedding.py +++ b/tests/models/embedding/language/test_embedding.py @@ -54,7 +54,7 @@ def test_models( hf_outputs = hf_model.encode(example_prompts) with vllm_runner(model, - task="embedding", + task="embed", dtype=dtype, max_model_len=None, **vllm_extra_kwargs) as vllm_model: diff --git a/tests/models/embedding/language/test_scoring.py b/tests/models/embedding/language/test_scoring.py index 30fa5ea7b36c0..0c3115d195fc1 100644 --- a/tests/models/embedding/language/test_scoring.py +++ b/tests/models/embedding/language/test_scoring.py @@ -35,9 +35,7 @@ def test_llm_1_to_1(vllm_runner, hf_runner, model_name, dtype: str): with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: hf_outputs = hf_model.predict([text_pair]).tolist() - with vllm_runner(model_name, - task="embedding", - dtype=dtype, + with vllm_runner(model_name, task="score", dtype=dtype, max_model_len=None) as vllm_model: vllm_outputs = vllm_model.score(text_pair[0], text_pair[1]) @@ -58,9 +56,7 @@ def test_llm_1_to_N(vllm_runner, hf_runner, model_name, dtype: str): with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: 
hf_outputs = hf_model.predict(text_pairs).tolist() - with vllm_runner(model_name, - task="embedding", - dtype=dtype, + with vllm_runner(model_name, task="score", dtype=dtype, max_model_len=None) as vllm_model: vllm_outputs = vllm_model.score(TEXTS_1[0], TEXTS_2) @@ -82,9 +78,7 @@ def test_llm_N_to_N(vllm_runner, hf_runner, model_name, dtype: str): with hf_runner(model_name, dtype=dtype, is_cross_encoder=True) as hf_model: hf_outputs = hf_model.predict(text_pairs).tolist() - with vllm_runner(model_name, - task="embedding", - dtype=dtype, + with vllm_runner(model_name, task="score", dtype=dtype, max_model_len=None) as vllm_model: vllm_outputs = vllm_model.score(TEXTS_1, TEXTS_2) diff --git a/tests/models/embedding/vision_language/test_dse_qwen2_vl.py b/tests/models/embedding/vision_language/test_dse_qwen2_vl.py index 3dd8cb729f8a6..2641987b25a3a 100644 --- a/tests/models/embedding/vision_language/test_dse_qwen2_vl.py +++ b/tests/models/embedding/vision_language/test_dse_qwen2_vl.py @@ -93,7 +93,7 @@ def _run_test( # if we run HF first, the cuda initialization will be done and it # will hurt multiprocessing backend with fork method (the default method). with vllm_runner(model, - task="embedding", + task="embed", dtype=dtype, enforce_eager=True, max_model_len=8192) as vllm_model: diff --git a/tests/models/embedding/vision_language/test_llava_next.py b/tests/models/embedding/vision_language/test_llava_next.py index 693abd7252d5e..f4cd8b81a0d7d 100644 --- a/tests/models/embedding/vision_language/test_llava_next.py +++ b/tests/models/embedding/vision_language/test_llava_next.py @@ -47,7 +47,7 @@ def _run_test( # if we run HF first, the cuda initialization will be done and it # will hurt multiprocessing backend with fork method (the default method). with vllm_runner(model, - task="embedding", + task="embed", dtype=dtype, max_model_len=4096, enforce_eager=True) as vllm_model: diff --git a/tests/models/embedding/vision_language/test_phi3v.py b/tests/models/embedding/vision_language/test_phi3v.py index 6145aff1a5ea2..9374c23dd6ffe 100644 --- a/tests/models/embedding/vision_language/test_phi3v.py +++ b/tests/models/embedding/vision_language/test_phi3v.py @@ -39,7 +39,7 @@ def _run_test( # vLLM needs a fresh new process without cuda initialization. # if we run HF first, the cuda initialization will be done and it # will hurt multiprocessing backend with fork method (the default method). 
- with vllm_runner(model, task="embedding", dtype=dtype, + with vllm_runner(model, task="embed", dtype=dtype, enforce_eager=True) as vllm_model: vllm_outputs = vllm_model.encode(input_texts, images=input_images) diff --git a/tests/test_config.py b/tests/test_config.py index 45b0b938af215..4518adfc31bfc 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -7,11 +7,17 @@ from vllm.platforms import current_platform -@pytest.mark.parametrize(("model_id", "expected_task"), [ - ("facebook/opt-125m", "generate"), - ("intfloat/e5-mistral-7b-instruct", "embedding"), -]) -def test_auto_task(model_id, expected_task): +@pytest.mark.parametrize( + ("model_id", "expected_runner_type", "expected_task"), + [ + ("facebook/opt-125m", "generate", "generate"), + ("intfloat/e5-mistral-7b-instruct", "pooling", "embed"), + ("jason9693/Qwen2.5-1.5B-apeach", "pooling", "classify"), + ("cross-encoder/ms-marco-MiniLM-L-6-v2", "pooling", "score"), + ("Qwen/Qwen2.5-Math-RM-72B", "pooling", "reward"), + ], +) +def test_auto_task(model_id, expected_runner_type, expected_task): config = ModelConfig( model_id, task="auto", @@ -22,6 +28,7 @@ def test_auto_task(model_id, expected_task): dtype="float16", ) + assert config.runner_type == expected_runner_type assert config.task == expected_task diff --git a/vllm/config.py b/vllm/config.py index 2a9f0ebae997d..2d9a76fe7ddb1 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -45,13 +45,27 @@ logger = init_logger(__name__) -_EMBEDDING_MODEL_MAX_NUM_BATCHED_TOKENS = 32768 +_POOLING_MODEL_MAX_NUM_BATCHED_TOKENS = 32768 _MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS = 5120 -TaskOption = Literal["auto", "generate", "embedding"] +TaskOption = Literal["auto", "generate", "embedding", "embed", "classify", + "score", "reward"] -# "draft" is only used internally for speculative decoding -_Task = Literal["generate", "embedding", "draft"] +_ResolvedTask = Literal["generate", "embed", "classify", "score", "reward", + "draft"] + +RunnerType = Literal["generate", "pooling", "draft"] + +_RUNNER_TASKS: Dict[RunnerType, List[_ResolvedTask]] = { + "generate": ["generate"], + "pooling": ["embed", "classify", "score", "reward"], + "draft": ["draft"], +} + +_TASK_RUNNER: Dict[_ResolvedTask, RunnerType] = { + task: runner + for runner, tasks in _RUNNER_TASKS.items() for task in tasks +} HfOverrides = Union[Dict[str, Any], Callable[[PretrainedConfig], PretrainedConfig]] @@ -144,7 +158,7 @@ class ModelConfig: def __init__( self, model: str, - task: Union[TaskOption, _Task], + task: Union[TaskOption, Literal["draft"]], tokenizer: str, tokenizer_mode: str, trust_remote_code: bool, @@ -295,6 +309,7 @@ def __init__( supported_tasks, task = self._resolve_task(task, self.hf_config) self.supported_tasks = supported_tasks self.task: Final = task + self.pooler_config = self._init_pooler_config(override_pooler_config) self._verify_quantization() @@ -323,7 +338,7 @@ def _init_pooler_config( override_pooler_config: Optional["PoolerConfig"], ) -> Optional["PoolerConfig"]: - if self.task == "embedding": + if self.runner_type == "pooling": user_config = override_pooler_config or PoolerConfig() base_config = get_pooling_config(self.model, self.revision) @@ -357,60 +372,90 @@ def _verify_tokenizer_mode(self) -> None: "either 'auto', 'slow' or 'mistral'.") self.tokenizer_mode = tokenizer_mode + def _get_preferred_task( + self, + architectures: List[str], + supported_tasks: Set[_ResolvedTask], + ) -> Optional[_ResolvedTask]: + model_id = self.model + if get_pooling_config(model_id, self.revision): + return "embed" + 
if ModelRegistry.is_cross_encoder_model(architectures): + return "score" + + suffix_to_preferred_task: List[Tuple[str, _ResolvedTask]] = [ + # Other models follow this pattern + ("ForCausalLM", "generate"), + ("ForConditionalGeneration", "generate"), + ("ForSequenceClassification", "classify"), + ("ChatModel", "generate"), + ("LMHeadModel", "generate"), + ("EmbeddingModel", "embed"), + ("RewardModel", "reward"), + ] + _, arch = ModelRegistry.inspect_model_cls(architectures) + + for suffix, pref_task in suffix_to_preferred_task: + if arch.endswith(suffix) and pref_task in supported_tasks: + return pref_task + + return None + def _resolve_task( self, - task_option: Union[TaskOption, _Task], + task_option: Union[TaskOption, Literal["draft"]], hf_config: PretrainedConfig, - ) -> Tuple[Set[_Task], _Task]: + ) -> Tuple[Set[_ResolvedTask], _ResolvedTask]: if task_option == "draft": return {"draft"}, "draft" architectures = getattr(hf_config, "architectures", []) - task_support: Dict[_Task, bool] = { + runner_support: Dict[RunnerType, bool] = { # NOTE: Listed from highest to lowest priority, # in case the model supports multiple of them "generate": ModelRegistry.is_text_generation_model(architectures), - "embedding": ModelRegistry.is_pooling_model(architectures), + "pooling": ModelRegistry.is_pooling_model(architectures), } - supported_tasks_lst: List[_Task] = [ - task for task, is_supported in task_support.items() if is_supported + supported_runner_types_lst: List[RunnerType] = [ + runner_type + for runner_type, is_supported in runner_support.items() + if is_supported + ] + + supported_tasks_lst: List[_ResolvedTask] = [ + task for runner_type in supported_runner_types_lst + for task in _RUNNER_TASKS[runner_type] ] supported_tasks = set(supported_tasks_lst) if task_option == "auto": selected_task = next(iter(supported_tasks_lst)) - if len(supported_tasks) > 1: - suffix_to_preferred_task: List[Tuple[str, _Task]] = [ - # Hardcode the models that are exceptions - ("AquilaModel", "generate"), - ("ChatGLMModel", "generate"), - # Other models follow this pattern - ("ForCausalLM", "generate"), - ("ForConditionalGeneration", "generate"), - ("ChatModel", "generate"), - ("LMHeadModel", "generate"), - ("EmbeddingModel", "embedding"), - ("RewardModel", "embedding"), - ("ForSequenceClassification", "embedding"), - ] - info, arch = ModelRegistry.inspect_model_cls(architectures) - - for suffix, pref_task in suffix_to_preferred_task: - if arch.endswith(suffix) and pref_task in supported_tasks: - selected_task = pref_task - break - else: - if (arch.endswith("Model") - and info.architecture.endswith("ForCausalLM") - and "embedding" in supported_tasks): - selected_task = "embedding" + if len(supported_tasks_lst) > 1: + preferred_task = self._get_preferred_task( + architectures, supported_tasks) + if preferred_task is not None: + selected_task = preferred_task logger.info( "This model supports multiple tasks: %s. " "Defaulting to '%s'.", supported_tasks, selected_task) else: + # Aliases + if task_option == "embedding": + preferred_task = self._get_preferred_task( + architectures, supported_tasks) + if preferred_task != "embed": + msg = ("The 'embedding' task will be restricted to " + "embedding models in a future release. 
Please " + "pass `--task classify`, `--task score`, or " + "`--task reward` explicitly for other pooling " + "models.") + warnings.warn(msg, DeprecationWarning, stacklevel=2) + + task_option = preferred_task or "embed" + if task_option not in supported_tasks: msg = ( f"This model does not support the '{task_option}' task. " @@ -533,7 +578,7 @@ def verify_async_output_proc(self, parallel_config, speculative_config, # Async postprocessor is not necessary with embedding mode # since there is no token generation - if self.task == "embedding": + if self.runner_type == "pooling": self.use_async_output_proc = False # Reminder: Please update docs/source/usage/compatibility_matrix.rst @@ -750,6 +795,14 @@ def is_cross_encoder(self) -> bool: architectures = getattr(self.hf_config, "architectures", []) return ModelRegistry.is_cross_encoder_model(architectures) + @property + def supported_runner_types(self) -> Set[RunnerType]: + return {_TASK_RUNNER[task] for task in self.supported_tasks} + + @property + def runner_type(self) -> RunnerType: + return _TASK_RUNNER[self.task] + class CacheConfig: """Configuration for the KV cache. @@ -1096,7 +1149,7 @@ def _verify_args(self) -> None: class SchedulerConfig: """Scheduler configuration.""" - task: str = "generate" # The task to use the model for. + runner_type: str = "generate" # The runner type to launch for the model. # Maximum number of tokens to be processed in a single iteration. max_num_batched_tokens: int = field(default=None) # type: ignore @@ -1164,11 +1217,11 @@ def __post_init__(self) -> None: # for higher throughput. self.max_num_batched_tokens = max(self.max_model_len, 2048) - if self.task == "embedding": - # For embedding, choose specific value for higher throughput + if self.runner_type == "pooling": + # Choose specific value for higher throughput self.max_num_batched_tokens = max( self.max_num_batched_tokens, - _EMBEDDING_MODEL_MAX_NUM_BATCHED_TOKENS, + _POOLING_MODEL_MAX_NUM_BATCHED_TOKENS, ) if self.is_multimodal_model: # The value needs to be at least the number of multimodal tokens diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index 94c62743883ec..c3bc6becf0995 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -337,7 +337,7 @@ def __init__( self.lora_config = lora_config version = "selfattn" - if (self.scheduler_config.task == "embedding" + if (self.scheduler_config.runner_type == "pooling" or self.cache_config.is_attention_free): version = "placeholder" diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 7b9adc401abcf..d485c2a9e7208 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1066,7 +1066,7 @@ def create_engine_config(self, if (is_gpu and not use_sliding_window and not use_spec_decode and not self.enable_lora and not self.enable_prompt_adapter - and model_config.task != "embedding"): + and model_config.runner_type != "pooling"): self.enable_chunked_prefill = True logger.warning( "Chunked prefill is enabled by default for models with " @@ -1083,7 +1083,8 @@ def create_engine_config(self, "errors during the initial memory profiling phase, or result " "in low performance due to small KV cache space. 
Consider " "setting --max-model-len to a smaller value.", max_model_len) - elif self.enable_chunked_prefill and model_config.task == "embedding": + elif (self.enable_chunked_prefill + and model_config.runner_type == "pooling"): msg = "Chunked prefill is not supported for embedding models" raise ValueError(msg) @@ -1144,7 +1145,7 @@ def create_engine_config(self, " please file an issue with detailed information.") scheduler_config = SchedulerConfig( - task=model_config.task, + runner_type=model_config.runner_type, max_num_batched_tokens=self.max_num_batched_tokens, max_num_seqs=self.max_num_seqs, max_model_len=model_config.max_model_len, diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 6eca304b45f07..9be30c635cb2c 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -288,7 +288,7 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: self.model_executor = executor_class(vllm_config=vllm_config, ) - if self.model_config.task != "embedding": + if self.model_config.runner_type != "pooling": self._initialize_kv_caches() # If usage stat is enabled, collect relevant info. @@ -1123,7 +1123,7 @@ def _process_model_outputs(self, seq_group.metrics.model_execute_time = ( o.model_execute_time) - if self.model_config.task == "embedding": + if self.model_config.runner_type == "pooling": self._process_sequence_group_outputs(seq_group, output) else: self.output_processor.process_prompt_logprob(seq_group, output) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 2a02187223a33..0bec978c4869c 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -381,19 +381,20 @@ def generate( considered legacy and may be deprecated in the future. You should instead pass them via the ``inputs`` parameter. """ - task = self.llm_engine.model_config.task - if task != "generate": + runner_type = self.llm_engine.model_config.runner_type + if runner_type != "generate": messages = [ "LLM.generate() is only supported for (conditional) generation " "models (XForCausalLM, XForConditionalGeneration).", ] - supported_tasks = self.llm_engine.model_config.supported_tasks - if "generate" in supported_tasks: + supported_runner_types = self.llm_engine.model_config \ + .supported_runner_types + if "generate" in supported_runner_types: messages.append( - "Your model supports the 'generate' task, but is " - f"currently initialized for the '{task}' task. Please " - "initialize the model using `--task generate`.") + "Your model supports the 'generate' runner, but is " + f"currently initialized for the '{runner_type}' runner. " + "Please initialize vLLM using `--task generate`.") raise ValueError(" ".join(messages)) @@ -793,16 +794,18 @@ def encode( considered legacy and may be deprecated in the future. You should instead pass them via the ``inputs`` parameter. """ - task = self.llm_engine.model_config.task - if task != "embedding": - messages = ["LLM.encode() is only supported for embedding models."] + runner_type = self.llm_engine.model_config.runner_type + if runner_type != "pooling": + messages = ["LLM.encode() is only supported for pooling models."] - supported_tasks = self.llm_engine.model_config.supported_tasks - if "embedding" in supported_tasks: + supported_runner_types = self.llm_engine.model_config \ + .supported_runner_types + if "pooling" in supported_runner_types: messages.append( - "Your model supports the 'embedding' task, but is " - f"currently initialized for the '{task}' task. 
Please " - "initialize the model using `--task embedding`.") + "Your model supports the 'pooling' runner, but is " + f"currently initialized for the '{runner_type}' runner. " + "Please initialize vLLM using `--task embed`, " + "`--task classify`, `--task score` etc.") raise ValueError(" ".join(messages)) @@ -864,21 +867,23 @@ def score( A list of ``PoolingRequestOutput`` objects containing the generated scores in the same order as the input prompts. """ - task = self.llm_engine.model_config.task - if task != "embedding": - messages = ["LLM.score() is only supported for embedding models."] + runner_type = self.llm_engine.model_config.runner_type + if runner_type != "pooling": + messages = ["LLM.score() is only supported for pooling models."] - supported_tasks = self.llm_engine.model_config.supported_tasks - if "embedding" in supported_tasks: + supported_runner_types = self.llm_engine.model_config \ + .supported_runner_types + if "pooling" in supported_runner_types: messages.append( - "Your model supports the 'embedding' task, but is " - f"currently initialized for the '{task}' task. Please " - "initialize the model using `--task embedding`.") + "Your model supports the 'pooling' runner, but is " + f"currently initialized for the '{runner_type}' runner. " + "Please initialize vLLM using `--task embed`, " + "`--task classify`, `--task score` etc.") raise ValueError(" ".join(messages)) if not self.llm_engine.model_config.is_cross_encoder: - raise ValueError("Your model does not support the cross encoding") + raise ValueError("Your model does not support cross encoding") tokenizer = self.llm_engine.get_tokenizer() diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 0f93eb54111ad..a345f8caeeed2 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -573,7 +573,7 @@ def init_app_state( enable_auto_tools=args.enable_auto_tool_choice, tool_parser=args.tool_call_parser, enable_prompt_tokens_details=args.enable_prompt_tokens_details, - ) if model_config.task == "generate" else None + ) if model_config.runner_type == "generate" else None state.openai_serving_completion = OpenAIServingCompletion( engine_client, model_config, @@ -582,7 +582,7 @@ def init_app_state( prompt_adapters=args.prompt_adapters, request_logger=request_logger, return_tokens_as_token_ids=args.return_tokens_as_token_ids, - ) if model_config.task == "generate" else None + ) if model_config.runner_type == "generate" else None state.openai_serving_embedding = OpenAIServingEmbedding( engine_client, model_config, @@ -590,13 +590,13 @@ def init_app_state( request_logger=request_logger, chat_template=resolved_chat_template, chat_template_content_format=args.chat_template_content_format, - ) if model_config.task == "embedding" else None + ) if model_config.runner_type == "pooling" else None state.openai_serving_scores = OpenAIServingScores( engine_client, model_config, base_model_paths, request_logger=request_logger - ) if (model_config.task == "embedding" \ + ) if (model_config.runner_type == "pooling" \ and model_config.is_cross_encoder) else None state.openai_serving_tokenization = OpenAIServingTokenization( engine_client, diff --git a/vllm/entrypoints/openai/run_batch.py b/vllm/entrypoints/openai/run_batch.py index 00cdb3b6839f5..675daf54c0d0d 100644 --- a/vllm/entrypoints/openai/run_batch.py +++ b/vllm/entrypoints/openai/run_batch.py @@ -224,7 +224,7 @@ async def main(args): chat_template=None, chat_template_content_format="auto", 
enable_prompt_tokens_details=args.enable_prompt_tokens_details, - ) if model_config.task == "generate" else None + ) if model_config.runner_type == "generate" else None openai_serving_embedding = OpenAIServingEmbedding( engine, model_config, @@ -232,7 +232,7 @@ async def main(args): request_logger=request_logger, chat_template=None, chat_template_content_format="auto", - ) if model_config.task == "embedding" else None + ) if model_config.runner_type == "pooling" else None tracker = BatchProgressTracker() logger.info("Reading batch from %s...", args.input_file) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index cfb89e0f336bc..f15e7176b3d50 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -35,7 +35,7 @@ def get_model_architecture( architectures = ["QuantMixtralForCausalLM"] model_cls, arch = ModelRegistry.resolve_model_cls(architectures) - if model_config.task == "embedding": + if model_config.runner_type == "pooling": model_cls = as_embedding_model(model_cls) return model_cls, arch diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index fdb241e6753fb..55a5c4dff3a5c 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -42,7 +42,7 @@ def __init__( executor_class: Type[Executor], usage_context: UsageContext, ): - assert vllm_config.model_config.task != "embedding" + assert vllm_config.model_config.runner_type != "pooling" logger.info("Initializing an LLM engine (v%s) with config: %s", VLLM_VERSION, vllm_config) diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index 4fad1a3f4caeb..ba3d4a130a80b 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -163,7 +163,7 @@ def __init__( not in ["medusa", "mlp_speculator", "eagle"]) \ else {"return_hidden_states": True} ModelRunnerClass: Type[CPUModelRunnerBase] = CPUModelRunner - if self.model_config.task == "embedding": + if self.model_config.runner_type == "pooling": ModelRunnerClass = CPUPoolingModelRunner elif self.model_config.is_encoder_decoder: ModelRunnerClass = CPUEncoderDecoderModelRunner diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index 094dd5a5d08b3..832b9903b7abc 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -75,7 +75,7 @@ def __init__( else {"return_hidden_states": True} ModelRunnerClass: Type[GPUModelRunnerBase] = ModelRunner - if model_config.task == "embedding": + if model_config.runner_type == "pooling": ModelRunnerClass = PoolingModelRunner elif self.model_config.is_encoder_decoder: ModelRunnerClass = EncoderDecoderModelRunner From cad5c0a6eda057eeece87a42fff49fef3e18a2ac Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 11 Dec 2024 21:36:27 +0800 Subject: [PATCH 316/397] [Doc] Update docs to refer to pooling models (#11093) Signed-off-by: DarkLight1337 --- docs/source/usage/faq.rst | 7 ++++++- vllm/attention/backends/placeholder_attn.py | 2 +- vllm/config.py | 8 ++++---- vllm/core/placeholder_block_space_manager.py | 2 +- vllm/engine/arg_utils.py | 4 ++-- vllm/engine/async_llm_engine.py | 2 +- vllm/engine/multiprocessing/client.py | 2 +- vllm/engine/protocol.py | 2 +- vllm/entrypoints/openai/serving_score.py | 2 +- vllm/sequence.py | 6 +++--- vllm/v1/engine/processor.py | 2 +- vllm/worker/cpu_worker.py | 2 +- vllm/worker/hpu_worker.py | 4 ++-- vllm/worker/worker.py | 2 +- 14 files changed, 26 insertions(+), 21 deletions(-) diff --git a/docs/source/usage/faq.rst b/docs/source/usage/faq.rst index ce327abd5fa20..d88da32092924 
100644 --- a/docs/source/usage/faq.rst +++ b/docs/source/usage/faq.rst @@ -11,7 +11,12 @@ A: Assuming that you're referring to using OpenAI compatible server to serve mul Q: Which model to use for offline inference embedding? -A: If you want to use an embedding model, try: https://huggingface.co/intfloat/e5-mistral-7b-instruct. Instead models, such as Llama-3-8b, Mistral-7B-Instruct-v0.3, are generation models rather than an embedding model +A: You can try `e5-mistral-7b-instruct `__ and `BAAI/bge-base-en-v1.5 `__; +more are listed :ref:`here `. + +By extracting hidden states, vLLM can automatically convert text generation models like `Llama-3-8B `__, +`Mistral-7B-Instruct-v0.3 `__ into embedding models, +but they are expected be inferior to models that are specifically trained on embedding tasks. ---------------------------------------- diff --git a/vllm/attention/backends/placeholder_attn.py b/vllm/attention/backends/placeholder_attn.py index 658039bfc3365..534f79b3a60bf 100644 --- a/vllm/attention/backends/placeholder_attn.py +++ b/vllm/attention/backends/placeholder_attn.py @@ -14,7 +14,7 @@ from vllm.worker.model_runner import (ModelInputForGPUBuilder, ModelInputForGPUWithSamplingMetadata) -# Placeholder attention backend for models like Mamba and embedding models that +# Placeholder attention backend for models like Mamba and pooling models that # lack attention. diff --git a/vllm/config.py b/vllm/config.py index 2d9a76fe7ddb1..322c8f8990a40 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -152,7 +152,7 @@ class ModelConfig: this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. override_pooler_config: Initialize non default pooling config or - override default pooling config for the embedding model. + override default pooling config for the pooling model. """ def __init__( @@ -576,7 +576,7 @@ def verify_async_output_proc(self, parallel_config, speculative_config, self.use_async_output_proc = False return - # Async postprocessor is not necessary with embedding mode + # Async postprocessor is not necessary for pooling models # since there is no token generation if self.runner_type == "pooling": self.use_async_output_proc = False @@ -1825,11 +1825,11 @@ class MultiModalConfig: @dataclass class PoolerConfig: - """Controls the behavior of output pooling in embedding models.""" + """Controls the behavior of output pooling in pooling models.""" pooling_type: Optional[str] = None """ - The pooling method of the embedding model. This should be a key in + The pooling method of the pooling model. This should be a key in :class:`vllm.model_executor.layers.pooler.PoolingType`. """ diff --git a/vllm/core/placeholder_block_space_manager.py b/vllm/core/placeholder_block_space_manager.py index 26d42b7f1790e..a47e594518534 100644 --- a/vllm/core/placeholder_block_space_manager.py +++ b/vllm/core/placeholder_block_space_manager.py @@ -8,7 +8,7 @@ class PlaceholderBlockSpaceManager(BlockSpaceManager): """A version of BlockSpaceManager for use in environments where block management is not required. - For example: embedding models or attention-free models like Mamba. + For example: pooling models or attention-free models like Mamba. 
This class provides the same interface as BlockSpaceManager, but its methods perform no actions or return simple values like True in specific diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index d485c2a9e7208..7337522bc9952 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -893,7 +893,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: '--override-pooler-config', type=PoolerConfig.from_json, default=None, - help="Override or set the pooling method in the embedding model. " + help="Override or set the pooling method for pooling models. " "e.g. {\"pooling_type\": \"mean\", \"normalize\": false}.'") parser.add_argument('--compilation-config', @@ -1085,7 +1085,7 @@ def create_engine_config(self, "setting --max-model-len to a smaller value.", max_model_len) elif (self.enable_chunked_prefill and model_config.runner_type == "pooling"): - msg = "Chunked prefill is not supported for embedding models" + msg = "Chunked prefill is not supported for pooling models" raise ValueError(msg) diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 60dccd7a0812c..32396fd10188d 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -1085,7 +1085,7 @@ async def encode( trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, ) -> AsyncGenerator[PoolingRequestOutput, None]: - """Generate outputs for a request from an embedding model. + """Generate outputs for a request from a pooling model. Generate outputs for a request. This method is a coroutine. It adds the request into the waiting queue of the LLMEngine and streams the outputs diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index a729023bc00bb..0a046c71e86e8 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -527,7 +527,7 @@ def encode( *, inputs: Optional[PromptType] = None # DEPRECATED ) -> AsyncGenerator[PoolingRequestOutput, None]: - """Generate outputs for a request from an embedding model. + """Generate outputs for a request from a pooling model. Generate outputs for a request. This method is a coroutine. It adds the request into the waiting queue of the LLMEngine and streams the outputs diff --git a/vllm/engine/protocol.py b/vllm/engine/protocol.py index 4079de7d36793..a066836b92708 100644 --- a/vllm/engine/protocol.py +++ b/vllm/engine/protocol.py @@ -209,7 +209,7 @@ def encode( trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, ) -> AsyncGenerator[PoolingRequestOutput, None]: - """Generate outputs for a request from an embedding model.""" + """Generate outputs for a request from a pooling model.""" ... @abstractmethod diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py index fed06fa452955..4929e720c00e4 100644 --- a/vllm/entrypoints/openai/serving_score.py +++ b/vllm/entrypoints/openai/serving_score.py @@ -119,7 +119,7 @@ async def create_score( if prompt_adapter_request is not None: raise NotImplementedError("Prompt adapter is not supported " - "for embedding models") + "for scoring models") if isinstance(tokenizer, MistralTokenizer): raise ValueError( diff --git a/vllm/sequence.py b/vllm/sequence.py index 669124319c4f4..b0f3c1cc3609f 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -618,9 +618,9 @@ class SequenceGroup: arrival_time: The arrival time of the request. lora_request: LoRA request. 
embeddings: The embeddings vectors of the prompt of the sequence group - for an embedding model. + for a pooling model. pooling_params: The pooling parameters used to generate the pooling - for an embedding model. + for a pooling model. encoder_seq: Optional, the single encoder sequence. Should be None unless you are working with an encoder/decoder model. trace_headers: OpenTelemetry trace headers. @@ -1102,7 +1102,7 @@ class PoolerOutput( msgspec.Struct, omit_defaults=True, # type: ignore[call-arg] array_like=True): # type: ignore[call-arg] - """The output from a pooling operation in the embedding model.""" + """The output from a pooling operation in the pooling model.""" outputs: List[EmbeddingSequenceGroupOutput] # lazy import to avoid circular import diff --git a/vllm/v1/engine/processor.py b/vllm/v1/engine/processor.py index 120fc64969552..e0e525b30a767 100644 --- a/vllm/v1/engine/processor.py +++ b/vllm/v1/engine/processor.py @@ -59,7 +59,7 @@ def process_inputs( priority: int = 0, ) -> Tuple[DetokenizerRequest, EngineCoreRequest]: - # TODO(woosuk): Support embedding mode. + # TODO(woosuk): Support pooling models. # TODO(woosuk): Check max_logprobs # TODO(woosuk): Support encoder-decoder models. diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index ba3d4a130a80b..09758a5d9accf 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -178,7 +178,7 @@ def __init__( # Uninitialized cache engine. Will be initialized by # initialize_cache. self.cache_engine: List[CPUCacheEngine] - # Initialize cpu_cache as embedding models don't initialize kv_caches + # Initialize cpu_cache as pooling models don't initialize kv_caches self.cpu_cache: Optional[List[List[torch.Tensor]]] = None # Torch profiler. Enabled and configured through env vars: diff --git a/vllm/worker/hpu_worker.py b/vllm/worker/hpu_worker.py index 493f7a9fad098..cca7cd50bfc7b 100644 --- a/vllm/worker/hpu_worker.py +++ b/vllm/worker/hpu_worker.py @@ -65,8 +65,8 @@ def __init__( # Uninitialized cache engine. Will be initialized by # initialize_cache. self.cache_engine: List[HPUCacheEngine] - # Initialize gpu_cache as embedding models don't initialize kv_caches - self.hpu_cache: Optional[List[List[torch.tensor]]] = None + # Initialize gpu_cache as pooling models don't initialize kv_caches + self.hpu_cache: Optional[List[List[torch.Tensor]]] = None # Torch profiler. Enabled and configured through env vars: # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace if envs.VLLM_TORCH_PROFILER_DIR: diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index 832b9903b7abc..a368bb9ee9a5b 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -91,7 +91,7 @@ def __init__( # Uninitialized cache engine. Will be initialized by # initialize_cache. 
self.cache_engine: List[CacheEngine] - # Initialize gpu_cache as embedding models don't initialize kv_caches + # Initialize gpu_cache as pooling models don't initialize kv_caches self.gpu_cache: Optional[List[List[torch.Tensor]]] = None self._seq_group_metadata_cache: Dict[str, SequenceGroupMetadata] = {} From b2f775456e4af7412308320a9c11e4dac3086205 Mon Sep 17 00:00:00 2001 From: hissu-hyvarinen Date: Wed, 11 Dec 2024 17:23:37 +0200 Subject: [PATCH 317/397] [CI/Build] Enable prefix caching test for AMD (#11098) Signed-off-by: Hissu Hyvarinen --- .buildkite/test-pipeline.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 8f57006214c88..df4fa7a6ee9ba 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -201,7 +201,7 @@ steps: - python3 offline_profile.py --model facebook/opt-125m - label: Prefix Caching Test # 9min - #mirror_hardwares: [amd] + mirror_hardwares: [amd] source_file_dependencies: - vllm/ - tests/prefix_caching From fd22220687af5ccd89d9f8f2812069ef0422244c Mon Sep 17 00:00:00 2001 From: bingps <46775742+bingps@users.noreply.github.com> Date: Wed, 11 Dec 2024 23:43:24 +0800 Subject: [PATCH 318/397] [Doc] Installed version of llmcompressor for int8/fp8 quantization (#11103) Signed-off-by: Guangda Liu Co-authored-by: Guangda Liu --- docs/source/quantization/fp8.rst | 2 +- docs/source/quantization/int8.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/quantization/fp8.rst b/docs/source/quantization/fp8.rst index aacd07a34ad46..4dbf8e9d346e1 100644 --- a/docs/source/quantization/fp8.rst +++ b/docs/source/quantization/fp8.rst @@ -45,7 +45,7 @@ To produce performant FP8 quantized models with vLLM, you'll need to install the .. code-block:: console - $ pip install llmcompressor==0.1.0 + $ pip install llmcompressor Quantization Process -------------------- diff --git a/docs/source/quantization/int8.rst b/docs/source/quantization/int8.rst index 04fa308449507..aa5b251becb1c 100644 --- a/docs/source/quantization/int8.rst +++ b/docs/source/quantization/int8.rst @@ -19,7 +19,7 @@ To use INT8 quantization with vLLM, you'll need to install the `llm-compressor < .. code-block:: console - $ pip install llmcompressor==0.1.0 + $ pip install llmcompressor Quantization Process -------------------- @@ -142,4 +142,4 @@ Best Practices Troubleshooting and Support --------------------------- -If you encounter any issues or have feature requests, please open an issue on the ``vllm-project/llm-compressor`` GitHub repository. \ No newline at end of file +If you encounter any issues or have feature requests, please open an issue on the ``vllm-project/llm-compressor`` GitHub repository. 
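Taken together, the runner-type changes earlier in this series replace the old catch-all "embedding" task with the finer-grained pooling tasks (embed, classify, score, reward) handled by the new "pooling" runner. A minimal offline sketch of how a pooling model is expected to be launched after these patches, using the e5-mistral-7b-instruct checkpoint mentioned in the FAQ; the prompts are placeholders and the exact fields on PoolingRequestOutput may differ between vLLM versions:

    # Sketch only: model name and prompts are illustrative.
    from vllm import LLM

    llm = LLM(model="intfloat/e5-mistral-7b-instruct", task="embed")
    outputs = llm.encode(["Hello, my name is", "The capital of France is"])
    for output in outputs:
        # Each element is a PoolingRequestOutput holding the pooled
        # representation of the corresponding prompt.
        print(type(output).__name__)

Cross-encoder checkpoints use LLM.score() on the same "pooling" runner instead, which is why the error messages in the hunks above now steer users toward `--task embed`, `--task classify`, or `--task score` rather than the deprecated `--task embedding`.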
From 91642db952458fbb6ae7c2d167757dc86b105991 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 11 Dec 2024 10:43:05 -0800 Subject: [PATCH 319/397] [torch.compile] use depyf to dump torch.compile internals (#10972) Signed-off-by: youkaichao --- requirements-common.txt | 1 + vllm/compilation/backends.py | 69 ++++++++++++++++++---------------- vllm/compilation/decorators.py | 2 +- vllm/compilation/monitor.py | 23 ++++++++++-- vllm/compilation/wrapper.py | 4 +- vllm/config.py | 6 ++- vllm/worker/model_runner.py | 3 +- 7 files changed, 66 insertions(+), 42 deletions(-) diff --git a/requirements-common.txt b/requirements-common.txt index 792cd58e80669..850b8f4101701 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -33,3 +33,4 @@ six>=1.16.0; python_version > '3.11' # transitive dependency of pandas that need setuptools>=74.1.1; python_version > '3.11' # Setuptools is used by triton, we need to ensure a modern version is installed for 3.12+ so that it does not try to import distutils, which was removed in 3.12 einops # Required for Qwen2-VL. compressed-tensors == 0.8.0 # required for compressed-tensors +depyf==0.18.0 # required for profiling and debugging torch.compile diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index f002a8ff905b1..09a3daa731829 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -9,7 +9,7 @@ import torch.fx as fx import vllm.envs as envs -from vllm.config import CompilationConfig +from vllm.config import CompilationConfig, VllmConfig from vllm.logger import init_logger from vllm.utils import weak_ref_tensors @@ -149,14 +149,15 @@ class PiecewiseCompileInterpreter(torch.fx.Interpreter): """ def __init__(self, module: torch.fx.GraphModule, - compile_submod_names: List[str], - compilation_configs: CompilationConfig, graph_pool): + compile_submod_names: List[str], vllm_config: VllmConfig, + graph_pool): super().__init__(module) from torch._guards import detect_fake_mode self.fake_mode = detect_fake_mode() self.compile_submod_names = compile_submod_names - self.compilation_configs = compilation_configs + self.compilation_config = vllm_config.compilation_config self.graph_pool = graph_pool + self.vllm_config = vllm_config def run(self, *args): fake_args = [ @@ -182,15 +183,15 @@ def call_module(self, target: torch.fx.node.Target, compiled_graph_for_general_shape = wrap_inductor( submod, args, - self.compilation_configs.inductor_compile_config, - self.compilation_configs, + self.compilation_config.inductor_compile_config, + self.compilation_config, graph_index=index, num_graphs=len(self.compile_submod_names), runtime_shape=None, - use_inductor=self.compilation_configs.use_inductor) + use_inductor=self.compilation_config.use_inductor) self.module.__dict__[target] = PiecewiseBackend( - submod, self.compilation_configs, self.graph_pool, index, + submod, self.vllm_config, self.graph_pool, index, len(self.compile_submod_names), sym_shape_indices, compiled_graph_for_general_shape) @@ -211,7 +212,8 @@ class VllmBackend: which handles the post-grad passes. 
""" - compilation_configs: CompilationConfig + vllm_config: VllmConfig + compilation_config: CompilationConfig graph_pool: Any _called: bool = False # the graph we compiled @@ -227,7 +229,7 @@ class VllmBackend: def __init__( self, - compilation_configs: CompilationConfig, + vllm_config: VllmConfig, ): global global_graph_pool if global_graph_pool is None: @@ -244,13 +246,14 @@ def __init__( self.sym_tensor_indices = [] self.input_buffers = [] - self.compilation_configs = compilation_configs + self.vllm_config = vllm_config + self.compilation_config = vllm_config.compilation_config # `torch.compile` is JIT compiled, so we don't need to # do anything here def configure_post_pass(self): - config = self.compilation_configs + config = self.compilation_config self.post_grad_pass_manager.configure(config.pass_config) # Post-grad custom passes are run using the post_grad_custom_post_pass @@ -271,7 +274,7 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: from .monitor import torch_compile_start_time dynamo_time = time.time() - torch_compile_start_time logger.info("Dynamo bytecode transform time: %.2f s", dynamo_time) - self.compilation_configs.compilation_time += dynamo_time + self.compilation_config.compilation_time += dynamo_time # we control the compilation process, each instance can only be # called once @@ -281,7 +284,7 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: self.configure_post_pass() self.split_gm, self.piecewise_graphs = split_graph( - graph, self.compilation_configs.splitting_ops) + graph, self.compilation_config.splitting_ops) from torch._dynamo.utils import lazy_format_graph_code logger.debug("%s", lazy_format_graph_code("before split", self.graph)) @@ -298,13 +301,13 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: # propagate the split graph to the piecewise backend, # compile submodules with symbolic shapes PiecewiseCompileInterpreter(self.split_gm, submod_names_to_compile, - self.compilation_configs, + self.vllm_config, self.graph_pool).run(*example_inputs) self._called = True - if not self.compilation_configs.use_cudagraph or \ - not self.compilation_configs.cudagraph_copy_inputs: + if not self.compilation_config.use_cudagraph or \ + not self.compilation_config.cudagraph_copy_inputs: return self.split_gm # if we need to copy input buffers for cudagraph @@ -364,10 +367,9 @@ class ConcreteSizeEntry: class PiecewiseBackend: - def __init__(self, graph: fx.GraphModule, - compilation_configs: CompilationConfig, graph_pool: Any, - piecewise_compile_index: int, total_piecewise_compiles: int, - sym_shape_indices: List[int], + def __init__(self, graph: fx.GraphModule, vllm_config: VllmConfig, + graph_pool: Any, piecewise_compile_index: int, + total_piecewise_compiles: int, sym_shape_indices: List[int], compiled_graph_for_general_shape: Callable): """ The backend for piecewise compilation. @@ -375,7 +377,7 @@ def __init__(self, graph: fx.GraphModule, We will compile `self.graph` once for the general shape, and then compile for different shapes specified in - `compilation_configs.compile_sizes`. + `compilation_config.compile_sizes`. Independently, we will capture cudagraph for different shapes. @@ -383,7 +385,8 @@ def __init__(self, graph: fx.GraphModule, compile it first, and then capture cudagraph. 
""" self.graph = graph - self.compilation_configs = compilation_configs + self.vllm_config = vllm_config + self.compilation_config = vllm_config.compilation_config self.graph_pool = graph_pool self.piecewise_compile_index = piecewise_compile_index self.total_piecewise_compiles = total_piecewise_compiles @@ -393,10 +396,10 @@ def __init__(self, graph: fx.GraphModule, piecewise_compile_index == total_piecewise_compiles - 1) self.compile_sizes: Set[int] = set( - self.compilation_configs.compile_sizes) + self.compilation_config.compile_sizes) self.capture_sizes: Set[int] = set( - self.compilation_configs.capture_sizes - ) if self.compilation_configs.use_cudagraph else set() + self.compilation_config.capture_sizes + ) if self.compilation_config.use_cudagraph else set() self.first_run_finished = False @@ -423,7 +426,7 @@ def __call__(self, *args) -> Any: self.first_run_finished = True # no specific sizes to compile if self.is_last_graph and not self.to_be_compiled_sizes: - end_monitoring_torch_compile(self.compilation_configs) + end_monitoring_torch_compile(self.vllm_config) return self.compiled_graph_for_general_shape(*args) runtime_shape = args[self.sym_shape_indices[0]] @@ -443,28 +446,28 @@ def __call__(self, *args) -> Any: entry.runnable = wrap_inductor( self.graph, args, - self.compilation_configs.inductor_compile_config, - self.compilation_configs, + self.compilation_config.inductor_compile_config, + self.compilation_config, graph_index=self.piecewise_compile_index, num_graphs=self.total_piecewise_compiles, runtime_shape=runtime_shape, - use_inductor=self.compilation_configs.use_inductor) + use_inductor=self.compilation_config.use_inductor) # finished compilations for all required shapes if self.is_last_graph and not self.to_be_compiled_sizes: - end_monitoring_torch_compile(self.compilation_configs) + end_monitoring_torch_compile(self.vllm_config) if not entry.use_cudagraph: return entry.runnable(*args) if entry.cudagraph is None: - if entry.num_finished_warmup < self.compilation_configs.cudagraph_num_of_warmups: # noqa + if entry.num_finished_warmup < self.compilation_config.cudagraph_num_of_warmups: # noqa entry.num_finished_warmup += 1 if self.is_first_graph: logger.debug( "Warming up %s/%s for shape %s", entry.num_finished_warmup, - self.compilation_configs.cudagraph_num_of_warmups, + self.compilation_config.cudagraph_num_of_warmups, runtime_shape) return entry.runnable(*args) diff --git a/vllm/compilation/decorators.py b/vllm/compilation/decorators.py index 938430fe2a501..805a217ee6ca1 100644 --- a/vllm/compilation/decorators.py +++ b/vllm/compilation/decorators.py @@ -185,7 +185,7 @@ def __call__(self, *args, **kwargs): "Unsupported dynamic dimensions" f" {dims} for argument {k} with type {type(arg)}.") # here, it is the starting point of the `torch.compile` process - start_monitoring_torch_compile(self.vllm_config.compilation_config) + start_monitoring_torch_compile(self.vllm_config) # if we don't use custom dispatcher, we can directly call the # compiled function and let torch.compile handle the dispatching, diff --git a/vllm/compilation/monitor.py b/vllm/compilation/monitor.py index 3348674b09af2..b97e40415b41b 100644 --- a/vllm/compilation/monitor.py +++ b/vllm/compilation/monitor.py @@ -1,19 +1,36 @@ +import os import time -from vllm.config import CompilationConfig, CompilationLevel +from vllm.config import CompilationConfig, CompilationLevel, VllmConfig from vllm.logger import init_logger logger = init_logger(__name__) +context_manager = None torch_compile_start_time: float = 
0.0 -def start_monitoring_torch_compile(compilation_config: CompilationConfig): +def start_monitoring_torch_compile(vllm_config: VllmConfig): global torch_compile_start_time torch_compile_start_time = time.time() + compilation_config: CompilationConfig = vllm_config.compilation_config + if compilation_config.level == CompilationLevel.PIECEWISE and \ + compilation_config.debug_dump_path: + import depyf + path = os.path.join(compilation_config.debug_dump_path, + f"rank_{vllm_config.parallel_config.rank}") + global context_manager + context_manager = depyf.prepare_debug(path) + context_manager.__enter__() -def end_monitoring_torch_compile(compilation_config: CompilationConfig): + +def end_monitoring_torch_compile(vllm_config: VllmConfig): + compilation_config: CompilationConfig = vllm_config.compilation_config if compilation_config.level == CompilationLevel.PIECEWISE: logger.info("torch.compile takes %.2f s in total", compilation_config.compilation_time) + global context_manager + if context_manager is not None: + context_manager.__exit__(None, None, None) + context_manager = None diff --git a/vllm/compilation/wrapper.py b/vllm/compilation/wrapper.py index bc4d292fef402..c10241b483169 100644 --- a/vllm/compilation/wrapper.py +++ b/vllm/compilation/wrapper.py @@ -32,8 +32,8 @@ def __init__(self, # default compilation settings # compiling the forward method - backend = get_current_vllm_config( - ).compilation_config.init_backend() + vllm_config = get_current_vllm_config() + backend = vllm_config.compilation_config.init_backend(vllm_config) compiled_callable = torch.compile( self.forward, diff --git a/vllm/config.py b/vllm/config.py index 322c8f8990a40..7f9be5a3a98bc 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2222,6 +2222,7 @@ class CompilationConfig(BaseModel): - 1: dynamo as is. - 2: dynamo once. - 3: piecewise compilation. + - debug_dump_path: the path to dump the debug information. - backend: the backend for compilation. It needs to be a string. - "" (empty string): use the default backend. - "eager"/"openxla"/...: use the specified backend registered in PyTorch. @@ -2289,6 +2290,7 @@ class CompilationConfig(BaseModel): certain small batchsizes, where inductor is good at optimizing. 
""" # noqa level: int = 0 + debug_dump_path: str = "" backend: str = "" custom_ops: List[str] = Field(default_factory=list) splitting_ops: List[str] = Field(default_factory=lambda: [ @@ -2394,7 +2396,7 @@ def model_post_init(self, __context: Any) -> None: self.static_forward_context = {} self.compilation_time = 0.0 - def init_backend(self) -> Union[str, Callable]: + def init_backend(self, vllm_config: "VllmConfig") -> Union[str, Callable]: if self.level == CompilationLevel.NO_COMPILATION: raise ValueError("No compilation level is set.") @@ -2413,7 +2415,7 @@ def init_backend(self) -> Union[str, Callable]: # merge with the config use_inductor assert self.level == CompilationLevel.PIECEWISE from vllm.compilation.backends import VllmBackend - return VllmBackend(self) + return VllmBackend(vllm_config) def init_with_cudagraph_sizes(self, sizes_to_specialize: List[int]): """To complete the initialization of config, diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 551b84435fdc0..26fd486130ce6 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1162,7 +1162,8 @@ def load_model(self) -> None: if self.vllm_config.compilation_config.level ==\ CompilationLevel.DYNAMO_AS_IS and supports_dynamo(): - backend = self.vllm_config.compilation_config.init_backend() + backend = self.vllm_config.compilation_config.init_backend( + self.vllm_config) self.model = torch.compile( self.model, fullgraph=envs.VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE, From d643c2aba1cd5421200f3a3bad1813dd067233b4 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Wed, 11 Dec 2024 10:49:23 -0800 Subject: [PATCH 320/397] [V1] Use input_ids as input for text-only models (#11032) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu_model_runner.py | 68 +++++++++++++++++++++--------- 1 file changed, 47 insertions(+), 21 deletions(-) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 8d9976ded7c5e..e75be21ef2d91 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -61,6 +61,7 @@ def __init__( self.kv_cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[ cache_config.cache_dtype] + self.is_multimodal_model = model_config.is_multimodal_model self.sliding_window = model_config.get_sliding_window() self.block_size = cache_config.block_size self.max_model_len = model_config.max_model_len @@ -103,6 +104,11 @@ def __init__( # The batch sizes in the config are in descending order. self.cudagraph_batch_sizes = list( reversed(self.vllm_config.compilation_config.capture_sizes)) + + # Persistent buffers for CUDA graphs. + self.input_ids = torch.zeros(self.max_num_tokens, + dtype=torch.int32, + device=self.device) self.positions = torch.zeros(self.max_num_tokens, dtype=torch.int64, device=self.device) @@ -310,7 +316,8 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): seq_start_loc_np[0] = 0 np.cumsum(seq_lens, out=seq_start_loc_np[1:]) - input_ids = input_ids.to(self.device, non_blocking=True) + self.input_ids[:total_num_scheduled_tokens].copy_(input_ids, + non_blocking=True) self.positions[:total_num_scheduled_tokens].copy_(positions, non_blocking=True) query_start_loc = query_start_loc.to(self.device, non_blocking=True) @@ -331,7 +338,7 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): # token from the partial request. # TODO: Support prompt logprobs. 
logits_indices = query_start_loc[1:] - 1 - return input_ids, attn_metadata, logits_indices + return attn_metadata, logits_indices def _prepare_sampling( self, @@ -427,13 +434,15 @@ def execute_model( ) -> ModelRunnerOutput: self._update_states(scheduler_output) - # Run the encoder. - self._execute_encoder(scheduler_output) - encoder_outputs = self._gather_encoder_outputs(scheduler_output) + if self.is_multimodal_model: + # Run the multimodal encoder if any. + self._execute_encoder(scheduler_output) + encoder_outputs = self._gather_encoder_outputs(scheduler_output) + else: + encoder_outputs = [] # Prepare the decoder inputs. - input_ids, attn_metadata, logits_indices = self._prepare_inputs( - scheduler_output) + attn_metadata, logits_indices = self._prepare_inputs(scheduler_output) num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens if (self.use_cuda_graph and num_scheduled_tokens <= self.cudagraph_batch_sizes[-1]): @@ -444,29 +453,39 @@ def execute_model( else: # Eager mode. num_input_tokens = num_scheduled_tokens - attn_metadata.num_input_tokens = num_input_tokens - # Get the inputs embeds. - if encoder_outputs: - inputs_embeds = self.model.get_input_embeddings( - input_ids, encoder_outputs) + if self.is_multimodal_model: + # NOTE(woosuk): To unify token ids and soft tokens (vision + # embeddings), we always use embeddings (rather than token ids) + # as input to the multimodal model, even when the input is text. + input_ids = self.input_ids[:num_scheduled_tokens] + if encoder_outputs: + inputs_embeds = self.model.get_input_embeddings( + input_ids, encoder_outputs) + else: + inputs_embeds = self.model.get_input_embeddings(input_ids) + # TODO(woosuk): Avoid the copy. Optimize. + self.inputs_embeds[:num_scheduled_tokens].copy_(inputs_embeds) + inputs_embeds = self.inputs_embeds[:num_input_tokens] + input_ids = None else: - inputs_embeds = self.model.get_input_embeddings(input_ids) - # NOTE(woosuk): To unify token ids and soft tokens (vision embeddings), - # always use embeddings (rather than token ids) as input to the model. - # TODO(woosuk): Avoid the copy. Optimize. - self.inputs_embeds[:num_scheduled_tokens].copy_(inputs_embeds) + # For text-only models, we use token ids as input. + # While it is possible to use embeddings as input just like the + # multimodal models, it is not desirable for performance since + # then the embedding layer is not included in the CUDA graph. + input_ids = self.input_ids[:num_input_tokens] + inputs_embeds = None # Run the decoder. # Use persistent buffers for CUDA graphs. 
with set_forward_context(attn_metadata, self.vllm_config): hidden_states = self.model( - input_ids=None, + input_ids=input_ids, positions=self.positions[:num_input_tokens], kv_caches=self.kv_caches, attn_metadata=None, - inputs_embeds=self.inputs_embeds[:num_input_tokens], + inputs_embeds=inputs_embeds, ) hidden_states = hidden_states[:num_scheduled_tokens] hidden_states = hidden_states[logits_indices] @@ -534,13 +553,20 @@ def _dummy_run( num_tokens: int, kv_caches: List[torch.Tensor], ) -> torch.Tensor: + if self.is_multimodal_model: + input_ids = None + inputs_embeds = self.inputs_embeds[:num_tokens] + else: + input_ids = self.input_ids[:num_tokens] + inputs_embeds = None with set_forward_context(None, self.vllm_config): hidden_states = model( - input_ids=None, + input_ids=input_ids, positions=self.positions[:num_tokens], kv_caches=kv_caches, attn_metadata=None, - inputs_embeds=self.inputs_embeds[:num_tokens]) + inputs_embeds=inputs_embeds, + ) return hidden_states def profile_run(self) -> None: From 66aaa7722df3d7ef9e9bd2942cab5cd0d7473174 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Wed, 11 Dec 2024 10:59:50 -0800 Subject: [PATCH 321/397] [torch.compile] remove graph logging in ci (#11110) Signed-off-by: youkaichao --- vllm/compilation/backends.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 09a3daa731829..4a5dc337d01b8 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -287,9 +287,11 @@ def __call__(self, graph: fx.GraphModule, example_inputs) -> Callable: graph, self.compilation_config.splitting_ops) from torch._dynamo.utils import lazy_format_graph_code - logger.debug("%s", lazy_format_graph_code("before split", self.graph)) - logger.debug("%s", lazy_format_graph_code("after split", - self.split_gm)) + + # depyf will hook lazy_format_graph_code and dump the graph + # for debugging, no need to print the graph here + lazy_format_graph_code("before split", self.graph) + lazy_format_graph_code("after split", self.split_gm) compilation_counter.num_piecewise_graphs_seen += len( self.piecewise_graphs) From 72ff3a968682e6a3f7620ab59f2baf5e8eb2777b Mon Sep 17 00:00:00 2001 From: Rui Qiao <161574667+ruisearch42@users.noreply.github.com> Date: Wed, 11 Dec 2024 11:36:35 -0800 Subject: [PATCH 322/397] [core] Bump ray to use _overlap_gpu_communication in compiled graph tests (#10410) Signed-off-by: Rui Qiao Signed-off-by: Rui Qiao Co-authored-by: Rui Qiao --- requirements-test.in | 2 +- requirements-test.txt | 2 +- vllm/envs.py | 8 ++++++++ vllm/executor/ray_gpu_executor.py | 17 ++++++++++------- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/requirements-test.in b/requirements-test.in index c0b228148ab31..57fddb416317e 100644 --- a/requirements-test.in +++ b/requirements-test.in @@ -13,7 +13,7 @@ einops # required for MPT, qwen-vl and Mamba httpx librosa # required for audio tests peft -ray[adag]==2.35 +ray[adag]==2.40.0 sentence-transformers # required for embedding tests soundfile # required for audio tests timm # required for internvl test diff --git a/requirements-test.txt b/requirements-test.txt index 8ceb705cdffd7..c786a1249bddb 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -410,7 +410,7 @@ pyyaml==6.0.2 # ray # timm # transformers -ray[adag]==2.35.0 +ray[adag]==2.40.0 # via -r requirements-test.in redis==5.2.0 # via tensorizer diff --git a/vllm/envs.py b/vllm/envs.py index be5d9985b63a4..bc8c1499e9534 100644 --- a/vllm/envs.py 
+++ b/vllm/envs.py @@ -45,6 +45,7 @@ VLLM_USE_RAY_SPMD_WORKER: bool = False VLLM_USE_RAY_COMPILED_DAG: bool = False VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL: bool = True + VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM: bool = True VLLM_WORKER_MULTIPROC_METHOD: str = "fork" VLLM_ASSETS_CACHE: str = os.path.join(VLLM_CACHE_ROOT, "assets") VLLM_IMAGE_FETCH_TIMEOUT: int = 5 @@ -337,6 +338,13 @@ def get_default_config_root(): lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL", "1")) ), + # If the env var is set, it enables GPU communication overlap in + # Ray's compiled DAG. This flag is ignored if + # VLLM_USE_RAY_COMPILED_DAG is not set. + "VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM": + lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM", "1")) + ), + # Use dedicated multiprocess context for workers. # Both spawn and fork work "VLLM_WORKER_MULTIPROC_METHOD": diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 4263fb27265f6..4bf5cbbd18ffe 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -414,12 +414,10 @@ def _check_ray_adag_installation(self): import pkg_resources from packaging import version - required_version = version.parse("2.35") + required_version = version.parse("2.40") current_version = version.parse( pkg_resources.get_distribution("ray").version) - # TODO: update the constraint once we adapt to the backward - # incompatible API change from ray 2.36 - if current_version != required_version: + if current_version < required_version: raise ValueError(f"Ray version {required_version} is " f"required, but found {current_version}") @@ -445,6 +443,8 @@ def _compiled_ray_dag(self, enable_asyncio: bool): logger.info("VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL = %s", envs.VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL) + logger.info("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM = %s", + envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM) with InputNode() as input_data: # Example DAG: PP=2, TP=4 # (ExecuteModelReq, None) -> 0 -> (ExecuteModelReq, IntermediateOutput) -> 4 -> SamplerOutput # noqa: E501 @@ -480,7 +480,10 @@ def _compiled_ray_dag(self, enable_asyncio: bool): forward_dag = MultiOutputNode(outputs) - return forward_dag.experimental_compile(enable_asyncio=enable_asyncio) + return forward_dag.experimental_compile( + enable_asyncio=enable_asyncio, + _overlap_gpu_communication=envs. 
+ VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM) def __del__(self): self.shutdown() @@ -507,8 +510,8 @@ async def execute_model_async( serialized_data = self.input_encoder.encode(execute_model_req) dag_future = await self.forward_dag.execute_async(serialized_data) - outputs = await dag_future - return self.output_decoder.decode(outputs[0]) + output = await dag_future[0] + return self.output_decoder.decode(output) async def _driver_execute_model_async( self, From d1e21a979bba4712f48dac1bbf410e0b57c92e7a Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 12 Dec 2024 06:18:16 +0800 Subject: [PATCH 323/397] [CI/Build] Split up VLM tests (#11083) Signed-off-by: DarkLight1337 --- .buildkite/test-pipeline.yaml | 32 ++++++--- pyproject.toml | 3 +- .../vision_language/test_models.py | 72 ++++++++++++------- tests/utils.py | 37 ++++++---- 4 files changed, 94 insertions(+), 50 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index df4fa7a6ee9ba..aca505178df06 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -321,7 +321,7 @@ steps: ##### models test ##### -- label: Basic Models Test # 30min +- label: Basic Models Test # 24min source_file_dependencies: - vllm/ - tests/models @@ -331,7 +331,7 @@ steps: - pytest -v -s models/test_registry.py - pytest -v -s models/test_initialization.py -- label: Language Models Test (Standard) # 42min +- label: Language Models Test (Standard) # 32min #mirror_hardwares: [amd] source_file_dependencies: - vllm/ @@ -342,7 +342,7 @@ steps: - pytest -v -s models/decoder_only/language -m 'core_model or quant_model' - pytest -v -s models/embedding/language -m core_model -- label: Language Models Test (Extended) # 50min +- label: Language Models Test (Extended) # 1h10min optional: true source_file_dependencies: - vllm/ @@ -353,7 +353,7 @@ steps: - pytest -v -s models/decoder_only/language -m 'not core_model and not quant_model' - pytest -v -s models/embedding/language -m 'not core_model' -- label: Multi-Modal Models Test (Standard) # 26min +- label: Multi-Modal Models Test (Standard) # 28min #mirror_hardwares: [amd] source_file_dependencies: - vllm/ @@ -369,7 +369,7 @@ steps: - pytest -v -s models/encoder_decoder/language -m core_model - pytest -v -s models/encoder_decoder/vision_language -m core_model -- label: Multi-Modal Models Test (Extended) # 1h15m +- label: Multi-Modal Models Test (Extended) 1 # 1h16m optional: true source_file_dependencies: - vllm/ @@ -380,14 +380,24 @@ steps: commands: - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git - pytest -v -s models/decoder_only/audio_language -m 'not core_model and not quant_model' + - pytest -v -s models/decoder_only/vision_language/test_models.py -m 'split(group=0) and not core_model and not quant_model' # HACK - run phi3v tests separately to sidestep this transformers bug # https://github.com/huggingface/transformers/issues/34307 - pytest -v -s models/decoder_only/vision_language/test_phi3v.py - - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'not core_model and not quant_model' + - pytest -v -s --ignore models/decoder_only/vision_language/test_models.py --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'not core_model and not quant_model' - pytest -v -s models/embedding/vision_language -m 'not core_model' - pytest -v -s models/encoder_decoder/language -m 'not core_model' - pytest -v -s models/encoder_decoder/vision_language -m 'not 
core_model' +- label: Multi-Modal Models Test (Extended) 2 # 38m + optional: true + source_file_dependencies: + - vllm/ + - tests/models/decoder_only/vision_language + commands: + - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git + - pytest -v -s models/decoder_only/vision_language/test_models.py -m 'split(group=1) and not core_model and not quant_model' + # This test is used only in PR development phase to test individual models and should never run on main - label: Custom Models Test optional: true @@ -446,11 +456,11 @@ steps: - pytest -v -s ./compile/test_basic_correctness.py - pytest -v -s ./compile/test_wrapper.py - VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep -q 'Same node test passed' - - TARGET_TEST_SUITE=L4 pytest basic_correctness/ -v -s -m distributed_2_gpus + - TARGET_TEST_SUITE=L4 pytest basic_correctness/ -v -s -m 'distributed(num_gpus=2)' # Avoid importing model tests that cause CUDA reinitialization error - - pytest models/encoder_decoder/language/test_bart.py -v -s -m distributed_2_gpus - - pytest models/encoder_decoder/vision_language/test_broadcast.py -v -s -m distributed_2_gpus - - pytest models/decoder_only/vision_language/test_models.py -v -s -m distributed_2_gpus + - pytest models/encoder_decoder/language/test_bart.py -v -s -m 'distributed(num_gpus=2)' + - pytest models/encoder_decoder/vision_language/test_broadcast.py -v -s -m 'distributed(num_gpus=2)' + - pytest models/decoder_only/vision_language/test_models.py -v -s -m 'distributed(num_gpus=2)' - pytest -v -s spec_decode/e2e/test_integration_dist_tp2.py - pip install -e ./plugins/vllm_add_dummy_model - pytest -v -s distributed/test_distributed_oot.py @@ -540,7 +550,7 @@ steps: # see https://github.com/vllm-project/vllm/pull/5689 for details - pytest -v -s distributed/test_custom_all_reduce.py - torchrun --nproc_per_node=2 distributed/test_ca_buffer_sharing.py - - TARGET_TEST_SUITE=A100 pytest basic_correctness/ -v -s -m distributed_2_gpus + - TARGET_TEST_SUITE=A100 pytest basic_correctness/ -v -s -m 'distributed(num_gpus=2)' - pytest -v -s -x lora/test_mixtral.py - label: LM Eval Large Models # optional diff --git a/pyproject.toml b/pyproject.toml index 253b706a774a7..c5a14ecf5aea9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -96,7 +96,8 @@ markers = [ "core_model: enable this model test in each PR instead of only nightly", "cpu_model: enable this model test in CPU tests", "quant_model: run this model test under Quantized category", - "distributed_2_gpus: run this test only in distributed tests for 2 GPUs", + "split: run this test as part of a split", + "distributed: run this test only in distributed GPU tests", "skip_v1: do not run this test with v1", "optional: optional tests that are automatically skipped, include --optional to run them", ] diff --git a/tests/models/decoder_only/vision_language/test_models.py b/tests/models/decoder_only/vision_language/test_models.py index ed8f34a677f84..3101d1d2ea831 100644 --- a/tests/models/decoder_only/vision_language/test_models.py +++ b/tests/models/decoder_only/vision_language/test_models.py @@ -1,7 +1,9 @@ """Common tests for testing .generate() functionality for single / multiple image, embedding, and video support for different VLMs in vLLM. 
""" +import math import os +from collections import defaultdict from pathlib import PosixPath from typing import Type @@ -10,11 +12,12 @@ from transformers.utils import is_flash_attn_2_available from vllm.platforms import current_platform -from vllm.utils import cuda_device_count_stateless, identity +from vllm.utils import identity from ....conftest import (IMAGE_ASSETS, HfRunner, VllmRunner, _ImageAssets, _VideoAssets) -from ....utils import fork_new_process_for_each_test, large_gpu_mark +from ....utils import (fork_new_process_for_each_test, large_gpu_mark, + multi_gpu_marks) from ...utils import check_outputs_equal from .vlm_utils import custom_inputs, model_utils, runners from .vlm_utils.case_filtering import get_parametrized_options @@ -382,7 +385,7 @@ prompt_path_encoder=model_utils.qwen_prompt_path_encoder, ), ### Tensor parallel / multi-gpu broadcast tests - "broadcast-chameleon": VLMTestInfo( + "chameleon-broadcast": VLMTestInfo( models=["facebook/chameleon-7b"], prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:", max_model_len=4096, @@ -393,43 +396,25 @@ vllm_output_post_proc = lambda vllm_output, model: vllm_output[:2], hf_output_post_proc = lambda hf_output, model: hf_output[:2], comparator=check_outputs_equal, - marks=[ - pytest.mark.distributed_2_gpus, - pytest.mark.skipif( - cuda_device_count_stateless() < 2, - reason="Need at least 2 GPUs to run the test.", - ), - ], + marks=multi_gpu_marks(num_gpus=2), **COMMON_BROADCAST_SETTINGS # type: ignore ), - "broadcast-llava": VLMTestInfo( + "llava-broadcast": VLMTestInfo( models=["llava-hf/llava-1.5-7b-hf"], prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:", max_model_len=4096, auto_cls=AutoModelForVision2Seq, vllm_output_post_proc=model_utils.llava_image_vllm_to_hf_output, - marks=[ - pytest.mark.distributed_2_gpus, - pytest.mark.skipif( - cuda_device_count_stateless() < 2, - reason="Need at least 2 GPUs to run the test.", - ) - ], + marks=multi_gpu_marks(num_gpus=2), **COMMON_BROADCAST_SETTINGS # type: ignore ), - "broadcast-llava_next": VLMTestInfo( + "llava_next-broadcast": VLMTestInfo( models=["llava-hf/llava-v1.6-mistral-7b-hf"], prompt_formatter=lambda img_prompt: f"[INST] {img_prompt} [/INST]", max_model_len=10240, auto_cls=AutoModelForVision2Seq, vllm_output_post_proc=model_utils.llava_image_vllm_to_hf_output, - marks=[ - pytest.mark.distributed_2_gpus, - pytest.mark.skipif( - cuda_device_count_stateless() < 2, - reason="Need at least 2 GPUs to run the test.", - ) - ], + marks=multi_gpu_marks(num_gpus=2), **COMMON_BROADCAST_SETTINGS # type: ignore ), ### Custom input edge-cases for specific models @@ -468,6 +453,41 @@ # yapf: enable +def _mark_splits( + test_settings: dict[str, VLMTestInfo], + *, + num_groups: int, +) -> dict[str, VLMTestInfo]: + name_by_test_info_id = {id(v): k for k, v in test_settings.items()} + test_infos_by_model = defaultdict[str, list[VLMTestInfo]](list) + + for info in test_settings.values(): + for model in info.models: + test_infos_by_model[model].append(info) + + models = sorted(test_infos_by_model.keys()) + split_size = math.ceil(len(models) / num_groups) + + new_test_settings = dict[str, VLMTestInfo]() + + for i in range(num_groups): + models_in_group = models[i * split_size:(i + 1) * split_size] + + for model in models_in_group: + for info in test_infos_by_model[model]: + new_marks = (info.marks or []) + [pytest.mark.split(group=i)] + new_info = info._replace(marks=new_marks) + new_test_settings[name_by_test_info_id[id(info)]] = new_info + + missing_keys 
= test_settings.keys() - new_test_settings.keys() + assert not missing_keys, f"Missing keys: {missing_keys}" + + return new_test_settings + + +VLM_TEST_SETTINGS = _mark_splits(VLM_TEST_SETTINGS, num_groups=2) + + ### Test wrappers # Wrappers around the core test running func for: # - single image diff --git a/tests/utils.py b/tests/utils.py index a893667e144a6..afeb708f3bcdc 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -682,10 +682,12 @@ def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> None: def large_gpu_mark(min_gb: int) -> pytest.MarkDecorator: - """Gets a pytest skipif mark, which triggers ig the the device doesn't have - meet a minimum memory requirement in gb; can be leveraged via - @large_gpu_test to skip tests in environments without enough resources, or - called when filtering tests to run directly. + """ + Get a pytest mark, which skips the test if the GPU doesn't meet + a minimum memory requirement in GB. + + This can be leveraged via `@large_gpu_test` to skip tests in environments + without enough resources, or called when filtering tests to run directly. """ try: if current_platform.is_cpu(): @@ -712,26 +714,37 @@ def large_gpu_test(*, min_gb: int): Currently, the CI machine uses L4 GPU which has 24 GB VRAM. """ - test_skipif = large_gpu_mark(min_gb) + mark = large_gpu_mark(min_gb) def wrapper(f: Callable[_P, None]) -> Callable[_P, None]: - return test_skipif(f) + return mark(f) return wrapper -def multi_gpu_test(*, num_gpus: int): - """ - Decorate a test to be run only when multiple GPUs are available. - """ - test_selector = getattr(pytest.mark, f"distributed_{num_gpus}_gpus") +def multi_gpu_marks(*, num_gpus: int): + """Get a collection of pytest marks to apply for `@multi_gpu_test`.""" + test_selector = pytest.mark.distributed(num_gpus=num_gpus) test_skipif = pytest.mark.skipif( cuda_device_count_stateless() < num_gpus, reason=f"Need at least {num_gpus} GPUs to run the test.", ) + return [test_selector, test_skipif] + + +def multi_gpu_test(*, num_gpus: int): + """ + Decorate a test to be run only when multiple GPUs are available. + """ + marks = multi_gpu_marks(num_gpus=num_gpus) + def wrapper(f: Callable[_P, None]) -> Callable[_P, None]: - return test_selector(test_skipif(fork_new_process_for_each_test(f))) + func = fork_new_process_for_each_test(f) + for mark in reversed(marks): + func = mark(func) + + return func return wrapper From 452a723bf2e8410ee9b47f82f90c7ea48aa6d14f Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Wed, 11 Dec 2024 18:34:54 -0500 Subject: [PATCH 324/397] [V1][Core] Remove should_shutdown to simplify core process termination (#11113) Signed-off-by: Tyler Michael Smith --- vllm/v1/engine/core.py | 13 ++----------- vllm/v1/engine/core_client.py | 6 ------ 2 files changed, 2 insertions(+), 17 deletions(-) diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 55a5c4dff3a5c..a26ffe74a3ae8 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -5,7 +5,6 @@ import threading import time from multiprocessing.process import BaseProcess -from multiprocessing.sharedctypes import Synchronized from typing import List, Tuple, Type, Union import zmq @@ -133,13 +132,9 @@ def __init__( input_path: str, output_path: str, ready_path: str, - should_shutdown: Synchronized, ): super().__init__(vllm_config, executor_class, usage_context) - # Signal from main process to shutdown (multiprocessing.Value). - self.should_shutdown = should_shutdown - # Background Threads and Queues for IO. 
These enable us to # overlap ZMQ socket IO with GPU since they release the GIL, # and to overlap some serialization/deserialization with the @@ -195,7 +190,6 @@ def make_engine_core_process( input_path: str, output_path: str, ready_path: str, - should_shutdown: Synchronized, ) -> BaseProcess: # The current process might have CUDA context, # so we need to spawn a new process. @@ -210,7 +204,6 @@ def make_engine_core_process( "vllm_config": vllm_config, "executor_class": executor_class, "usage_context": usage_context, - "should_shutdown": should_shutdown } # Run EngineCore busy loop in background process. proc = context.Process(target=EngineCoreProc.run_engine_core, @@ -260,8 +253,8 @@ def signal_handler(signum, frame): def run_busy_loop(self): """Core busy loop of the EngineCore.""" - # Loop until we get a shutdown signal. - while not self.should_shutdown: + # Loop until process is sent a SIGINT or SIGTERM + while True: # 1) Poll the input queue until there is work to do. if not self.scheduler.has_unfinished_requests(): while True: @@ -272,8 +265,6 @@ def run_busy_loop(self): except queue.Empty: self._log_stats() logger.debug("EngineCore busy loop waiting.") - if self.should_shutdown: - return except BaseException: raise diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index 4d96b323d1662..1d5ddf4db4d7c 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -1,5 +1,4 @@ import atexit -import multiprocessing from typing import List, Union import msgspec @@ -149,21 +148,16 @@ def __init__( self.input_socket.bind(input_path) # Start EngineCore in background process. - self.should_shutdown = multiprocessing.Value('b', False, lock=False) self.proc = EngineCoreProc.make_engine_core_process( *args, input_path=input_path, output_path=output_path, ready_path=ready_path, - should_shutdown=self.should_shutdown, **kwargs, ) atexit.register(self.shutdown) def shutdown(self): - # Send shutdown signal to background process. - self.should_shutdown = True - # Shut down the zmq context. self.ctx.destroy(linger=0) From 4e116833686f3e0c0a223b05b5859ad76843a017 Mon Sep 17 00:00:00 2001 From: Alexander Matveev <59768536+alexm-neuralmagic@users.noreply.github.com> Date: Wed, 11 Dec 2024 19:55:30 -0500 Subject: [PATCH 325/397] [V1] VLM preprocessor hashing (#11020) Signed-off-by: Roger Wang Signed-off-by: Alexander Matveev Co-authored-by: Michael Goin Co-authored-by: Roger Wang --- examples/offline_inference_vision_language.py | 126 ++++++++++++-- requirements-common.txt | 1 + tests/v1/engine/test_engine_core.py | 1 + tests/v1/engine/test_engine_core_client.py | 1 + vllm/config.py | 10 +- vllm/engine/arg_utils.py | 8 + vllm/v1/engine/__init__.py | 3 +- vllm/v1/engine/core.py | 18 +- vllm/v1/engine/mm_input_mapper.py | 156 ++++++++++++++++-- vllm/v1/engine/processor.py | 35 ++-- vllm/v1/utils.py | 21 +++ 11 files changed, 332 insertions(+), 48 deletions(-) diff --git a/examples/offline_inference_vision_language.py b/examples/offline_inference_vision_language.py index c6a274ee5894b..5e210126dc8fe 100644 --- a/examples/offline_inference_vision_language.py +++ b/examples/offline_inference_vision_language.py @@ -5,6 +5,8 @@ For most models, the prompt format should follow corresponding examples on HuggingFace model repository. 
""" +import random + from transformers import AutoTokenizer from vllm import LLM, SamplingParams @@ -23,7 +25,9 @@ def run_llava(question: str, modality: str): prompt = f"USER: \n{question}\nASSISTANT:" - llm = LLM(model="llava-hf/llava-1.5-7b-hf", max_model_len=4096) + llm = LLM(model="llava-hf/llava-1.5-7b-hf", + max_model_len=4096, + mm_cache_preprocessor=args.mm_cache_preprocessor) stop_token_ids = None return llm, prompt, stop_token_ids @@ -33,7 +37,9 @@ def run_llava_next(question: str, modality: str): assert modality == "image" prompt = f"[INST] \n{question} [/INST]" - llm = LLM(model="llava-hf/llava-v1.6-mistral-7b-hf", max_model_len=8192) + llm = LLM(model="llava-hf/llava-v1.6-mistral-7b-hf", + max_model_len=8192, + mm_cache_preprocessor=args.mm_cache_preprocessor) stop_token_ids = None return llm, prompt, stop_token_ids @@ -44,7 +50,9 @@ def run_llava_next_video(question: str, modality: str): assert modality == "video" prompt = f"USER: %{&cm+k`e_!njG;!~Kj~)5V=A5hQ}SFX%1U4YvEsqL8=W@e$lYTk zQCE9F?arI!1S36GsXZg|RqlaLj}u@kq0{z2tPE1a%=vcufsqk<1hed5)Xmp7|x zn#*T#R|&JVFpkt^@I#rf7h`SrBLyOBjc(4$v<&$FX*I$sp@2hMIC zKXb=Mr~|$SG))i9=Wn1x?7L8g#(#2rzx3k04I3f) z24u96K6oaa6}nND@@o|NzL>L7aiQKa3}3|2SD{hDTt&kI#`MvKwD#Lq8*+RiX5d$6 zeaK9`g}?LQkN$9QKUuIpjwLpC8f?Ig-~S20BBcHTa4Id*zAVv3)blScEuAhYiBGYZ z=273|yI;;P4p{Uhnu15{M(kF~Luq%|k===DAVo>%W4_qPA%ywCYR;E3j#*$OoLRvq zj`g(pmRBABi)U&n^Kp{q&3LMfkq^20p{(bl!PN*!8+APequHf-1yo@v{LV(oK9(~vTv@aXbS6O#Y6gqkFUQfJz)JIjNJm-% z&MgU0GJ|-%)uEOnFKr}9O%{lBe;FXf+5vXCWQ;q&fC2-w96EzY!0sP0bSqPftJUxzjx0a_bl{9mo3E{o=F)Pd zrMS$Ioks}dyO0KP8u%fV@J`f3DpTy91Qp)}-@&q^1cVcij&qO#-D8yL()D|7<$KnQ zb%V~^q3))c?I)j@>}%#sd8e|i^NCqxmWtj$r}j7u5(;8WK!T_s>ODj!wE3fMsJ0Xf z$@cyF#l@k0!JDwlqW0uz?HxO}^4;E(Z2Db`xk^g{s|7oX8OExAiSgqW0emz<2JJ;L z&GGSPq6FuTJ#D-7FkW>u~$ysYAS@7 zl0ESEP8|k_!HEF7)d?7S@LO4cxl0Npx~y5@1Xj2?!zGOn{fwbsM+);DvD45@RC5zg z$~94E4?bS&y1z{ZUI7G3mnXz#WEQL0dftGLDcj)+D}mwODV@9hW59Sawnt?)$@aHI7^l!;ebn zCbCqSnKStK7S+!fr}AEc@2k4JvM>F(+wKXU_U`oZx>sWK390PfFbXg zT^RO5mKO*sPgjlPDsjt*0p!?KNRqCY_i}0sE!QgTx+mN`cKBRv@?{s7GLv0Sk;yJ0 z7O8D*ShSgd9n2W)U~Nfc3H*ZAKA@Y`!ttvOD`+ zbDCb}-V~?T{=Z81#p8c4Q%lYb^Pev0vzdt6ox#WE%WuJO)4u01U*1dXE#2?0`2Qlf-Q1mf<{*LKqnM(KVtBA-onT zk1qUCZ-;R6!IsNM=N+oQagN9KI_JLPXZA9(FM}gLYSiRMA&-xIZ~tf(^s;B>v)>U< z|K#2y_MBQzNJ{HI!tF9sdEKcCUybC*5GfgvsOF&Q0g_(_d3&wG0hxxlj`gJQ#oH{x zdnH_oYF)fJ;Q(3dj`h!b-zZTNfIN&mQ85Dvsj%gL5Nm9!3)}J*5FsJYed}xqvrauh z-mLc~F*lOrOex4&mkI0@zWP}&njAv_5cGMi;ecQCpECA{mu3m5L{zX?F;9)FE;vmJ94ls4JMbL3sh;jkn5 z|M`wpiX{Q;E-2m7x{YVc)+TXfYrForN##MWg19`W?-#+LkSwj9j-;_BX5^==?-WFM zvs@rmxPQO!jK4_t^Of`Qswt|b@f5f7vd1F!oL!522`XEgchXaVFPexI!nGNI>U$R# zjNmjm+VyztQ0>;m1I=qP=FP$B)*8M0Qq1EsblzgRkAIdtOk7{bz1p0)^<83e8?X?WM8`GltuLkdn|F3lr1nYTy$>_3MiRJj3x|p z97pa1x)zTP?->FLl?Hjqbq81Y)-rAR%_rJq39U)2{a~5xK(dsIITHTZt=RTD1DI%3 zXOcuuk?{-Cyv)VkqBm4jQ^?mDanW5G(gRrq69WO&e>(A(*aiul4pvvale4^+%2u6Cv%W;HZZ zYQp&)>hI$$9s?ja{J&JW24$2+;v@_b+e{=c#{;L3=l_il9sggbT0p*n)dET=?M@_H zS`WunP9cKIs(<8<>hc;S#<)xQv5+m?6Uoal1r4)`{@FvWN$SBXqVer`KF(o+6@@H_ zHs}EHv@5an3DZc+(sB1~fH}4xIknO3OtQ5;(IT09X%P98Ts68FLqPzFo(Gs)KfKa6zF6x!~- ztumhoH-ZKmQGh(40}`&D5gP~Qy(__tlAb`-j?%w4zgp)zJZbDD+&RG9d>{@LqO@bz z;TEuHfbOq`2bln3VbBF?p+lLR6w{t2Z=MVF;oTf zdEt_YvrXrUzr`4=opuhmSozMnV5{Uywgx1+nbpAXYIWVo(xI;pcijaM!iSI^!_tSm z-OwptH+gbwjJii&j>6xLkejUJHAg9L;lEktEcWAWEXR(U2>8U1RjVC9jjlzOTZ+57 z9Pg_2$xu%Tp$F7{qKUP?Ty9(L~oO@KLVXZoibkcA%0-e;=q7e>>TGK&~Pk&k( zHqEBon(Vz&eWcDO{DWbKbg)eOgQlIWijR3Wg7O1!j(qYAY9~vWY0PpjL(AE{84UQ! 
zPqIT9DJI}8*rgno=V8Z;V_90~TF2jT{wP!?l^r8jl%47)qXwCccZef_oAw4)$PI4s zuo?MAdU`*@fP}r9E8bP)e0JuwUnY-5VXW8q(S-2I)S~S_At^Asvd`pKznh|{ukZZE zyjw6R%*>w*t#Yi79F*^Q{xcvUNFU+~q(vYl>&!Ti^wJ24vkczB0AU@VGd@e=1ATsy zx+1C5bbG*de+F&{5f25u;1*ICSA%BR)*mgkqD7#K*l>Hk>MlCQZ|DaJG(LNRXxxYF z{WN}NUn(Sjp?{*)XED@RLt()01U|zgA`cJa6vMpxCvXgt#sPnkc!T!A;K%LvdJEiT zjZ3q0DuUmZcUqN}wVl7M;G~^Z=RFfo^J!+CT)AouJHp%>LyPu3=T8mVw=Qb><2QW^#Ha3W-IbdT69|res(j-jQ zRG}Q8Yvq?tED-%Xr|pvpoTvNUZdW!bgi`Wuj|iny;$Aoe>eNf?Z5^Qb&5E?aiM%tI zIC?JmcbR1UOs?o8rwERJBAVJM{dy+M)ivOb>wXE>{fUQ%+pau$P_#2+a=WK#qj^py zB3cyN)3+tXd5+`$Tvlgr38MP};qLjGHcKDD>7RBeO2Ia3o8`*0k8Nq=vI(A~30}s1 zPMwL^G|{;^w(cB^1I(kb5bhjX+K`OQwo+ zUOP{~^g0ibhczb^D!wEZt&|f_Y*mlcS=i=Fq6Nv=IH&+dnb;LV8|#pTMc+!^R#u2G z4djQdcN3cpS?YG61ml5Se$16vkxIgnzrmOXH`?WGuY0eO({oY+j-NG#Rs}j?rxZ?`t^|6<@m~Cg(R}P!SyqI z-h9vfPdl||Ep#yDdFCynu;d)@%XZq#!_=jRnQU=uXDEa#WYr31+UE#0uhvlQwepOe z>@P_u8Y`%Zdpjqjm?#hmpi$6WTuHP%?YqftrsYhTdMnZV{iJY4ikn1wlgofhgbid1 z=-LEZjvY*l~cJ)=W2UI&gld2FFipBUb_3w!YW zp6ko*K%-RT#M`_NeHZ_);@tn-Z1SH)&i~&Co8x}kaRnPOn4@TWP6q7ecsCLijS%IF zUVj$&JMp?=t4&S21^W$54hb@r@=*yjxBcD%5KQB_Y@F*9;A zF6Gy?;WnoBU~QXOcIoXiQ+X3x)j*snBUXI@e&WB7J=KqITpiTyeeio-?yVQ4=K7FP z;@gVyV;L$D((7ev+#p@p1ac;mdIc;?;hfiK6?%{4m=W~Ne2z5!d0T3gW&~%}v1_00 zw|6?5Ij4WG4AZ24FYGuohrBARxV6UspQ<}NDm;Afi@C$ujU>(V$jg!Q;RZGZo<#e` zTMET_&K(bssy)`cF^adAdXtfxd7N^zDJ{;9(^O3uFQFubp|8wh9~1c5(83$@IXh`N zFUD$zZuGG}6yo8>k10vgAt~F&r!Adj*9qfQ*P1|Y&m*`J+9DqM$sWXl_2%jg zm3v36)$%|2yiiNsMGjmm6ASXRJu7);a@T<`7UH-|p+^z@+U&hS3@HwnvEGQVGFA{~ z;+h)zG+1{!gw3wJ__kpCsrSv*3DN~53VCPG`BMju$cPt?T7)*;lla1IY0!8GEL1ER zbu^aqH-wOOiIK{pFZXX>V zwcV?0tnl#H{vDsyeY_Wy?&*{k?N7R`zT(6%iv_y>CfaeZaT zSwt*%H*m7Yf>OI+u-XONzNUvpu$j1$gdL{Cl!``;v2CASPvux8cSyk=MCjZ1Fy`;i z!HP<>#e%W6gxqsOxU%0`WmbF5_Py(uQB?GOoIX2jyJ&LYtC67>qhA!^G~U+L?{l!d zDVa~>)WRv;oC}4KPD8d_bBn<5=YGtsa%d;x(0I&0f2@jf=Mo2OdWa?j_jaE_mcYD|_N@dtQ%~)rrr|ImuhzV4wQ_g{#2gt!t0nJZKNj#j-!pcS_%XURDKq}(+m6r>RFK;P znT(#xSv;NJ4@5!F{l`PqcQg-Ed-f^wz${KW*t_3mRGX{F3~39J@;j_^_RaLUPNdxz zqhgV1h#z26iQBc_5aJ|b-EdLRishv!H<4DV9A%?j5GsP`&r?6msC9S{QuL@!IQ7#R zWMiOZl;XALxcc6+GgG$8O*M{>Zyk}p z2C?jM<@?PUSRr)4!VS7wp-lbN4z4P_^nQ{A+p4jOUf!%>w$rKE&9){z$OTf_Hdh1@ zQk9l?@h&8BDz~T2ikkzZH?2#Ve(+J1OSm8}q0dsUboSrotJ9;~aYqz%;7rvuY{lCl zD(pStV^;_C-X?y8krKebYM1vte=z@IY&?he4=)0uc8z?T)SZB# zpHikwMQ*cP{L&bzoc?-x;eJ@Z*Y?!6#=Fdwuxe}^c!w`yTg;8lvao0q`n^ApJ-Q)Y zf*mH}gxbB=CQ9SBX9vC(3ptzFx5@R}*YgnfNuYu+X;@nt`zar2P>j6*nrb|_xyRgH zq~dFszG*Q|Ho?o-!o8X#6pU}X#`Kuo^*zw>R)V&-cXe+O713iENj0(rYP{I0TqYXWr*e!fH)5;}4BYNe3`{VwZrb=duitHH7SI8oKLRike%u`9HxNNt-P>uBb+ z9?qL(X^pzPT{v$laC%~*bWF(9C?Y-ukt0O{=Z(o##=E2?80|c4!m`~UAwDjGj_Qzc zq2Uvs4;|JB#NTRYNcxfRK0z-16VV_{bPn#0rg%Y7E>QR9KD+*Y3W&&qr)JKui7V zZV=21=?pk&N&I2SoBQp|tY%G=9A<=*MIgr`Xcqv=?Xrm~V5Sf*5a@!Rl+&EYHjoC- zd-mQ1h`}2&j(c`}`R0px24SYa$e^2Cji6o$BxvT(W9`eKiL4payJ<6`pS%U>2|)B&G_g<+?Uvlx?#+g6?&jJ3^OV zT`(vZ(iNgAZR|mR>3GK32_ZYI=_6ucv}=DLn?~4iZEmkPcI!Nn(SSFLCM1ZM4fQfo zKA{0dG8K!%jvrtC12NabYGG-X%S;uEdU`K+R~K!J;+52u8^aY&va4?BOq=0&mFcpu zT0bsxKAU2$kADm8#x2y@4V49i>dac<r{j zG9&P+XpM}m>#9h}AB)z6T0&e;4sphABf6ZRot>qgv%@#H5=I@F1wy@CU1lar4Ks-0 z0c6n!^q`sF={p!@ULL;M)qST28TyXpWh$3;x7A?I-v6jGd*WuU8D5=%4o9%}k5Yc;K%GkxVAw%6(z?n+Bb5vChRJ@B zc9IEeJQs7<*LV2mR&4;gKH$ngpocG0;4LU=W~0^D@^$q-XQpw$e&V)iMErTKY0_uI zq|f4?PUnQ(bfJ>sV1h=1^tPGAW?B%%D|)!enBpW;O+7ta0oU1;iG=i+9sbR~Kb%K= zVXSDKfRe{yI~n30z2hu>V%O{#bqvQGY9rB5on`NWcYJr6r8S;niO=OyDk}g&a8rfj#7}?YPVjZ3wC$vqX#DjmpBH{N{U?QE(+HA$fAjN7E8-<|cj-9tx18*in%C2}G z|Nf)W_6IOGoBNvNrdQVB$PQw5%)$XcrCkTWJDU9HMmI>)6Txw9Lq*wlQWq$#V^=em zf>-f@9-4bJbK5>mu2AsPoCx#<`g5>Xhi8$3$G8IKGmdNU7=WeImiXCLp8_Ci18bkG 
zyMta?oYU~R-X{BILnZZlO4FlQPC6$a>mm}4t?v3*rp@r9M^qCL5!KclHE!qRSs$Y;^VzUa`valsvlP*4oOerr0&lDzD2sa(FbNm?Wl)>7KYXim>+{?> zt1D3%>|Kic^TT-d*(V(-P(7jN?WUa?Sc9jcpS)kWaMn1L4J#ArfNrm&qKd&=bQkL( z*#D^R#_r5^Vqn5dueBaMxbc|jD{Hy4wQZkT#Ee@X`}6f&zoqiRVH~@S;HpjRn(3m7 z&~c1X&a367141y-CMn66eud17>d$PJ{cF;C8lIi!%Q}#F_Hv_Z%dDkbH9v84!;i!KP{1$vFF>SuSL%;^6sM}m@kNm zq)38Wu1Ijcb#5n^PMxo3B^=j{;T^JB`1(6m=ybUbR51VfC5qwH)xADnMp$+)L`>5L32?yLnaG?OTXLm|oZ`H| zi`5JPCKoE05xE67K$EWbJy8D1EIv8!y8X#36WUyH9%vipZ%{nP#ElgW{FPVC6MO` zn-;=doJ8(!kXF_-W0>=VmECm!U+2M&fLzb}M;+wYTw}+j5I^l* zEC1Y#;b5-|m7w{SIKYU=;vYstXcbxz!;Y)~EEEd(C>1#n6J9LX;|U!cdR zmyTx%wEyt>q}V2wK2Gg&c9?o<>vQoV<{lT+>?<}pUm|CJN3q;7oi`MRjP&XH=Z134 zhRcjh_YM7O_>sI+DHswF7Nh7sWv18_e$m8rGL4F@%E@GTEd&v3Ygn#`;FI^fH8V!L zPSI*=+LMdBSCt#;8}GW^MWo!0=WpGYn~iVGOAWvHD+nfSvKQ0&7kq^p?+Zu@M^s>A z*}ME`jdsK3HS&twJ(GqLdKVxhlT8;>&tCb#14>;#pjmjvNKcMBm!Csp47HQ0pk297 zxck8uRP{t+6pjpP1?x(DgN z<=3et_x)IhaHs~`l)R{Eyo5H*i*&6TsqS3eUmtd^qW+3cPx6y;ONp&IbG>uwE4y7* z^=EP;31pmTaMEd7R|iT0t&0+w*AGnfeE22T1-(^>^Vs*eFXCpPb%=*joZ8@yR=j}F z-NvLi1yDK+PY({EOczl4Cl=fBJLw`%^x%r!Uz@JD1-bbYRG*Vz$CW6fPTo0rchzBk zviQw?oON@P3z|O8>{6yFIG22Dq7jympsVf0d`&SlwsvB6hmi z*o{;WcFM{hSiT3}(j{L%;^r$9v)`64{eXZOIcZ2%_5OJ+j2d&1aSVhi1~ay09|tfX z-{^JWWq28np=6kOZLBA{5J&1aZ@msias}aeUHe!#A?K#n!Wlzndq zTXOv-o+edpD9gE%mwRwQ3TB8m=8cB0HR=ha~I3Nk}?4!<6+gBb#0rH@^?{>8erouKdRQ{y0`Cm^zq}p z0L9qV)kCb@EIgC~v&Qe_PJ;>y%md0)UG6|ltj+{x6==8QhRfbDz{a32_*OnI$(8&@ zv2#PJWG9y%zrT}Z<39D|nT+}VI(gO&kbd1Gkh2k0HN9+;G0dT>!OuGeZYT-PkOcA< zvtOjWmWeiZ-^VA@Vt>>06L)VC#ib2asZA|r?LZ%-OM-^=j+ab*dQfLBzuOqf$P{~v zoGamEIkSn=+X1h+zCfF=)ctwjcuYaYAqdsBS#C>|^8!nLjR4k10W{oM=x*38&?($m z-T3SE2kvks6TP9}FSNb!=P>VsRd)T!l&MzLS!v#%1H{la0NPRjGoyq%a)F^eevILm z+iGxrpgySsCXIqsytY+56gYppqkPI*_s(7oH=d{OR=WGKZCP04GO>~xki=9|pr%s)ByxTOnK!rlcQzYE-xDck%47s|>8T z>uYdhbDz&x(Z%*le5IJr02dBps?;s{?K#UbVk>{uwO(ft-_!!C#PFPw==sb#3#Uw_ zV|S{fiq~-jWIJAl3)+ZPNuBB1G(QSoB-@cUI%c58$%!VPc^~o3ik<#lps6jhB%IBvf|eA2EYx%|MJBBrPye!R2A$OlooR3L{G z61$41(FcGZZN^uoOgZKYwmL9EDjIzl*IDdyZ)u>x=#H*$$3aoL^h{dc_oEbOT6w{;yikgB2ABiKaNOZTC|S6A5ZUYGHs{uv5L`^Gf|~R~ZZY zijl~6wX-GAT3rW99s5KSrHi95_E%sj!Vvs{(kRe4=voM(nvW~K%$1n?11YJ&zLbX% z1vlZuY;~Z~u+NO{!c9Ul!T{hO6jEE0m_8yMNn6G^A}CV|fP6&&HbNwPdCW4!9%k8@ zW7L7(5=H|%K*a#LB|8Gs28eSUU{?HrSk|ykVP`G^?Iz2uMu*L>K$dK>gb* z&a}fMVZRszAp3Nf^j90&!SfHqK!m!3T|~Vg!2}23e{;=$`)mtT#7|Qo0``?R`MA?oYh{V4tR7F@?KIeJ~`xmTQY9+Vk|4FdKv42Cw#|Y=zF$!bZ_CT}hpDy@s zW49iNTYbg-is-=%kIm#hS(k%KV_Ux1S)d@A+d2i{3CRSo!INQJ!Sy8Wxst7ys5z;c zD8H>c{|n#BUkD&PH7+&G9Xcez?gMk|-&{AJE4j?x2Tp#lKM*i?X^2wn$`z0TzZvpp<(6 z+Ye|jShmz6l$~xlyogl|aSVbLo>Sj^{x3hl)+8}c3UaYrurQDbU}}ATXHw*}X+~_T z=jhlVK9l~%2VgT`NLoH2R>4C-!J57W7WG3m>jtDAg;Dwb`P5kMU9e#MtIvQYXApNc zI3MSNawm8|?NQjKT2k>S_Y&j%Z|wP}4y*eA`Xm2pC_^vStwjCbVf&ZE#zx4)$g|A~ zrU5=4G<9dw_{q-f<`~7VR(GC>uzu9#ffuVCjG>tcr13h2h^Vxv2zd4JRIW4Aqqu{j zdj^UzyM4yN^s+DJ(yO4wm<-9%py&}Re9>BjIhvOx3ch&>d#j(Rib*j3xl}XTXzX^m z;sP-;!dEz>$|Zj1YH6Y1kt?Xo&sW?x?n{r6MjqYztBUhaw1+ zfBsz?Y~bdwjKj4GAn^$&{LMRsysfS((WM^O{K_2B(>y%q2bzX^2=4^Ax0^f)Eon~N z-G=n^=i6#YvY6fT*$w+{frRsP6Vejln=x?M)x)<)Ns zR4X=$*X~0ttuXGb`_VfPpNP4Mlu4YfvjpPMryl%305Z@s}pbGryNgeU;O z)D{Tt>d4tQP_Ap$1f@`|TXhb7E>VYTWhb9}8Tc;3VJ~EF>e1n`(RKDZ__;%vqoyn) zObek}yGuI(Z7!SkzRV(5iC?#5lKJRur5V`FM z?{@dX=amvk4R3L6xsMURy5}{PW`h(CVnlGV@cZ3C>yax<^mMp}iqKg0mtIHq-MeZ% z*F3U#exfd+-rUO9ly9!*H|jPMurziY#5Z!1O;p*pIB%-&45D3V%CnaSjSwEUu>;FN5al16mMu*Bg&P4@vxyLGsAK%4}1OWTnOA_=R`UkG?_F z>G%Z{Q(eCUHU6_5T7zg8*~c`yFw|r|^Ss%ND>3AL!}XoVF(<>f2G5>4AL_N3V3lWe zIziPKh;3NU=SdTLx%~AshpuUvA@ZmZywv?6UljY~t-wx0rvwK(b>R;m0@Q3JRQ!^A 
zKX%T2nK2o=?r#s(iuzq6!8E1^&DK$8Si-+BP|h3t3HB`-u0xv-U*qC3D#TrQ+QNEERit6*6RsW^FJC9%tHnWVsJxZ|1 z5$hOt;ju93^8F1XlkE?uv+&c~>)=)6H@Z=x<6%NiOw+!Nbwhc<&=IPSgIenLPs|$D zZDhk0Tx`>I@^zGR)llip+M}l!44qRcSuSBEI;T^0E7M(FWTDRj1c~a(`--R>^ca~y z3~9xm01`B}SXz$m2Gqhbau?@i}vdS5Mc?NwI1ZhJb!3t80NaK`+dEa*K#`TsILA*(ZpWq+*Dl&Pe-uTJX}w{WMD=c-nk-K~9Qz120bj_*C;jVzzy-n(b>6g@13eIj7Ya>2HzQTl$KRxT-^D7q8q(& zNYO$x{ZaIco|_M59=#)h&_hcwu7jSE-5akLPmHS%WmO*Yoa6VCB`kwp`e}=zEAd?) z3V5ZJ+{WH&H)rt6HJFHAiv3$J;m@PmDwrh@JtP2Jn^d1$g-TYBy-DkfXcWwY->P?v ziv@;2Cjq-lyUwpS7N8Di)$2QEVY)H58<$pZ$Y(ampwFyo$MatQ%I}loqZyU1)5l`7(avF9^#`z_|?oKull> zle;s1U?0G%ZM&$uy0t{_MVIZU5)GCSa6YkkPlL`C7HWL}Y=-;bZ*WB`-q-DG$B#!i zS;{_%hwTnlwr!mD?Q$M-^zHDkTmg=ETO+_fZd)G-0Ufn#kr?Qm044w|$OeS3es7eZ3tfJ5AE&8& zqVrLHHA>YNZweW>ZNh*nU`{yLTj1(OghPAGA5n@HdFe*2hUF_YQMh-FXHGcb1AIC{ z3~Y?2Ws>p@Qc?dD`RI#k6N~y-naXPIRse%AtV8e9WeSX zSn%XZM2?=iqooYapesr14Vxn$5P$CSOeD=lBYyb@+8B~1C7q%7{>&);0-QmeyTXic zUyRhzYV=Fxlff<>^_TN^%jQvW^63^lk{`;Jv>?0tqGe|qrmii_7b)PynRZ}E3RbgK z7JWgbHicB0{qh6+qlkNr-8~nsR?@h&oK`Z&Pu?}0(GZ?AvHb0G_fg1}f{$l&+?Z2% z4(r-X-u1#cA#hzas87#90!Q0$yT?@QP}GmkVa9M`o7GmAZk?}^oEkSA)d3Jw!N^@cT``*mU~1* zfb02uT&OP&UnZ1J)=$OU#|c2}o;$%?53RZIQhX;B^RD@Ia$rwX+L4SvF|_>+ zE03v-63vyx-asVX<6Novu&JA#Q6muCg$bw)OXmurEs?H=wA2-1Tfs3>cKkg*{X#2N z%#eG2M#Guk$HM?T%infdjb^|Was-L=weQ$(%cNzNf#b+>GFXni3)sycx!q^Cj{!&6 zze(%;m(H+uGl1Ju%-9^pUc-&%9Cw_$m1Sr`cO+!egI!u2c1b5KRfMz|Xu#`guJ+37 zUsXA8x;S9-ZqQH8CC*@)GK4OGHg#Ai;G8!a(BrJZO-g7I9SR1d> zy){vVo5AG$;rj#P84FkyI6+f=dJ3J$5cO`x= zd8f1(wPVK)GN(Q~uWETpQ!C5bFdqRat+W9}cd&9y*@iwpklQza4wIuyd0lR z?y!`kZk*g!*9TH8zUy0->>i}-FBkwf@y+_V`s>9_J8E#E?aEUY$LwNVCHdW6D}S#X zvgP1}>_X@FNU8NbpGIh*;BoT~{ za^JKQSLWe+yI|^NQ=F2wC2N8O`4r8xZ>*PM7s0$w20Cmb)S7p)USwq3cmN)#XcnPz zJB@Y)j{4m3@4f2iCq_6g#Ch=2b9^4(#;8f>N=04BKh}C;KmXD8;%xLi(7Q5+t%7#& zUvS;=IO4x2#=qwJ{Xg343^%LC=11NjZU?N2uF9peK0rSLaD*)c#{FapH{E(%lnqs%ip?ZptfGgrrR%tGSw%j1nP zK`FgRB(i~7?c%tD# zxK<QVBsK-jcWSNwU=LP}hI)3cJ6d{oSCcUqnJjT?1-V^(r?}f9sQ0(>6 z+1()^|J-JzH>}V~HeVi_H?JxaU@Zv$Y_@o6>k!k11w19Pu$B15Rmz0;a;x)R`bY<2 zw}p?_TgCS2k6-2njjcFW+hPSNj!vepCe4dblTQV*6)ziP&wRq#R$ja>`p(FOMqq$Z1qB&N z1aIWl@^}M4nuPcVOf5$RN;qJureqI|Dh|D?w00m~7iSc&PuEx!!9dM=Lh5Vi6u1+n~11<<_jyQbV=3!Prx|7b@vO&x8Echj#3*ft3e^ z$Ve6(J!^MdzlOHT5Qk>Aj4>V9%I6~;R3*ru3zR6u0VQ+wx(g}VHdg203~#&rie*lX zu^%6}`G)XFdu?*PEIdEZcl|qaM?V%7iUec%GS}tBIxvkVn31WOQOI_?dTn?AcJItx zV@(ZLrAI#1*04s6u0NXfR-I0Dz7NR%%VaD82`SjNA_O4+cC(8BgU~C>)u9QwT!`sm z_xm7rcu8egC+2v&K2GM93Bvtcia(yd9(qt>5o?eugaUvuxM)C$+7v5uk??LDDmq&m zKSnXBqdB!5Psse%=clF;E}!`3V_Egd zjWHK{R&=8>_=E6do_4I%{N&LALzj}P?ngF#lXR*bN=ozn8L>(dFG7S(1zuRG0ccT# z*B3nL$vu3h#weD}OQ$s5??S+IKe;&$Hp_dJ_)jhH`!h!OW;ou>Fn4)&`c~5J`=$L` zUuw@iLN-=j?0Q~O+Fr-(p44kY#35>sZOB+64RBt$gBx0n4n689v!Y8@Cj?Eg#40%> zgT#wO=?Q*!eoC5(8FncnLeFLX;$d8&Wyb&NgVa)}8ssJ`tH$bqDp`0Nug87$27=G+ z{?0>FqJ_pskz_s_gOLRvQPcG2zd1_4z*&MRT+t5hJ$!p#63|RMvV2AlFWG1otSjl(nL~bJW%|8u=MTkTjgCy`>KgGBBAJGfj{Z9{wIg-!PFc8&6ut z_?9jt9-9?-@PrfR&i0MP-f7*I@XY20@?q z!3n2=W_R=Hpx!G|H_R{RLxLIfKzLT$!PcU!2>bYIFK|5&oi3E6or zyxCqdyoc+Ja#q$kFPm`1IZu_BHA;V97_Qi+fUMYn#!o^fHXjYHs-DXc>epDy&bfUi zP*bPSY|FaFGkH2pEgBq4q0K^2%9JuwBzLN`m=fhjhRYSsCIy(L23CCcT5bA<>e4el zUc6et<~Q*tYa~r_LayK{4MIM*>A@+C6nYeBkA6uSaDeXHq@n^0o(TmKWIP?Wx@GLv zG^P;z?s7Y5)lW&ed=7;fnfYUPD>;#7_76N^aqq$tx!hlOkY3F|U5by+h<@J)^9MdQ?_1dhG@^ z`7A|QQdxIa5R@c8*nM)6A-AC2^l?gNyW1_;9(Ot2^c&Vd$RtPVN$Dk9;>1egt{rNd zyZ_XBUf>UbGesXwH7%~9cd+nm1qa6&!&tOq{drZ``fO+O@T9X!qD;z`-(w{#erXQ{$PL$Jl&m@n#m>d$GK;GTXHK#1T_dNZ`=P>nS|OG-kyV4ecF$ zx}VmUba!DAgRIXK9uG8XO2YgCs}X(}+xkZ@)p%vZ7HZ#;LHm8lx@ebSjWAAejMlV# z^P|h2w^cbQ@4a^TX^?2$QD*M0X}cFyok?$N2bHA2k6Jfyg7Dfs2$1GZG- 
zM-wKFpTH44&j9E5zHmGV5F==&KqL$$a0~{1NbsZ*!QJ}gtHEx4p4`gLU0g){J}F$e zAq-^RJ6MrM`yS@PnstLE?C%6_1Y+dhXPjB>>dd~qRU~s)E}e)rW<5fZ=z7U`QZ$xk4v&qOK(d}U3Zr5`SQ7c9rPFMSSecpj z`*}<#>@3f`^YgbJSwMVPC*etH-scHgZ9q3Qos1hT-;wpa@q^3WQh^$gUy6#!hy9W@zk(BV2W+PY5;T#vA-ptFCSiLCQ6TzsIga0c z6K3}C=bJRK6UV~{HY(djm@=S^r_x#rjy_^5aWk+6N0ilLp9HV5_6hM2M-F21Ki}eK z6TwSiuVts~0@1LF&eMSt&`3b*N`7IUd@&bSPxkuh5^=dcLN|B)F!j+jc}H&H9^6PC z4aF8xr5PnlwBn_jg@ebvcPi{5Mx*Y|-gtYW*Wr741m^1Vf;+c&sBo=+jb}<^lGm9s zNRocs*JdFmoc`rvAJd}IDzvDZpmdGD;aWaNxj|^((y518A3#w1_ zsolG#MtyiVSY~AWkIs-MY&4(Ta)E{;D@UU(r6M#KmrJ}tGL*u29x3{#E6NcD!j7K; zy@!XNpR$>`n}T=KKX}?58C*E#ma2g7Q=Zrh_AA;8>`ZyqE6@u4w$5;AWWB_?5o27) z=*d0JRdpkk?z6#%Mox0Gs>%K5j|x<@HC!4bWAs2^H{aIL!)w8L#%!X4gNb+92I# z;=x-*0hcgd&^U*U>X9_s_*3G>ShF!aagryC$2>4jH90iR-yBvdF&^joSNm*KUH8d$ zrEZ3sU_8)Z%_CwxbM`~jLQHRN6}*k*Pg+iHG(jEz93!58ygL7tXT)ewk8BgQSjY6* zQ#1jjj300a}EQ*WAt1$k^$r#B?ABz*(m6~>3h4Zo9XLtoH0q>ErCm4Pn zB>JAHogS0?BMQT%d1V!Y8qGwRdQa-gtspMGMXPPiVIrc>?D~IY02OvK-veRiOq8WM z{~28)bng!o;rv}G@T%@FQ&5(GFqJdNNCWDC!{^!E3S+Bm5p%RUEaA-7?#0|2S@Vu3 zvTIPQTpa{ytr{5yF7Loz(13XIw{r^)Bq6Va>UAP2mXc@-Y(5}EKRQEqTU^FD66eph zuQcaRIhlMNzfkGI8c3G(TM_JID&SuexuCvwgfLSgpLvSrG=Ov;TT5@wzwQtJo;5bF zo2HDsWy)>$0|D~ms4*D$H)2u)QYWViz6x$I!S|PjOR{t~4Hq1gB40UuZvKXSAnb^3J7S_n)e#V@bjq_}XVH5-FJumhrwgjsHBYC73zH2Nxr~ipgwL~hjM!bB}@bI>n zS0{L6Mw&84e#BJV$8}S1{06x;z*ucgX37NP$plAl#51%GXpeTmccBHK<0Pyk^U_G8 zOsnB(L**iZAF8m7V==?yN5qi;S(n&` zO8Ute35LXF;f+UQef*T0-C8S;l27vN$g?pJA`I(h7kM2!QZ|cqnP?4ScnBQc5ILB0 zHDOrV!{AB~PbxASUNOrQGng`?T9rb!HqdqwULrWYx5Jo7t$!yjoTPU;gmO3}Xhcx+ zq>@*~M|1R7q?YM13cyVM=D=YHffz}siHsYqXdjUi8vI4vGfxjL^)WqF?Q^*WTX;1( zq>x|eaJ#hVoxo)G3pQW*;f1>CZ@udGKT!W%6SDnD{ZsW9tJ46Xo4eGN9s8XiqcR)M zj2Xd_%8{^N*cd&y+&hdg(|8CYb#T%ImjDQ!sP_}OnjK0>ax==iwBQGEdvGqv+C%Ii zr1&2RI>3(N$M6E=8{-Y+PZniQK;cE>!ER@okmW>=F_P6Il;7>Lt8X_P75tM{)YLRS z+}H4s3;%#AKZo4K9>CMi!orc|xK1a2P=9pp8_IT|+hhk}3rwzi`erd4?abLd0`BOp z+K08@@&ZXB_9seDF#wP%A_3x|;0f8_cFOLU6rh{6WA}`EMA}ylqV2o{D~R8G`CMx> z`!o&i9DaAHh1%Lao3tp7IOSQgww96UVpR0{!$U_$(}CWD4^)->5-YR}IbPh%X=~<& z2Dh*dOl|1VJM>;CgjP%-3RkiC^a~oSxy(CXZWK3eudff5M<3!{87+I}Vt5&Z7eINA zMpi8^+ENf;M8VG<++NnN+snVe4;)3Q$Jk?EPwJL@v^Ut7qV|w?yX8a6hwihEd&|J! 
zJkro>3KrWeQ9-W>3ovkUGaK5~R6>1RtlRjZ;El>p%lF*E1i$#Xg;gH*&ObtM5zc5h zR|=FdiJc4mbdLxMJ)nHcdW<;$1x-nGG&ng>F40Z1{a<@5ymF9k*Q)W83|Bn$qj9Bf zo6c#}dEo0@67T-U6*g%u3*T8&sE2(A_aZ3YIL51VpQxda?iwk5FPjKcGF8No&zG20F^aUTW z#`P@!hP}06?FV0f(2ADgmI;4e5PmVCU$X*n(j6DZ)@M&z&~SvYqIN_Kmaj30u6F5* zVa6~l=5E#2zPBIZG$i!SJ9et=d?Ne4;^FO*nrd7)76ldxFZP?`CO?2pYfK$_UJN}t zHIV;wS*O@Bz{!E93Ut0IMd~?d0u6_jE@7O$Z}TTyD_cJf>fnjQ1AW(#UT^lDRP-R&S{96)yb>Vx z0b1x-307>~Lr9z&MCoP@9Q4(YupNA85p?%jVr?|{zEf8FkG#y_DC1JV?*bmw0Dz55 z$dxigQBrm?EosQ)DxG1Yy`Gn=kEM4u`g?2&KDm-%U=Xuf!IO6KU~J-B$0n;RYxx4q zuk&|tpa0MdY3EVVlRKf|Vs;;#uTg?bjL%W0edyYx7Cor^RhMFAZ+9mlZZ>1*sr0_3 z;ZgSaBhxpU#u-;wC2SGQH@R(i(8<}+P*vAoO0@oZH_3*}>-AjFo3H#&Y`^;oX<9zJ z9h_&Q_x=il7(*0h!|G1XIM3 zvfhpuMX`70O(BzWB@TVch|Dv)xAJ5JUEc7$oFHI+H00(Dv0VX1ZTNE-dGU7%G(zpE}r6?`8L8LMz|rf+{cKK|6o2C%2j`w=De&t>^pGx*{5{mAD; z9-t1&hCxwueq*3!ZU*LviAJLHn1h#(aneH{A9?TTozO@Sc;9!JH}H72N%-y)7#5Cj;SGKa@e||M6`Hm$xeC(;2OozUR`+MK>s3 zQ7QfLYGm6{IAZ}qZ8<`bokZBL377-TEfLMHqq%^Qr7H=KBggr}&fK|iz%>apW_-=| z3V%MVP0EDRF7fZBbe=p z-aecYskBQdcx=jL%^|+T?M_Wbs?%GY8yD^P7OH>?o?@9S!L#D>-oi_6*WwS`a-6rD z0gmA=j3g+XgpJ-Kv6!@_VP4Mkgy_L&wJG;>M$_Zn0!Yt8c0ZC0%z2Hj!4-FzDZDHg zQkZ6(O7vX>&T&ubuOezvm||RHJX6#Z`HRTOREV~XmgC5cjv&*7_g93%xOXAFch2AL zmq}^#1D28>kne#eVif403bGzEcW)`970&!*3k~L1ZdtvyK>6DC`30y*ZL^AdI&)2b z`}^9n+e00ZYUh3L+;J-ubG`W=I=&6bKmXtARs0wIjQ>XvukVuFPJC%JyEPUR>-_>> zbNkZE+9X$F$!kgPcDBmEJx72QvN1iExqF$h3yPD^CLneg!rp$cFmmk*zp>(4Zu@C^ zX*%uR8ZOxoOs%-NK`_>brm~z|NIcQkQ=in`SDC3ess!J-t{~tyD9KfbX#;(Mz5#4& z9g${(=kmI3OS>~mXx^5yWbcTLL0^9}>aFB9v+V{C7r+p?CAY)H+24U)7Ec6%l!4@t zCR5noh;oa-K6GAMLkPMxa=C`Sqp-x2zoxgUqN<_B?IC5~c<^Z+jU3V69GCCmnbCLq z)~A1SeDUJ%Ao8*$p}{VU9F#y_hRto}G)*0;4V5@sUt{5`!)Q41NIK}6gSLXoWwpex z?V5=6K1j)SQE!HH7+N2~ zzD1igqN04b=I$AOIdvy?^Fhof*2X23+Sb-S1^kO+%$a7O6r*Mf64>+aS#K#7G3;Fp z*qD^5k-AmNs>znKPOIUwjqJc(K`toI&o6zy?lIzn1S$AL5CU-Hr`iy5=#x|KM$(W3 z!*@j2%WeRLGWm=Tz_ysxrUNsYOsW9IelaT5{nl__`l?c)k zIQj-F(@^>q^DN{?x0wqlv1>%X{n#PKr7X99i z4tr?xWlwV?3qWm1&yE#!!Xu5upbh#gxu%2H=m3=Qbr9Jw^lJk1_!W|atFw&kIEw}57 zdceeK$a|9T#w#mn(l9&ZkoUP#`3U`dl7pg6LbX-i#-&< zAOe186;2TAGoBQRn$9#HE-4yS623aWb68>iQ8=d+2Ttbk#cFJDIlW*@Y&~m^fGZNHkl1p*|z#{*F99b%@;^-R0*c{q)q6QNxz^T$L5K z6EPRoC8(}juE8mkOy=(0EzSoZLBHcgNmzHgex`N-*0pT4M59W4{B_9r{0^5RZ3p^q zexP0};vWtLswD@&RPYun6|)T@v|yzVz8GAyb|GXn|@##3cUw%nKzXMR=$MuT}~1nBhDwB_)moE=%N%xv;O zBAfOb+`k{<>!XnNWA9o1n7fKW9}B}(Rl!-POW&GGiNT|`;9=NuR5IFxk`iU4POG90 zeRbFvI^=lztIi0lUM+W7vBHU`Zq}V!zyzXL@rC>$%Z>Ht1jqH@f%-(2J&o za%to3{6$?~S1d}}U(eq`ipvl7)Wj-uNJt*){_e(OaQQJ3!4wMrLbrtuf+4LP2@CBv z0~AyUezCDXz|kO7X?${SwW;LezBNZBpR|rkBdV6q3tTN^1#k0CIoUEyKuZ_kg}19h zx#Z(6T||+Xu%!TN(=p1~r#gruS6((+c^T|7OmH7CA5coYA#`CMn~uxFlZtXVjrL)N z;O~PvG{W#aB{!Pr8nJnUX z6H)k-i3cv&ku6L$>$zUX{WW6JI#K__YGsOz>NBJ^#(~!xtGS8o(iko7fLF|nbYZ12 z{lFuAOlY%p?a7k2qeF~E)zC+b=au$r8N?e$8X`JQ8cTCzkoIhCQS7MNbP6azy|xZv z`ium>Y8Y;Ri}~V!WZ>ch)HJ@&2L|p5eRsjx%<}_h)O2dw!Egz8#U<9*8Y&_SB$1$c zy$)S3!hEVHv`$Eq(??=l*dP0uNT1TNxyyEesp#qzr*T7Do$ZQ}4+ScGBwM{xF0z`i zw-94iWYc0=KI085YtRZ5gL$;Hd9S>ePBx~`-#8%K6dNn;1SY=->^!aXH=6WKPwH$st!epxj*V()}H*VN!uyWZ_Ac|Cro z0-tfQ>-3~_c*_=!@g0)hvI%x$5FppBP(_xxP6Nsp%7S6*Pb(Gw(S%x?36~I$0$Jd)t^XLX0&SErddy+4uZBm*x60J0> zey!n%uS;EXW45*NN`>W<CS$s7q4RH*e6pStZyT#IYbUPZwe|w>1ze zZpcZ$1$otfj@zHNBlE4tu@=D}z1}xddb6a$KYy0NPx^A+8fx&s4!)P;+B%5&f|q4t zf#P~Upx)tKvBLAs$D3)~{u=Gi@7qP(#n(m8_kD_=$f6qd#*rQx`ZdPuJ#UClK8>K+ zP$d|ePzt$^2N^64P)qNO4!c{)65&Vw^8Q>iL3CHB1OT_=6=|veT(>V)8g0bx*4$o>B&Bmf1kWP*Sk+{Nh`}F z?rGiuOrg9Ob*ws!CR%-%y^ksT9x?<((nebpS=O;**1l>8Rh0a7|2fxNJ`|WjAAVLF zrXHxW5FzB~i0j>a4?ns9FVkyB#pTi*5K?G8x~ZHicPQM&tj*&^HCgJ%zOW!LP_|Yr&EH~PR+x%O7x!NUMp&#(6QROz?20W 
zI>M3tJ+}fEK1jZmTN9b zy?FWt2$2;;{d18afc8H6 z3kMqfq_OB^6ttf@v-SdjjO(CCij+^2Va8D5c#0FrN#D|9_aZ?N=k9cUSb>I$ z!L6N!C0ZB?>!re9-ORk{dHf3B9F_!Li*wC(bR3*=G_#gw`PlZiNO$3d%<$&*?04GE zf<&r!`pLedG9xKj_$b*cu63KU|2a`cY+CX4FOb+ig#Q2yKb~UjhYM&f(Elj51fn3# zWyo$ZED}$74Bg9GV$Hzd3-Ssrbq9oj4Y9B%O(Z4gguJWXOyR4@XM< ziq$3Z2fzAYjW}0Ud$r*G2Q9+7y^WOQ_0%xe)@_xA=LXXrTRgsS*^NN^B-nAl&wJtE z9hIAR<1S4A%uMnMhwwXKUu{O}te1C!Z^O3ks#KRg0d7YprhJM?b@Gt2N4Znq zOsQedY;EfE@!==yn?hlBz^~p`7S@`h%u4$ct_7H84fkO}^Ug+e)~n(Gk&NJjx5Cpx z#O%_}D}3abO;%%!kceP8V~`aB`d8d|#Q^W{Pl)$7lZON|)(~ z*%^seA}4ziUj!YRm&2ABnbRT5`cDGn% zcUs$3&iw+d`1|b)XAvN$Q8RU1cRFnxj5DT+RdQVFO_PbX%}G{ri^hq$4$MKw5rEtk zG9qabw*A+J(WsZulA7f8A+$k}?491LkFJ^%o<+3njOUd*Yo}sZC@CF%7;zKpf*Q^2 z!tchrz#fe0F-5t~Gz>FcNuR9PBGQs;R&hoXu7k>+;n`Fz?b<*i#8X{S%ctM# zgoic<@F?a?=;H=8b^%k8bL8B_dLAkl&chfh5$ybV*>+Ha*(2JD3a$BU9UgIn8kuq4 z%rm7E&e1Fi^-|!`Mu^~qGpit-{CTM4ZURwvWRdQiXOVeLUi!{6bPJ_g%}tPH_tUzs z&lZLbV!Z`iG1Qm@8QX>ncUB$1h(T&oHt%)7F!+ep{N@^!IQsQGt^GFfW=)OLcf+!t zS{&ETd5jX+T~{Xhz)|VN=pVpNft%++^CYoj>{r}7%yh!~BinRm$`O*uR{x&d31%d8 z0w}J~z1X~kTjES#2vG>@a;=ZoD}|fp_5-g5l1}Xy#{kW46^p*J%TVFt5)Jeq~Z z+o~n@+{%0%Qm&Eebyvy$La%_LPn(^TE3p5(cIlHNNuuq!KP4w+%X~h|Et{gwWnDXP zP@{1~&iZ-($JdJ7^55^LE4d_|XP}Ud?34bh32ks0yqCr}l4941-HDc@C&jcbZ(}Nt zDAPV}rZ8?E>#D$FJ+as$-9YJ=-#`o2ae;<2WP;vEr^`%JfNdKCQZT>4^`nbC0{n%E`-yApK2}DU>0ODG2 zweT;QcXhmAURqRBF=W4k%W3@doD3nn;dVP!x?M=Es*V+oz1h<&4rP;)av$UmGUX|P z76DF3^Y=N9R*rFB_VSl2J3VC2X>apM;7@7@F33~+eyGG{W1bbRQw)!USI4y50!gyU zUX~u1tJE@2p=fs|R|3-1x2<1k4q?7=QH?&*RBe}aq)XKD%`gVJaH~<80$c0>)N$i*bXG5QvHbh$<`7A zgzwhPj7Xm#9S5HYVq8%K`TprU@@ZE(A3X>_EV!p==a%lZd%0+30W~^~z4KF$ z=iR=PF-N`S#bymCk}515eNv&NH}z+4_i=>k797mdQh+>mi9eyb9!Y@3$Vt#Z^i{Gz zZ&5WmM2J^A<{uDZcno0Un`# z+u z<w&ZZ9?>bF`9J|Yf=3mG=1*)3-kZgG2NX(VAuMer(AhwXzhrDmQSCkfju_JrA} zZ@`wmVMF`wl=Sx}VKwr(8bE@I$O2qu(sN|FGS!x%-@Fg)Kx>DK5Ks)$;mY1Ol?y%B zI5^UJe0zRGofSj3blS~G%GmW|cd!AJ7+~1s?9s{qWxZu97#w8#{d#%D`Q84Zvw=69 z&AbIaFMhs#I9N9=tQI<(ZQg$ z`Rf(I{EuY^%Z*FwPCR(#avL?K0jZfb0GeC}w!Ekd8`G>>`Na^fmzaiv%u(tt&4Y`x zLp~=5mx|&|`71e96Vv(n5~DjYEoeCCJn}MmAYGp=Bedoh`7&C-y9+1kZ@ydC(h1^z zD#j$wMBbdxeeaT1C%J=0nb-olm z(RV87@HwHB(_-LOWB5y~1$qRwH30|hk7?J~4Jg3xaY?^Y_An)qmJO|52OBuw)H+>p zWMg_z*Xo+B@=|fMO(ZvSc#8{E^~CMJaLHq{R5iDkIOcxmeFHE1b=5{vCwvdekCyL` zic8??bxug*(WNgHgM$EMpAmPx_DpA05!ySF2kwI|WI-!7L_A$*ye8tSmu3RrhO5ch zut#0`1#`_g*YAc>X+`86*>tz5py*~*cUn_ZZd_&Yafh9ST@a-yYX2o~zlV}s$!g87 z)qQ;4`Q(?ECtZ&gIq{ymD-+uj0G#xS)THe8eu0wNk)Vb)6A^+zA`^{ezLynuZZb$Zb>uxR46jBlE0 zn`wT=4G!ExJSjQ1e7O^!gcn^SLI;u5)K@))!345c>|iq-@|?|};UnXSe41?O;=Z*3 z`a|hw5;tu#K_!idWCO*`h*?bkopf81;@|^5=f^nIS_wB;`P0eW)O_1?rxbva`mFcgvJL*@ z;P>DCz;J+ssTaGFD4nt;`=_)l^18>O4`QcfQNV?HaD$I21ZK(^zd2?k_23Kx$N=DO zzOHRiKx*3+k@oQQlBNM4Bh?ls(!c1#1*nTt_Kb&k5CKgh;#JjDzI(W zSWP};+ANiyHo+al!OUnrFpwFm8)`UpI!-mr^%9pCmhX9YZprYkU-PDZ=aUJ!Dk}|| z;EEU<|J{29Xr+Hu9QeD>VHsEt+flPNc;zIXHr^qpI#l{=!$>N)w7s)h&2v{t=cmZx zg7ZrpOnT0~& zrRorT*z?$r;v?cJ@=2Vb*|DoKu*bKJZoGVT{+!)?LA9TMhUBEeEGHtBvSo%=0Xx(P zlDnKX*mW!kpTJh?<+?T+e9H8(#mrHjX|K^?msm>7rQ@8q`K7L|-ZSMf)`Qor?%%(A z!@`J3Eb(TSb*8_WnjQuc~ z*ml(q?GXic72_+Ai8V8 zujiL0#6-niTEsS#X;{)92s(7AFVV+nANx!Fj@5H-*Q>92~mJfX;!+#blkcVwWy*a7b>eQF+-&x&0C_;{HR5 z#RDF%e~4HAHh%r@ea%CH{TDFxmSyNDihVB94|pEqBk{keC}i_76f+XwK^L;H@+TzR z|2Pr+^T&@h?1sQx?lLGzUyNA%qo?tK<}h$V?nUT<(D)MA51_(;0D}gv$DQC0_xt_; z^V)1ijCRwjV<&COUQPt;(34!1KgjcxOSbfwn71bLYzl+SdXFV#->XLhy@iODU`sE& zP#alRgCg;uM5*Bmz|zEAsEIpMtxZBxYp$cF#tggI74y+=3O(-qczz_c#L*qpm#3>CSnm_b`3s-iU1`;8pgIL zK~xUp+t7i>F=4-9!L{5ap{>*Pzk2L3On`TyXpvffx#z>2lkcKml&BY*3B<}LVTw|POa z4bU|Cs|9;K0~Qff?5)<<;$CI5`MAgrPD|Y~5*}>fz>W{Y#~wFmg4I0djw7HRU};vw 
z@qA#-H-R7%TCtGIQXLs9FyD;&wx({-Hg@;x;D(6KVef$Y)=`DSaN`2Ig1>r`{{t5x zpDhD6=k}vO%MjpTY}q15;LJy6jKV`7jo$SQ?NM9V`pa)%9t~pi`2$_X-yGyzrjj>e zV}A`5<8dey>j0emnV z+g=E)EM2;1H_MHrod#FR^c{Jmi&)c_a;w7d@SKBePNvcER~FB3BQkbx&_*D^zKyt+ zfZAn3L5xM()BC8L?@srs_R?V;+Qjj937;{&zUp}aps(ZlIP~6-l}^lP>{qFEBO53R zESuXSieADYBMxk<3R)~+>je6g`n1LC(MX;Ll{bP{QB_CArMbHm^y6Ub=uUl}UqAt6 z0VHr4O^`4hG!uPb<1w8sWFXGZ|JHFs{auelW??X9f^g)XZW+fZNggljD(90-72XS* z6STf5vvybnTeyk_Yl8{RHA{S1e5|wwlkO$^ywX#j(FF}zxMO%<`ejarn2 zK-yA1H*xhNtJwSCC`si}2>a(8a!?mNvSrF9z{_)~AunKpn894wUAT-x!7Y{K5*N`` z@+`t48eB+mGPwnH3HvwYL-<=ws0Wy1`=D40*ad08Mf%pM-H`IlC0Dr#} zPPQFG5QOQh=Ok+*$81Rpl5W?F`D%J+PI)QR!j?xOAuhIHFW9uGx#qpqbOM#rm28{T zEMLPV!A6W6s`A!6+WnyZG|!fS<NJSZw?Jd#pHwgYznAC#3*`0h zbbS8d|9+r7esd`Jf;z$(CoMjq#G?!uk;H*JtCt}o!nPGFxX z{}tcu5(@)<^S`IZoq+%T`u~E)$G;qlf1%g(-+InYj6&SlrpyXT*ckD;YL|kY-kG~L zsWEHJRrR;s{D0$-i^lVxDnr?kg8uW0^#yZu+BkIk{o=Ck11uaq;D|aWQUH)vIo{kO&`h%N9Bs2`0TI zmS*j-|BJo%j%sq-^Tt6yq=`zG5)}{-5D}Cj64XNz5fPClL`0f^h)9!=AV`raD4?M9 zrU;QPMMAGC(wmS1(jy5aJd$$W?Ong${O&#H{@!`#&a9ca@0vectR+c!o}K;dZ~K(S zfLTSK?Yj?DxDeBNZ6kKK<}6I!!@imd(SOCbc0*vC$Jq0Wghr0S{*pW=O5?_#F(0eM zqM-{o5c_lu$E^%ES@qE%^ilS&tr~;)DAIwY9|lz>kW7!>rh-Xy4i6E>+_w#?Fhl88 zk$>`+BOaq`fONtcY8WFF?Qw=7P3;+|nx&nj>(|D#wK6pa2~Uj)pB+nEPi5BdoY;S( zS1jj)rKNqd1R=~9{ih{r#DN@|E)vY-!ceve33X(rS&de7jIK1f_KC7g`Eb<~ZWlsF zt>sY5Rn0>Q+!ar|n@>K~J2V#(wnzu3PnaTjs()+JS;mpg5EK1OE>N(tj}Lh2m{Kk( zobs~jIyhe{-V{OL+ikWF(UVarc6B?z9QIAG$bXbC;acL4JJ{sJabR=k0p5lLA9t57 z24{mi!ydzhtL%?>yz#!fkkJybEmHfe8^vf})#xd|mlWQ5h=rSmU>-TkcmiKM57mO= z{wzpq%)4nE0}K{(1jupdzXj6{puYllMj+oB40cc6|GfOy$~1zdxDT-1YZs+}zmFD0 z<3i1v4;tSG<(e-WSv-QQhOn$?JbWV z+?4{&mbZ-YS1T{TZ5Q8C)2`Q!mgB6upPf6&nPAlI{7XbrVxQspV;#Rp;Gx_bH0)1> z>*%fTa?HnyU;hwD3Pb-fAl!dzSE*k>xPNb``0MxoSBH%#xXI)R59~IkbcKhq zFo@e3nQLqYRE)>yZGL)@&gZvDl%LU(WRV`f3~ z5gT?3zv;D!S?IxPuKgv@nD;1M$w_wEllMl4RqufLHF)&{?9ZREOq$S3UvM&UO&f5X|~_Eq>UH`e;UB0}Y3R7V^A@X=eQxwcef%k$1V(Y^_?y+#!IG>H@-*08vQhRg;F19`0G8~ zrVE0DtcNT>W&JRyE5Zl^?H##}HQ~5Gk7g+ev#L8|b8}oWlcIKZa}s@rR{cx7JW-}A zh)N8g@iDno#YW-y9GGBP)4sq1bXTTOQJYCt<)xdb_sg~)zHKBo>iD=wX?_lY(e?nz zNhCi_a_SEDz%Maf6lDlty!p}$ zj%{hI`h01alxx9Nf+K*F$wi~d+cl#&>2hRh7MewL{3$8YBNpbK*~$mH+ByxB85hf;%B5)TlY{pP4aonBq`Cr_?Rd z#+I_K!ttk0u!cHrxwzT5t|RF?S7S`&@;Gv!oB|MBRMBp*@l<0ZNORd^puTs=v$aps z9Rbp%!=k4Q(*B+(E36P}>fRX2TP|=^yPQpETVvH3lp;A56ilZFe7us$EskH(<)C}k ziw+%Xd~Qqt0ym-lPt;?9mEU2fLE$qS>tSEGF%zdY3`zF1BS>~ewXT3YQj(GVKy%AW z`%;hlJ#0hc&<~&WZ?>0o-JCC<=xgcR0}P2MV#uQyJ{aDcn{*d7G=TWr^^00K4sFrI znO`S?%+eZ@`xFKqoo06x%gvOrkIy1Mwd?4jI@31oFUmGwHCy{=A~emA9*fY_)U}a( zQdk!Dw}ZsZzd1M+4rf04zfD{JpX`|Y2{O*`+)c5+Y{utfI;tE))=hcDe(uTA-sAI+ zKfh^y)Z9B)c7Hcvs2I}>QWCi$`hkWPL`48e8V&Yu3!|<{8EFZ55JONuyetSmD)DOv zjwcb|!V^z5Vgut#)U{(mb;g}jCPG<#A~xVTDt{Uu)zxP5k)vW92i6+R3T&3umQ*CC zQ?)7om3ekDw+D?jaD);_hSeH838fnWtG$IG-@(J+5BNF`Ghq6jS0@d<8lu zSe%j#&RkFFr;CSx+xhnKQK`2KWwZSB$;4Sx`{%Rc-)>1Lm6 zURpxbKe)n)pT_`kvG3p@fT3{Cw^731M9X;IMI-g!J?*Q_0icd0`@31cAUQS(3L+J?vA>pXIxku~rOvRWi%O1O2_t0CSWf^2Tv!D) z2zyht_nFp*q8We*lIw8YPWdpiztGM&5ud#DLR$l-W17H$DnVsk+*k&D%4IgP2p6l>V4a9M@E=n&=6$;$&f~nZEU-Gv_y! 
z5a*ERC?Q#GfST-ZX82Zcw7Q96?9!1~@oD9+b(UXt8{Oc=Tg^q$g}s^jYZDvHKDh`G zF*Ptl{FA;+Q(PC0sUBQcnkTq+v2F%#`%Y@ z5{-`EOdw9QT&>f4BPS}W_o=gBwJ~yFXNUEyg5po-LAVzf=ciE{C|wCU9yBZFQ05`X z!ZANEVcUcp6^WF4Ymy(5+S?wT%p`hv7<#<9D|x9rDDJxK8NCkFTXf4&uoAC-OzqpJahBIZ@!^6&&yXXpR(}Rn|0cfw0uE2 z_iRy-(nh6$9TiFUWIRXTHJhMOZ;^!)bEX&3!l&#M`5Zzn^F%d)-N=K8qY_(=?CTXNz zsn8dy{AA%T8N(K5wa`T}p-Fha%RyCGOErY7QHMRG&VF6fam<@pDk**YvTAkZdYE`bU$ES5QTJ`*D5 zVP>&RZaI+K6@$+^cgJBD;{#Hh($_u%AHGXJua4Q@@$L)Leh$N%c9`A3cz<@%;kv6B z=DQbE8rmMkOF5es-v}!R?7M7Vvs3*_BnfjvX;ETtc(&$}6?fWQWt;3fT_#s$PImJc zM1+Fd&BG8@h;r?T5V+JU=$I^!z~T#WqK!f1m<`iWnb>Au_jo03)7Oo@mz7q=icfs2%;Ds?_$u|Qw0PDxWg~K*=x+UE{FmaaHo$Ssp~HCBB*$#d0+l*=(7Bj` z$cy!W-9`6bD&&WmsblsrZ2b7WMVcVi`S)Lh7sCh!XrN(rXQ zGYXL=6y5dTSu886D47wZ=K>@@GIEZ|ATRT}e*ad!@F3{`cdfa(&iUBt$ZWI$P6Ce4 zg&out11Gc%^B}6_q42Yu&jfxkYo3y+sp#Y47V)1nuHI4*COER4P?>r3L2e3GA)k17 zN>pZto9O_kC+3Ne7C&Bdb07`SC9Ao~-jFeu{O}GyLO3sdsHu5j-6@+D zG8`r|8Ih)wc`s06Q;6iiLP<@cNH^QcwR`h(?S73JpIR1fy5osHN)XoXrD{qa!KMIT zHvZKRqW5F$d)K!9gC1ysMq}bjIv75QHH2UL5R^OOQl}wMy*iZ-Z#*9>p%Fl+(y5UOn?)Pk9js~74ZaKYQT7BcsiDE3ydW0`q+!j!a>Sb z&4Bm+Bn!-)Vlp~2l?Eb9XQxHlu#SU%W)(KJKNA%hO*5)us>^KtBOku`=N9v{^jD+C zm$M57!b=Do`t~?IAoy#}!pSno}e@+j-pe!r{`V3FzIiG)go3GN|5}NSBXV;Sjut;43}>y^#fZO-sz+3!Z%KU zgsfN^8FV8q>?{!9>7m`Fp92aKE*SvOfvvg|ivSfj=aw8fD#2?<%)6BH2j-fiujHI{ ziZx$BtyeQX*bAiYN65 zz67FbcZlMrPsMWlZncP{fvDG63XIGDnz~(}+v-22Zuj3>*ZrTY?)Xpb>ci7iCg7ic zbyYb~j7qp$hz)yu&a$o)6jBJ9>pIzkB%PIfseb9!_J^gYrm@*=`T;D7bC#bJx)VhQ zJ>1sKF*et-0cWRLG7vU-gehn-7`}DyIyC+l93+_Au^?H&XR9)vH9FF6G5bUidz(a; z_q(H1zgC;T4g~rKCny8>;O5|rt#M0h{h4f_m@RZGpBEO(RH7rmkzWdI5IC!06Jb-- z;HYM)BNW}t2h*+{lbKjopIlr0d%OVx=lHNMl_Js@whBp4lj`oNA?h}--!BSz9r==S zy@;Ta0INW+*5dmku@xE->c6u{*$18+GaJO24EwPb82c1W>VL;h6e`ZWn|r%@;CUSH z9GMl!uer9s6VMVM6lc!}Gb&{uJ?yRrIACiPUmHEwSXcebl5%#MxN1FTA|g-G>#g8< z(z$e^;eNIgbtdL6k6s<*OvVw+NTwh!qEglpblAv!_}ZYnSqI{C46e-hu9hurgLH%t+S6ls!~H^p1SL0z}0*J;<|mA2M(4C|7SDj7r<* zX)tKwnF~{W&ew8K5B_mHxIoZAqQO%Ho|OeD8m?;4Y=AuOYX~6FICe3jJ-w>?4HFGCrKx zf}S*?ssK-|UpYu2WC||Eunq131rsvPxK%6Lk!q5D@*n&n8uEJHE~;~jmULnvA!W;u zE|f=Q!-B8-F|%)$Q2TgGaFA|JSlJq_(sDj?IC)t!Kei{OS?S3UBa}0(OmI^OsH!3= z;?sXfOu>4(I^f}OvYiKdf(aA!$MDoT;j{ z-srZ2&(C(I&R&H}f=a`CB9_Vh6THz7@O=40(bZ#N>hMTF)~8FBh%=n5hSrYM)eO7p z-P&-B{gUkJByjOPCN;Xraf6$zxD#0Qk&KCW=S4rQa`Ta=V*-1Ev89cEsTJ;|vSDXGbW~+kl4gLsG>lTEdqN%sfl=*~??dfz-=a30g}>(Scje{m z6Lbqt2)+I`>v+j^#Vn5tM8w`3y*1SOh^l0**0;U+&&FB!(10;g%Nd2nm*T3j!gNe& z8gzTcOTfNElT2cZbu@?{b$7m%r3kg?UVGo8K42oyo|l$#%j%|v0z2Ez1Zdj<3O!h_ zCCF;F6h;K{z7e*%DP@ZyPjO4zPoJ#12<3M&}em|&Bzy>Y3vTJQB%yV#dfHeMGhHuYK@@3Zvs63nDxX&Qlv zOzr^UniPZ}WOeaRm^Ej@hLn#VK^zE7SXzB-=E9>CfA{IB&Y#g@mim4&EFJr6`4h>rgA@&|@l7(o}ZFJ6&#ci=GN_xPg_!!Z%f7glCRcdm#2f7S?XkF1N~~QsGtV zZY`gT@FiU4k-OgOpbZ7DEOva;O6*M>zX?hf5jjf8Hp!2y(u(EM(RyPo&UczmmB1W* z>C$_-;@x?@BY{&T^(UM~<5Oz+dqCB@LUjjg)QoPCYqSG9IRa5J7OIYjb6};MU92Bk zJK9)V8R*P*OXrPjM`KdcCoaJsQQz4$=d8B8_4lSuE{DIMn=`G&eM%cz6MIwx30sQtAx#C7m`yTA=9@gsC! 
zm!CO?)#pzr!*O3=G?usd``Iic;yk{0tFe5R?QqGv96Pr@u zs%Si@tyr2gkhdUrJVFZ9ul<{@4AA_9i7sU-hF?g1P|X}n_>?50pcGe}ST%P$ZkFv8 z+XAi%)6x$Pm@P(T<0*=`Dft0zkFEkzo#I#f1gToMl13eS^T6|T%|v14%wd!8los|x zC1DO&;#j@>gP{*M=!cJDL@%_NXw8Tg`zEt3^vq$`on#JYUArMBY1 z>rzi;(zot;ccIgLz49C;ytkPP?g#A)qw*)+(gTmo^oo**oXH1dxv|-$+zOq-!^c6W z`%6B!3ky6YKiUd@MYrhw(A8LC?|Mg;9hkf43GbBVizEg7 zl+#<;IoRCr*VL2`unEI{cm=M_!jD|DTnK!fk zEIDDd1^O9(PoRPY{oh`}_+On${97g&|HUJ?4G?BHby`ZkfjU*8+RmH*Q|j1yU|Xw+ zms0FU>F3K&*LbF0UWY51lu8TBPBaMKnPV-O`HKyoct-k`urXd9I#?F`oOEqu{T86aSar zG!HRBTcf30+a-_s1jHoq;luXLhVV$ti0A|Mp8~JP-0@}esK~uTw_Zuf^GD+=4)8&&`gd(Mn$$gHn7L6`2=EBY17*OfO|dcV9$yJK|!cNRbCCu*reG>L0(CWCzt=hCiS%}sNPZ1g$| z@K$T8-13+&xJhx~j8vP48kj_{2GA5Blgkvdgt@Qa43-%=?$*Y-JT3A z2`)zMe|ztPOlWIC)g$T6i+~@%NI`q@Pgc&tN#cOO0TV&Npy1X4x=ymDUqh_DEs^L# zyC&uLZZw+uM51Ork$AG?=pMzS7~+GhsUm68?1WcE6Zseuqjz&_QM-u|lSpJrpv4U^h^!ahA@HN`BrGP#q0N^2h| zC-&2>LLER7>J2Oc=~Q^b7K$;M8sPhpDEqNj@9R5rPMdExWRkSgOdGTo9_`R*$DuxI z0Rq|(nrGWoAFMfZDTswjV(zpR= zOvKN3`b>u#4b`UHuh*(6`8s?Sz1p)&;HB6Fg`3&<{h11xa43bmMlb}~AVCSrDzgp@ zHaXD6Iaw|KxF+iJbHYBgr#z~JYxh0Y!3Z~=h~^KJqYf|ws^M|kjrH~Gr64`!PrIHJCGpeJ7C*AG4Hmq}2=H7DZXr)r} zgz<@^r(Uvta&9fZ(tLZbW{H$M^ayXpHtl|0N^R7$>g85n)zId6*@BV7k7ct4-ec`z zpr-LVcj~9Ccb>}X5V($#XBJhDypV9(g+Q%|Pd@Zg3>1>~N$&2&%Bi2O@M8^X&r&%` z42fZP;8hxQVHE<9dx0hr!a<_4?V#9jNiyo%n1;>NF4T?v2O4}+v2G<) zhxrZu4!B@oNegW6DXa(tYugCKSPyv?eyAKMElim4SjxNcj!Wi4yjXuP&%U~4Jxnh= zI;*ye)In3b5$H{XlibMx=Lb-dfl=4r50u@lGWA>Al9Vz%NB@xDXi%HL_h`bL{zhW= z;${77g?S&6;V#9Pel$qOa1_qfj}#38rWLn4@W?`!EvEeqhNMn2Q5fWk@5tBnyuu98r0D&f1@ArD>1;GV<_PeXc;6@vB%*#P9&cuGPaw_0*3EdA9}2&nOzt}ST=|Th zePoQ1arwPVH?#e39H_hb+NkJe`OcDqh!?;N9II#K{y;lhd&}yoRSrl~cvG89g3h-Z z>!hGHek=rb&Qw#<05;eU2|#m&V26Q;w37HFd#n5#oH}`n4~kQja?u z&G|~d;V>7hm1ZH-&=YB=0>_B`32=P%L()PQjI$_=ZthG@of)JK&TRSdXi=9hQTjbj z{N}`Cu>9eH)dA@+)tJk<^+x`1lpE$1XsRYoo@v8YsH~2G({$3X!~g#BX$QedQva#Oo{Px^EOYBZx`apWs1f`JQq8M9VxW<2ANV*C5E-NWil7^D*DUK~ zWD-Zy6*sb|ssI;pgzgTN5OEU$)HL#K6dRO>FIM(0D^>bo`Xej2$iYW)q-sj!`}BH> zr^M!>&xUJrBBUTk0y4YH>7C&;Rl4HPaaapV4p2zV(4}{hZEenReC+EuI4Kx06Py+X z*BatZTGO+ee?v_IcGn}r8Y03}|Fr=B6+v@_0DnzOOEHGs>#f8{ z1o;|z0#0m&vf8CYKaA#)(T_K2pD(XF!(s-94>wLVy{C@wJul_Y> zXNfQpAQB+eAT@AGPI5YoG6x7S!5i+QllF|~ruWkL-&PcqU&%Xj<76A5y|r553f28>|Ah=b5^L zy2sEh$|yd%@mF;_Z@%%BOj)ulsVT+fZ4K4@a{!N$YJ}yLXkANIz2-$lS<7|bAfdiD zG!=>lH4d^Ohj%sYf!t>^>b$nl(N=>?lQKT7gGyuj+MS(U9QLuZ&ruu}5b;>DIsebI znDK39*I5%l#jdbgP4N)3uO5Gz8*{8(24sf=2gj41 z%03DGaOIvq7pLQm@t$Jyv~TVgHIm^)S-S&WhFVbkgGkAnzPq2fay)kx{dq_)d{gL2 z!0#-w$NHI#`G64<@$e5Mt_}H3{(r32`EReg_iw3X{r$by}jH_+H&C{jSy zSO=m60ZO4{wa>tS+N@(xX%&1@j*!orsvGcwq+^=D@St9w$^FA{p~qjk&nd3>Oj;3F zG*u0ILHqg7q^%GkGHey}%Q^NyHZ(7{hiOsxiaN4uJC;jhqR|E)e=|tCEYUwpF)U87 zXZPBc@3GJ}F>ahZZM`Y^)t~CM$$Evk)p^9yWqfEsV)hc`$8qZRZ3mfwweQ2yGoEM zu%e{spuUR?VH7a6Aq)Z5&L1VBanPka+l+X7V0CuCns)!KWHPlZ>tr`pg%hYO;&x*l zare^(;GmdgFe|H{mpnTkb|z5JneF0@4;LPF z!-^f?5EuCjvF43e8Pb&OA4}w!I$6rsnd8LQ{-*K=VVbLXRh#?sdFqhkOc#-#Q~+e3 zWXTx%zNxO6eT;;%A9P1T>18qGIq!!@X`X54=U2^qPTb_|)si_D$|5J}K@GrjlJ=9~ z)Nry_DgfelXuC!eX?&%h-Z=URuIoL+^vvgbcB-XF1LS|##payZw0fYgYkavibi zTA%7Ky;J_tnj#+4DDVI;r8Zaw_{BLOQ*7{{cu;=~*8mwGnCQ&uXNM+z2n+$n?&e)u zVn$yGoi+hZ^O#{;;%?~TT5D8unyvG@^ux8ng)O-jUUTv+hTmuJHnWKP#92EPL4G@i9e(`-$(p8QSE+RMOI6tfa;K2*g(m}MD{WG5_- z&YzBPbJ3&ph{v6*Ncts+D%ufb7KL?93`HFn_(6vgyxA9R{StDHeD>q0$zu2Uve+`* zEyUvWj!j-?m*$%{rrqx2^iD>tm?2%zm#QwdQUb-oAo=vVen^4s(61X~c3y1vC3;!0 zF~>U)6+1;?jj%G0bnhoyua6I|nGeKPM-0C&>AQ2=zU&-f&Dz^%yMkNp^~ zrWE0Zf=P$vgKCYbB5JM4CU@5P-raIx&yfzZ5}ZDR`wrRBAHRfx$=F{Lg7FdntESAf zQD2OBA{8Ae$$jTXbrehQ4DkfpmEWw?KbvcI7t7W zbVzAH`ft0+7JZP%!a9{NPAR+l$=2llR_dklEmPg;`%7tAjS_jd0%kHX8o`YcMcNJ> 
z9%xbs3^1h#6*Q=|2^V-s_I7?caq)>DNrq*w>MQ<>*gWJ}GJoV6Y0GTyA`l$^T<~HD zpFBCKnXafeMSnh?=NbHk6wZ_BV{Cyw{B0dR+0}`yOnby+^LWM_Rhw$VOIli791&jD zDv12;qpr2VK82?W%U@1NU%8QDDT9)R_&b=wj0CI)SY*Im`DM}_w?V&3mTR}avN)Oe z=(wMCH6d>)`h|8xL2u<}Z@wBHPk6(z2oW|-X2P1%>Xx|H1CoL059J^Gx|FUK2Tt=_ zI_vBU*#o}+P`GRVBN(?|5d9pvuO(eBYVAj)Y~9f6;o6i))%eBGS&4m`MVhBa9>1hd z-e9&nC@~t)_&m@UYh6ORYDDUYJNpo59}T{YJyF^6@$zLPJIjiOq)whwuavXWwPx8u zd{qIBlIo*a%C9j`6s5KqCW;SHBzCo#u|bLFC}k4y(-3Zm&eWv@$r2^Cy z6L~za@O*&g2!yxQ&lC~u*)CR94Eq(vZ}Ai#zLvg}+kHVXlxLN9D!^4t^X*ln1{8Ao z%{z~0ZvK2hL`5I>w_m)z_5`z>+n3aYuPEQ2v5XRz7#MVVjS$fu zwdZM|^AoCwGU5Tc1q@-mu@i>Gg;n^hSp;?$(kVX>D5wc~X0>HXeSJ_jI%}S}a837$ zOEv6cwBEbhPrIK~x*?h@&9LYe3n0@B8hBOStEb?r8?;l4sour#iU7%n{cs~W6)LiSOjpP8-`izjl91FknSF2gqX3*}#qniP~ zSP4cl$wxX-46+D8r>NxTw!X{BvDU|BV-2d551JkwZ~P~5cEEqAYFyb>`L2n&zP;Ev=c|U7`ECqt1EoRs zL>@*W-69V)f+b%^JVDg(w=8G_U@3p#`Joo!`Z<-RHjCkdM8%i!mHesK9vg@R_R_ye z99+iYqdmBR&DH+=CsxW7la7LDw&dfKemu(#HzSZ-8-Y~Cd-G31W-YdUHAw~I*$Fd) zUu45XpZj0a6GUC>=Y=k`X7ZFjyF^C4^eC|Vte$pK-<|t>qI#Ea-5wUDuGF)p`-@y& z++aF01;DoOA$wEDAPU(DSY($FF^|WXaumOXSe;Wo9;NYe#-k5N#@mWY{p-eo4Jpo- zMV%x_vzxB?uGXvxyEYiBt~UJ|BSR<1mvUsGJ>8j9(@=V2IreQDcs?Ze>kPAs=6k+1 z>Ytv-N@Lq>BWkpIN8m&@Z

s3R|xH$69KoYwThQ=15M9#H7u0DxZrui1h`Xe+}Wxo(Jap0u# z&!|(WD2&CkFzaSUM!fz&^cBsF??r1|1AJ+Q z&BNw|cSau@iHFko@Gj#=`xeNGD!iB7ulB~llP=zVKB;a+Q&Ir|0fUXVYL{ejbjwAS zQYD|Z+qFDUGq2W3-ZE#$v8Bc04@7$47bAR7mztQ;x=v%+s2s$}tM79Vjv6AnjEv#? zebrMxD{L6HRMe?Jw;Mu&H`_nWY#p}xM*8!im1NYT!uvmUSX~>HK|y^Mvd9-(i8*R> z^C=|X$@(@&`F#^7O^6M};GR66B5v#28r%MT;8uTxE_Ttay0HA=@4DH=VPhf0^7yPw zX}Ik8`YRgO`S^^`b-m(p-I)F9*}w=^9@U>Ph>~O_Fdyt^D(rZ#iCW%&i5kP z=NC#sYn#@*Ds}v1CzqQK?z~*?xt{_swoAnFs?!*8ujg(yt0;~io;L^uzj*R8I~%1| zTPhk^E$UAsqLcsQQu^<^>0d8va;SIix1P<-J4pA^3347o z_&0=4;jMu!eF~xax+WtXcj>FS2))lS*Ocs(GsNjB{f+nsPEVqR3#kj0pzRkhrR zCtC*wY4|v@QUq%`kEsXK$?%{SqX;uNs78IKOkQ#jx4DB1|E= zN(K%pJQbGb?6e6iM4Gi$Tu1XkauR}W<~ZM>{mv#1%LMUE+aE_|dJaSYk5ZMegjK)4 z5aU&?)EX8MarAp*UU(_#brK~>t77gg6nK~I2uX=NgF);fCQ?Lw64G-Gc!}7-`ZuwW zl%&Tsr@wTS+zlTv=I_l~y3`ujRj2mwUta#nD%Bw%^UV z_9)gXk5UBnr}E{o2#L7D$i++&8Ce8d5*}XN0RIJVxki*7;bh9 zRA{1yq+6}Pc)J1`V?_dg){r1x5A>CUsUGEOPNC`)1qp`9z!1in6Rew9qy`;H8{lUL zIeu^=5iM^g{UzBb(K;x*&o31o-U0)Hw{Xz6ofj{J)evW3NfzEy6&Oh?J1E*sxk_nM zkco}NeIU5aU73f73nirNo3DBW51=%M3@3At61S6{6#UO?1GP(KNZ2)jf#3DHm%U}L z671x*VuzugMa!m^!1C?5EUwqpXa|LMJ>-=U-bzwz#I_#ea@L#*^FCJXr z1%-BnY&5orBVWK2;+x~wVBhqV&bwZ71nbj?4e@8%&+NP_ct`yY^j6IUeK=Fn)g6rX zS$)43&$hc^H;qOSE2cO>1;aAPD)BNm_t?yigG7bMU(GN!NCMqkZ96Yec8gYJKW^?e zc&{FIX`dNj>x`$QRdoUObMh z&hI}4Rn9)-THAF}QK_J3Q6eaC8aAUDlg;2NYGn>}ZRL~y!jR=ON^^>UGupE1i;iqX zYbg5td)?&`Eg3tsgEU~{Kwq`<I0}&rk67>@^soDi#jE`xpE<4!}@BO-XS5x?U*iCoaBDf<4W3 zC_=()8KyYjf6ekcv^Y^(B$28ofw&S2mhL1ueMYSH257|9pl7@C?GDVk^1UfDrK5o5 z8Pr@JGVg4eP$!!XjvYym&K)-szA}Wyy8W3x-dMwEDf0~_?ac|e2#qFX+OGacoF%W{ zn>oCwyOk*Xg#5>WkVcx!XUifoG|Qb*;rRzeTFpj81T6aQpC5(FjUQF6B;@*dE?0nD zufpt6wo~i05_!dKi`47496uzrsV19iH7kw0q)Vh2y9X1qo5|wxH2Ao2VniIbYCNo9 zNgNfhvw!rXGt3BKE=bAi(TKcJO6k{?t=Dr>A6u);XlW%FB*3UNfwx0P>AL<%a(Pa> zsO20*l^D^xdsOqQ>!7w%?lAm0!ta9HlA4PG{+nl+$>wKX&qu$z``vWZ4<9CWyxZi_ z6#scnqx)`(K@8>&Fj(GWoCMyI*I6?S*|-=iCI30^*gag^H0;i^)egKOOf#Ai>bv67 zJipt6{WkoFR@RB*#dm!+oMmK-L@x7;TD4;6((8m4sfj6&s~jz`h)k z&(3lGU=n$)R#N~%;k$&rIbDt1Xm%1mo1!C-9q)GVUkjURn6~H${g(s!|hNJ(kf9xdDI4zit?1J?LbY795r3*Fo zV);K?030rGVdD!y(-|bBGl*)r@X7MrIv!P?pneW5kSHF=k{9L*%0!8Y$Ht1Y5*JLo zgnV+YV2>n0?V%aeKu@Avly}!J{Ge2#riyK2#!#TxY~n{IWP#e0r1bttst_GE%Q{|K zO*?H=k;Fg)vux3L5zHbzX2CjUiAeHlC(ADX^zMB_@AeLXmN*MpsGi7tpsoWFx+`T+ zg+=s+?D>nL=iSJo+QoKe*Tm6G3*JfnGcqC}FE&Q6#Zd)jkk*v!ZX~THeKV`c53>dR zk);O8VY<#SeXQTQ-?$Oo{mzOUm8Ec%f3Tx-<#64ZUt1hKF@gVJF<4$V1KCC3t^g!7 zREEhUus~{AwyuKKqA3H~zGeC*L|iIWT7rawi7u2%AH7^n2q*V4NUWHawmPm;b(c<& zsD~b2O|(R3VzocJPE{4PtNzy_ZPO=$(kd=>#Y#0_@m#{Fk&hl?zehk6jfvcYRf*j_ zMhq&!l@pa*sKs2s{T;tG%*s3xvq2ow1PmJd`S{IFu2#cY0)xCrj4JXRcg72o=Vl|1 zOMt|3nchmPf^gDwxP4kx$KP7B_P5LFitFVfRXGgjSjYQNR{*v})+8s>;hfF5lP)jG zSctk2QdJQ+sz^2B6md5Yy-A7FTmA&g5aKE=Xy}d=Gf{8yl5y7KATuBKMEULf;jLGK zvWTLfp$wVjrvdv&I=SfG%$` z6i@E>fT`h#ky@|BDkU2JXJ==Icd4Z|IsaaJ)~P_z|MgEEDLFILschr#5;-g>@1b4- zl;K#n|E_~{i@1If7552^j&v0|-Pa#X*kC_*zjyU;J0PbL@;VPs%pL-=?URON6e0r@48~oyP~kMJiB+dw}`sY80g*)9990- z&X{zc2t?t)m&FQ^ER6Y9$zQ;{&k_!{nNe1TG~M&NqXS{3#FP-6V~c+H7H?g6w) zd-pZ$aES?D)DKy+_E)Wz_xPGD_T!NM+cS>Wk3|9=9Z7WI*!NSr(by`{uPll@IbDm3 zL3DrQbeL#sAC=y|MeMyHnD@c_)%XJQX)ORR&&Qc62|qr59wlr)k^}BJiP#yMUP?y& zFE8>xZbjf-M)==7Y9%^Thh23utnx>l%>dam{FP6%Exb%uM(!Vfl7WP($2{IGfd&#l zqHo)f%ubnMhp-r@!wI{S5xUY>beF%W9SlzsY4`iRi z46BTJ4pPEN5YEL4x=2G5wkkWxKcJzNQ2sn`m5$16Hk!f}76fLvcgXDm;hnIIk_v1i z8mQe#NxO7G#o=39`Z|yq3SG?iFtAd6xUNTEXeht9f(v-kbU^zfad*>ep-W z#_C)IgcH7K8d1%_GsA|&g`o%Z`9fQa2ia+c5MrnG!dHTev$~Hb|C!Gbu8>@ z2*dwshnp*%IB~Ib_rq2Cc~tq6^U;}xm@8=?_=6FAc(LW0q|?zAMsd4(_{Boo2A=!Gcpjt$x~_zjWjmH-L4+x^z> 
z6ND|@G&j3qC+5p1OOpFv@F_H~A|VE`6ja0)yz*Mo%sm~znDdM*#hISjfgfynyqmmv z(r$(tgl!Qlq7rcwB0?(J^b0Rw!ultjr*coK4+a_(7S+F3*{78Mz2Me3-@Crt?^f*e zm5qP=2&1HnXOEv|XJzY_@7OcgrJCOK*1I)LK+K*s&;NYZ^E64HZ|nL2jwa31{9~(j zbJ6sV1WxvY_0M~0m{ji?W2;*eYG8FoN6jQA+%J3`?03VTH3syD4dzI15xc=I$YxHX zLfbVf%US0$3Hm`HoZqx>d5B6nifkGBe$TS!MZ}#Hbs(u;V>X1BwLGs=H?wbxxe?MZ zpVn8T2N}Jg!980lS~cv5mUcU>Q`EYIq3R3wGNj^9t&q^+c6^rJrCZSqudt1H9% z^WxujzdFKovf@B(AJxROU)@aqU0jSvc5gYJqv^w$N^ex^b^ia+^_Ed>sN1^mDup7& z32w!MTXAb4xHW;|uEpK0xO;FbUfkV^I}~?!FYfSVopbhC`+j%dpBWh$`7_@!=X`7? ziE}`D9@_P}^jq z-lODwWj#i`&%rbCrB8+rP%L-&BY&)X46!P&tZAZZ%IZZhRh{39oU^^M%#2=OE{eQ$ zNuCloZ)4pP2mtGC3GpV6fqct47l4$w`Bb$2RrR(Nxmp1~QmqirdrqQX$~ia$8Q@HH z-B=!U?cYHRAz0Q(v0Sdy^uYsPTX~MiWGx>-1t>Fh@jZ<3eE|WwaV5O0v)`OA4+ESR zKW(c33|1Fys!VPa=c~A3>8p`tH_sPKz^P@NxS~3ehx#sory;pR6O@rra)< zMhw3BuZ#+*z-5b5ia@D?2KK>-NrHQoW^+(Z`0wB$hZK%Jhpvgi^az6`ooIq0U%_A! z`Z(N08KVp+?!8=jh5_}sQf>|Mi@S&;SHAnMx*E`_e9cpYcqP8ME@h2w8uV`u_YQ|Z zz?sSWX~TiKFHmnN;3)d_?dv8E5s?)Emvd~Mc3{xm=CibFwj};|79WQEdqdn!O~YUc zT81yHCB7)? zYu%TAfVH?sEO%Fb7+tYg)gdxpr=s^UtE{_`$44HSHZ6L!k*L-5@`u`uk8XK?cm;E8 z(wK+<0;>=o{6=;MH~gPeGhG|(J`!Cy((@LEv5v;Ti*qMJ`NqeMXt0h>)Z|m$eoQJl zZLO8Y!$?)5S6Zv&;=#CB$!%{`hR5Re({x@GvoF1-aHof6VO{gV@vw-f=yy6_4YdCU z>f?$DYq=2^Q-j~uXb~-0T*stz_kpOe2z0V>MqMzlfM%K&8 zc-AvBd0xe8z|lgu$RES?gFaA=HR}EKfXXLtjzBK}F36b|M-IXUAbcob*;z!1wT!@V z3*`VP6~kS7$3=kL!jT~W3pJVn_`+44l^?+>Ml34F?Af8$@vTw*)!0*gz_@%OpJ&-j zQdnD>=r{VwuL@(YN|{FL@MW5@_axcFq}Ak%aw@y2s|@;uW1EwYL42WcM1ghUmv{yj zxfOMbyGv)yZ^cfV2vp)pKQI0z&6s=Kjq>a57z36EFCDGHIoZ2N1#@~gpIHtFdUX?N z9EUjGn`_~_mzJ}thnB2L{n3YRGJ0}y@@2>2(Z=~9=PG@W4=B!P^y-_KKyQtLwm zdzwEN7_h$ychF14r=b$=2{v{3hpe#=*kffkBN)3XLw1>n=Sp{o zkUWtWJqz(7VSHhE&k}jLbrWtcTR{f1dTeB2ACsjtuSL7w2&b4hnFU#}Fe5huCdP^v z9zRVmWs{hPv{-H)_g+gz@}Du3qD8?}qLJ6@UzYRwogHLP9lAZ0Gg49tl2S3O+)Q2% zK5fQnuUfahtepoOhW#uwL3Q_|{O@qKMVfa-+O5g5S1HsyN0a*3#}4uSSxEn#r1%w! z{6x<#=c^3k3ggp$wF-rwFBc;-hi|{)cpcPnVi^hBxAhLngUU|NXg{$Z*9}&5^7LSI zkCVe-d{W9T6eVc7ZP>o}d64}mjETjnP}3f8q(Ltb!<`S$M~N=dE8X7$+JAv#4C?5^ z#|mP@iUocEH&*|87e35jk3ZGstmZ7c2v90a6{n;PO%TBN0K<|Pq`#xYjVndvXm#&> zFBMhyKou6HYM+<-+^6{u!V5kbhx8u__pmYS%12nP^vm528oDf z1feO9upi(h6!#i5%x%y{!${jiJ1Y!<=1${VA>mg=hyu=JDMBH*z3sF~ z2nprp(t|m?eVw=Un$F!c%?X-n7l;VJ)#!9L3>3w5rKmEb^S*BcKay%X`Pl8Et`mfb zSe2WKWA*gva#)?0?8QB|)W0Pn`=qwPE%6xX?Jb@p3z~6cdf%9}3fjQEI4v?Kw^_14 z#NqC5Czum^kSYs! z1GmcT=3e|c`5q)qPwD%GqDSN4%icU=v)$**>eukG+fS2NE^GMZ1z=|(b(k*c?Sk5jW0oGqxgE6)66O}CcPj1JAquFoq8 z9A~+t!6m;17sp>FoQPf2Y#-l}Z~pRqJFi(uWJfxQHM>fFlq7g9NiL+ncE6IWdlM4etT+N4g+!5J+$RmoI^^XJa0X)wiLTt769r zYf2CpZqaNs@$i$3o}4r6#WYLGaF$YC%{^qt(@SI5C9@=$$Y4)|To72iB%6r88Q5{o zo!1E?QWdhaPD292C^R<~r60@IhmtV>8WlY5##HLM&;z+Ael>FHO-KP3B5ma7fHZfP z+z=^fX7X>)eLs3yo*TP-*yX$_V}Dq5(t3vKnsr^ps_-fSy8--Ihc~b*2Pn8;Bri8^ za6+LpnA|E(7!~)WR2TwcN6~^-sO?A{3|Tv^$5)XX^x;oMyIR9cTGpuTPjK+?58bFW z$`%)wbz|;5ggN?7n&8vYPjT!zd9BY;Zy{>~x|fLYCAGgP(xp|Ry#D4ouM3lAnIhCG z<`wYazkLjukPN}>Yb&y!{?YZ7h7xybh8HLVJ~m*c$)~361=m{Ga%X11brF$ntm8gg79Qtsfuh5<*$ROG6*Uz~<3qpNd(U zl1ef1fA&=k$3-4+n*Q)S(y`QR!8PoQ=$yARJfHC+|SlouM! zP@Bf@#d6@y^@A!PkKWM5VGz{tf{4&A2C-KR#XC00mQ5(aU)mhn9fx{uk5N5VgFCz! z7kN%Oa4yKFCmEP4K`qg;L_3pnUr(9*(FE&+mvzV>^P>>!{i<2w0gz{R;e9?NPTC!? 
z!<(F!WH>Gf6ty+m%T5+>VQF?b^PYTt$;JEmuO0}5>%b-uu}1Y)`-qkS={nz?#}Pxd zu&mx{IuB=a&-Z+#p({J7V!hZZMz7m)g>1Ig(ayE>%zam6r6G<5KGvu%N z6NLy52R`9D&lPgoAo>4Mk<3#yn25cR5av{6VB?FT4kCTn$kfhVw{NO~9hX+Q%>&#l zhT~K5b5!R0RX3SWT2AIR))L){a7mg2SxKgmRdptDZCV=+U2e_V53J^Hdkt?h&}>*0 z10Sl$(p=Q-hP}?h3F<3sxeTx;u?We&JB7o5pW`cvEwDDm@z99SEbq+oB7v`>^GMG& z+F4!d%=G)dY#*aDZf+$m(*HDq`r-tB@6{IvYtuvgysbLQDJ+Uy=c38Nf6oF?JnCEn@pe}8YgfSW`ymC7iJB~Bp2 zLs=MF{x2;+X|Ew=V#BN~HQ5j-VT|GKn-?e~XG+YB0Y-cufSu=KVW@6Mdxq!e9K8v< zU8pn!Fv_cz^v!8$Vj>xMW=CSHqL)VV{>1SO?AGv?$wiYvC^g-zJVYud9W)UvtqDyW zB0>CeQi?M;gz!NP|HCHXR4$N{T_|iOT!)3zWF#|GRmUCSn#-vy>MusJao$%sE!(ZY z{V5t!^hnaW@^`UWKzSHg8EVw`(7StHqb$rVo$(>QQ0MiDxs}I!h@7l!iiQ6z)oC;G zTmCFsGmUf+oYrM)9{#kr{OTIFO0wANWI0X4$Xsx4KZ`|_AkP>kjhQucK2LPw1P}ym z0~*9~trh+e#|lpfma8(i=gMCEipcz(LozTg->ySlNjkf98q89Rx1dzL`H+gph|@$4 zy=_j0P-jFuw)8eDWsW~8F&MhzrQj}x>AMI7VvJ)m(sEky@vi>_C9{AaHM~FohrZ=y zI+|L;i>XohTIj@hbl)8h-Fi5m?q(~Q>DD7xcx?VKK5|vGD$hMhFLQx4yL{x46&?!x z_t*^9@uD$Z>`~y|SKeZAcftLiaW%sziUnm=v$1iM2n~N_bo^O=ML2lcA-N<2AcT`X z##)Bp6h*|PA?azbep}WEAQsgaL)_uAFZ?ap8Mg-&1b!sa|CCGt%KOMci-8iq?IT~P ze+Row@w>z4RYXwo5bmE6iCd#_o`^xj2?Uv`&xc?=P+ zM0>IpJqfSRqLdp^C-dqp401sxjTdF#nHip-5QcdjoLYR{!kz1tKfXNPaOSzkCWHse zm;?{0j`F0{*dj_qr84U3xVR|_-VUs(X>WNFI zEXhV81ao6<9WSdgFa?-m<5Nz#>wOUgB&lh;-*Wlz7JHw4JLa z%_?`VH`rVL?0;%OTJE1sulBK#%Vsa5fl>YQsb<}gx2@s+h!_UCoeZE}7zWLNB;xbB zeeSB2*0}demUi<~K50v}cUi3s4vtbh?=C!<&JA25?u*9bq0LqA5LQj}y~8vb?*AX5 z{kv-_puyGDbaaQ8&0B7;qthb()^muC9(gD&Vuz| zTmY>M&&p`Vw#JYIY~AYm!Tuw-3CEF0=mLfhwFZ3AvL^$mhj2{kNTzsZ4(jei`BRFX z6GBbYkO$$JO*q+onTF#S6XNBFb8D4OigB?*yA&dXt7(wKL;>O-g#G7S*m?%FQs8$= zt`kkg55;IsEX2>vN3zbD@w_<5ojS038vDu*R@n9zFJrxRbYUFk*7p*~WIZqxjG2R4 zx0K3=YyhnW4uiSPf0p-mVYKrAqMIIj7u;IWdkYWZ9u30|B9>+=ZYtHe%fPE*jU zK*NEB>?vVIy1wo{@MdqnYy%TMvZ6o$+c#>L`8RPKI5WJoj%L}Pp%kKVoxFF#I3++z zc5p-%XLo?Dm`AD2GJ}P{XiLMuSLm2p(!ik@ZD8PmPuc6n>PZ86%=5;JQfb^ads&68 zz7?g?PS;iEz-zt<6j|$u3xRPp%R%puB#?#HJn(2ScfLy1TsCQ3BdqRd&fuiEu5l-J ziTyTy;m-O-v`FR2@>=z`+ktpBN;C-~O6u$gY27#cx>#^Rn+ZyjyN$x9 zLbuUyWC+}}r3t*rWV@Kz5tHOcd<`1TpDSi$Aq2Wxk8;AfBYjt_rvge#jUs~KatXPp zlGc!nqW6E4tIcbx$)8@<@2TF7g;J_VF_Q}u*=+p0r-_ZsI!j$fU~Rp`wkDcvzS#D8 zCR5E)`d_BN7svls$(C;C?fb%P%|f7@_s$`X;Ux4CV|FXsbWeY#`ANmWdwuMU>!0$P zJmK?(tuOjV1tlI^MX`IN+*l9i`GECiPa=yNYNH(MTDiZ zdAg?f$j|bB%m}BP&4r=2*Q!@=@hxiPO)$LyZEMY@i=8=Zh3;|Y%98q zeiGUVrJrUELEodI*ELGI(_+P|hU4thiR)dHTNz|*_5K^0j+X~t6gTsUD|vubi&f*x z*efh59`5nIk0{8JAKO5z(qsas|8e4V{uxnMn2_d?8D@BY7C)X} zY%n02{A`*Z!duF3^p(t*EVmChs4SPA1MI%aMU{R)0ZlvIr+$L}6Akk|!vFd^z!4aLaP&4GFUAib(tis~OWm@M7sJrqc4OZ|oE!5kB%z|jyhI;3wf;$m&|9MeMZo3wS9w{-i+;9S z#RKYocv@BysZd-?I7z=Fcd;V44=J{@mWb4|)oUY&EN?xO<=<<$wz&f$+5B5w^F zW!_@Uy!U{nu&+U-vZZ#jLxO|R7N*52Fql=N0Z*9U2J1{sRr+(-8K-DC?hnAgK$IU& z3BlXim#$=Na;1t9qWF-nfJs;Mf#zY>LEUB3Q_`tApqLW}ofskt#M;tqF_cJ^86qDV z$Fav}7)Xf;VcYUF8A|Zo;JdVaBVazh)mm%?I6EI zAvvFo@aP0Xb)$}6q<)0&V-NnBVzbO-IXdk6Tm1D2p}$4Y^(RNrU+FA7H`}DYQqcm{ z>V$Oz*w9f_bF&)%Layp4xV^aMS{sfu^X<2Ln`6X} z(sAmS^O;Q;4Uy>-7t0YC)}RiZD&|X}kL<@;%f}}R&RPzS0zXSjdtSN{kir!tAqF0$ zI<6OC`uIARco$)ZF$vm#6mwL^Ux<;8i-!8ChIK$-EBXljx4YWs$X#CE&ytEh-GFJT z|G0cAP{Nsd3zIn2KP2L6XK|B6-?rRVGAbL)OV^&8(}(@4qO`qUZi&%eXztU4K|z?u zxQ?Y{*{8WkB#EDD2xeJ?;Rm%LwxdpSA=AA>paOv^4vtZ%AHS*p^wR;Cd%(V*V1j>; z)W=35un}uQ9yP5gJ3`pxjPsFoY`B`N)SD7Fz`QJ2k1?*c0w|WZ96qN+u-OK@WOEBO zQvv+CSs11v3^P-3PI@bTgYC%H+kIF)c>Ts*WSaMoI$==qh`9?>oEb#~>Hw^EV)vN5 z1^zgx+8H+WaCeFgU_>v7*iqigy({z_dEO?rT&h2DDSd|~K&~i!4UgJsQj~9mrVkZH zg{{9(0mc}Z{JWI`<&hMZ1F>cnIA~moWdxTRA~!2I%{60aW5UkOI{$s4#xys(Q;CW= z0W(s1i!R%z!&~nw6;5OFL$7IaEp2Rd<0_M8`Jc_U!MfFj4@U-Hsgx!@5=B0ZKpADc 
zXq<<%UqZM+DB%W=I`b>&vCqlNJ7wqCD$oR=FL;uFofj#oFOsEC`R>3IM6{)@$B^_^ zp$H=%Ah9baU*b4B&<2qwHEIJ4CMSM9`01y96sS;uPN{gV^_8zg|2tWvb@;Ns$0zo1 z?63am*&i&EpE(mNG<~ho8a8}APWzwSF3(}k-u2Ye?ke*mq5zLz6i(PG-Uf7a;&g#N z?=1p%)K^rbw{m{r8Y5Y#zYXLY69JJ^TbRla8}kq82qFTyl9TsJE`nZjkRGp5N~`pGINjZJ_@zTBxB&9I9Phv-k$B8h!wb<;ZgdBN?Jm><|h$QqHlJg$~L?)31K#`m-xg zlc3Y8vS)-VGeb3GNw@<#*S6+8VKz`l8g6D9xk z)ghCdTFu~gImi$d#gc|n0=$bd(GXwDwwjd?EqA!dnWm-ELqrAMD#eM7hQr0F6r72q zpsWRxyq`goh2XF4d0W*T1KzNWLYcY%h%{|&H_vS5^7{rdSVdui^bX*s9L%p9KP6h( zH8Lfem{&2W%nTqWKd;3iOLU3_n?wW+J?qh&W8cyPs_WzE^}-~u(Vij*^3-+P=)Qd*~TPd2nj)RdpQ?=bT& zk>0EI{iRQ;2*50=EDIU?8mix1-V4nfuIG!UyO&aoagC)t`01Ye7 zOLotzsywRy%(wO8>vgJ*LW%9sUi>ZJ_ij`^8|(DBg@1KQgR{;c&B#kFw}@R_3n3s<}@&0Mcm zec1>cJJQ%I9{+@2-uN7CxU1TKD&LE)^GjLgM@SvRDA8F1-^v4giMR;SQ| zTgN=?y`i0r!jbQgCf|jTh);x0qT@6qnV z)Yq}lGN8)BGB;r1#yxI$9eQ0rgp)rTGk!DS9y;Hr3@k7OV>_s4%nMqO>ZGw@MWUhc zI_!TE6nwyfM+_VwN^Td|A3(-mGC6l=o@C@Rfmm(CMFKc!;?E***eUVtul*^5m%Pd6Qckn+^}KeL}i zvaU&-76<`Xz!AjdLH{7nmJk20<2%D3CW3oJlI21q0}&lL=<~TUtEFYVezYcE|CLN+Db8PqSRs z2|}KKu3GHoI!?R1ZdY8iI&No1|5>trSYg3LEH*bEQTNy07)qeoXvRPKFR%$6#aD2z zN8YM#OunD|Gd*y*ezJ4#uysx*2ftKVr)BQ{oN=7^pb}&b;736Dk=W)Wi9V#_l z(5I4Trwc7jCo7zKBdEc~q}=wlsuc4MVtl!%hWR|Zx`9dN+axU>3DGNO$@^zmRa}tylz>X zk@4(zv+hs&0b8{{SMSE}x1vmh4~hPMR&S7DzCeb8lk^!5hYXeANFE=$i!h?FU+0vS z`rYp$h(*vhNOLiX@%=q)Iwa$YHc=-ipV&GF^VmD{Hgu1_QJ00s;nOh42m%7!Qw73N zL7y4>H9pK$Emo+b=6tHFw+p8RUSFS1R0B{108wi9{fELRT%~5qhRiV!Am)!qGl5cv zID6Q|Ho=w_?E|`iL}12=6Eu9kZd`}blaBFK&<#EfPn$!3WCs{=84(`EvGiudo zZILOZ;mNW$p2r*}R2L^&AjacBlrHbw`gq#n`g}d7x%)gMU}(1*^BGuuhPyyFM!-xD zfeD4Em1u|zcD$iA$=~rxEfDe5s0Q-bXXzgFlema+$B3wj3@{6jJ0H(FqV2W>Ux|I@ z_C4$dxsoVm31|}hiPJbhpI~-cW$rMU-l#TMOY<}|SFNG0TXXvaUu3K80e|q88X;2J zT#FT_oP>q0)k()5V>K-&mD&O}SxYmDuCk!35iN+DXooE1)=(zj4J^W_OT(j9wE0$RPJ5S=&V(f0 z(1bJ<{l%6nN){G$4;1^#-fE@HEWHrirZE3WJetq`eIJmG&ie?|ZBp7qdql(B1-s=! 
zw>*;-c5^SWDWXJ^KMybSwS^cilhX!Q@h4oUH8-VAOW3ct)r;O8d19G{#F{H);%9m^ zasge)pl7%;eyy2%B_nQs!Q3GC!um3rTzlKRABvn1NxmW_9ZmxkdyAH6O&x95J`;GK z*tXu?z3)F-(pgdcNBp_?!8a5~r{`EbYa{&)>G`BmQhg_3F+C|G9H6~jH#2Yo$LHJE z{1gK6i2ac7iT%*yl6&oEnyKlie1b%A_Tz`apCu*TFMjz*Nb-{7)D!DDX#=% zSn3J2|BY_Jjrv>m0XLs13i2pyw1h&<75ddrPS5}a|JAJ>i(gHP8_P=#H${KcFS_r& za|0=NgP1u`opB(bYL`evMY$QK)qD^u+Nn89Qf3kaA*W!*HXeEkp{WBO`X}JM$vueN>nw4COmt#S9pyr8w>uY8Y$Y}j2hwUKij>pHb z60u2OA?SveY~RPH1lh`*Z8^j$i|#3KJF0X((J(SG$r(p@{?b^8$tv$jXzC>RcFp+t z!%^gnq+WE}m)U{TAx{r>eqyEd$wr#kaMT}mVu)?Wh!*nn(O;VmsT)fCDRhi@P|O7x zdg-Pnr!}HJeZNkOFL()2zZAMoe6@mq=eqIaOW%0%AiSSU;yePG zaNZNJov4fdJwG4P=C;-?@RNr>UqWwc()8t<4Bhc#gU}yjmC`Xuoz7JpXEgK8|Exp# zPWk5+hv2SvhnWR2%Uce+dw1{UJD9P*L1XlZQAE5}w`Z+G^cnz~v>z?^4!-*PG#YR6a0{8Ge*6d{JUX)Ha&8lfO2rq{(SAZSF zMe>|(X8toI1&9u|KA*ayoFI6cAwd@^D;JP+)%{p$ALE~+nqMP>ou2$joG{oQjtGH6 zm}(Rn@hPK2N}x0?&sHpjR*RegSsQA}{n5(*vjyfxXk9{NxrFpy4mBt=coI849k!hO zak*PR;wdy7tR}ZT{qvdkwVC42<#~JnJFQ}saBQdw)P97BwTC#z2u&_Hr)S5xrD{Fi z=6rSjbwDc^W+E;3+I&a-Nb>|CT=-zG4l~)IH^^e=14X_zneWpg6PNp2u~=;Jz^jI z$?=lH5NU*Z>>?>@pZ4)X#WDYxEV;@_bH}*G-aKY>8F@KA!%h5rt}v}qZE8am1_;2= zPajHw3Uf-v77}U8cSVXk9zAJbw*n=N)v7@QhbD(G$`Y_BQ`MgsY%BINBefQW=CirOHF#A#;j6(F={>e6y^vxu4Cm%`nq5>QPpjvAg+w!kC65XQ50p38K;{ zM`=nmBD4W$H&}B*V=n=0I_DE)d`d0SeMhjKdxRW_lNKhd=1CaSlb?~i` zJ{Qm+@<-yG2N6h*=K>{DqShE;#?E^fbObY%5ymXrm)=u`v1gZ-q+H(SEr1IQXUsjc zF`H8b1}#icdEdSrCkkOV>`Pzl;47N>U)^ybfv+@9W`D#%*2d;#i~ckfex^Z*h*Q$= zRpnC-3X@t~XoAIMB}_kphQ`fCSZbOimA=YD_Ez!e`yV!ueWy(G3q!u@_vr-g*A|rv zmcTKczp~ba34XI8ulWdFs0kOyuN>)OD0au;dc^4N-=BTp`ac`QzklK-g6pSYxBPqx z5v-|pa@uZC_4Xs^7|2bDZnH-K%I&cMf%(?!;ug4jwfuF{Ud{ucM@9rwFaatrd?2L)3}|+5P%vUG&^ljqH+{>Wru1dc^4A;Fz&uT`8iNZ7w0O`{Gj>{~kAMnn zxx_fB@ncr;kS48{O-3Zj@;zDW)*EP2((trG$G+V1{l2zTsI)|G3wSa$-8xBG+Wnvv zq?Y>u^x0g@@LhfBT116}Pne(JtPxy;upJ`V-P|+peldU|>}!nO^ab5Um|a>Yi~rf{&iOrHAtc?6RGeunlmq8B zw}nr>JRc~zF}6_;8_mx$j3Ro4`#)n{CNCy@3&Twre1q7WHVNX*g;zN!#j5(7M~JO$ z48nfYId&lnzL5mC4FB_=-l5)oa(Se)k6?M66# z?ovx`JULK7j%X^G#wkLSP-)#&VKBi$e}`&^XEv^Sk$nNc`q)#;ktxQp;gL>^0GbVg ztZwO1lJZ@qpOeJ!jCHg9>l4>fj|R7 z3&wllz0c~A7xSMb6OcmEEwZlKL!v_a{P-?k|2(Q{|C>sX4#Rg4C$k$C@*gzJdKyY6 ziO%PSSD-RkfP75vxar48_iM*K`-1~ZtKY-=at*&hXVRQNM&0!YP$3|+&yyezk6~2y)}?9!u5-zY9_Y;6kkx=?1bsS(P-9JhOL?F0{)0+X4uT4MQTX+eY8QoyRL`O6mp_B5(^jItnnv(;- z$x3hb2?DxHeYhE655m$)DiA*PCIJEvMKzsHyl(eFR;QziUhGub&D`roB7*E*u$r?* zGsf-SHCOhv9OfuhyplRWFPxHO_7v35O$I;N1LSfwlhggv;zh{k9zln<@5|L$(nIp4R9^kq@ zrEOecxn~-uQQ8;?LAI1Lg=vEQl4b*7$vH?y#)V5X1|I@hy?xluBiHbsxQ;+Vg9Zw* zBWfZ;RWKqQvK8K)C7}VKlS|>)QS%Hh@%RUd{e>*5cVRKU89px3J<~mTKHo7e^4X-U zz%2)7fZpEV_n-6M2aW?P=Uj4cyzDmCS(2ZN_B!4x_MNX!O`VVWod6lmpSmfy6wu+) zhU>TL12KjT;N-mTZ1<_D5qh&o!j($?g9+^u);ApZyS0p`v+>>YE3@0J!>A-OXKNxJ zDvBrr&!h$t#pn5EiG59)AW{TAr_leN+{U91#AsBv2p}lQ^qHH945MB)~rGVrWSZcP1NUmzc=jGp92suRp($RSt^D|+L z6nvQkqg=9%gj>19p$JCU_RK_jfPD*{dc+J=u8qO&82?3wC^yfP9WBc5Ly6d6s#A;{ z@0J7CZ)vZe)2#eAGVVlEBS2O5oe2N4GK*!n!oJYV-7)6NZ6RPJ82i{j0hcd4XK}>F z#GwBW6^fUT;}iV?v)MLYcDC5Q5c*dd_M`L3wv;YhG_~spY21V&>v)_XV!~v>c9H%^ zi+T&Rm!~GDKm(Hd%GFpodI!Rf5&=hI-mx1Wk*4oEHMf|LC=fM_p*^lQ^nx7fQP{+{ zA2i4*obpPejO^8+!X(Ij1bnae^#*+7rSis+z^OT2VCCN-C>~cIU1A^YzoZ&H?+0f)>HG%X=E)frNKg;- z;mg#T&_GRd>Fp1>eyuAw0)5Zj{T~tIZ)ar7m1f*=p<9wk#4GW(_BSrikJi)8hxzR> zqIkUwC@gXk=7sM+HR$HC$S?sMlaUh5ucUZMpk8VzQlCp|=+%p#cws`;6b5B{$~aU6 zI+;(cIxqc7_|#of=ttJ@M}DB3jcZsWliqCVMn1drP@F=pcu^UuFh8sG1j_FRX$^wC zBvV#cB#2}liONgL$9KxSCE%k{|EZmx*=tRrK5#3IxG0M=hHO3Q_Ezjk_csZ`SRH-BUn)LK_eCL*Uw+fgU;uLQ3P; zfu&M*nMHLE?aIn23rC;>2_|yHfi9p&EmvxF8AHX*ejJzpUd_dgw;~Lv=wbW585))G{S(;ot9!)DxSPt<Rt!~IC{gX~8j;SsO-rqRq;uD{vx0mh8 zju)HN;mPaWa?~7rCXo|8tR!DRYYMxZw4`H__>L;Yy7E 
zSBd|vUi+p5U#h@}Vj&Ydc{p!i#HSvAIV!(tR?#n zt$@tTy_wn_DstRDw1NyIp``J@+jKGV(&K~3&b56~e40W&|4nLi^i3Ms;LsT#XY~~^${<3s5 z&x)If97IqAFh-#6<&1s4qM$ql=NNbuPVM-SI^UD*4FK3_lh`T3MR9LKJ?1(wgJH2S zY*er?9T<^f(7OYRl-_7-mS_rgrU4y54G^`m2P!@QhjhT+!MI;0DCp)2)*MK4Bh zF!SWyJ%c0{Y>n!o_=NvGssNF}#q!hTylPpZFj5zLYEmjo^UKIjK5d<{Wz$V7{= z2}W+frRo@d$}PbX)*$D*wQh{j3+c1gZ1>b0ALO#A|h^Tg16R9){{* zd5VRB3hjHz^V|kKNFOy)**s=>d=3PQF#dNr4f_=;z8)DmzMlK~2yj{Pq&;mrImWTL zFt4X$@_mBPjWvHmeR_Vba_FY}{czJMKbIaAz1Dsopglz{FW58=bai`#rANJxYZtGI zyg`~?JhVF>A@0n=AT5nMCSARTSMyEH`KMF-+h{Z>wf?iW^6XM|JruX1)`0EtB0AT+ipxGW4*1+te)HfAbDzq3>G`&)l@|{5 zih?(a6H#Oe^s7W;DbX$P96*cgT?&x{CjehMb?lle-u=?J zYw&XD??4$`iu_XfMAF~QTu7X?N$#KmgM($FM(VfkyME59N4Iuq0dVoDOf9&%%hm!) ztx_bJ!@x)aCr?v$6HZAvh6cr@XBC;57S2B(gbRq*2nydNw&QZCXZ#HboZFbH~*yOdFn`$}h!FxPZSX147Ad z1%-x*02RTLx%>TqMgp(AJO{Ay)^)3?O~tF}nb7(r)6vcI<$|I_yd%wu#~BC0XYuWX zvhS9dpKs#K)T^<;>WLI^u%-k0^@an%=xlO@)@sU;#%wB2dTM)N{ME%%>~L83iaGc9 zdE4Ec+2eM5#ba~SkpdR4d*+sWse#c2VOvtN)g?wa`t)}xT`rTfuL&BKe&lqQIh zd-n~swbm)k*GJ{&Z~M*HsBY#q9?1)eSXc5-wzjO~GtT;#tT<2hmhis}&!68fgYpln z(Ns=&DY@t2>*|{DJYI=7El!^EFREzUAQl{veQ7cS$}CnMCs>EpA{VEW0Td0fmOnDZNj=u&u7zz}L7enJ%Q9m#YNuL)I`4b_?+q>V(8rjy(`>km5ZRgcv4C zI>DKqVM-IzSOO( zur2veX(X))#DaL>W%2D&=#$V<0`i_VG%Bhoe95AG_a$S_hiJm1~@;Y;HZx%7Jpkwp6v6N5Y z5Et+?q@#gIudwa(tN5#?6my3LcJ%d2Xb|BHN=@N7Fl86JG*r@}=c zEYV9oz=vea8Got#qcN!eU7A{;oKk|80VV3SJFWQ+=gs~6ywpryQrb5z4q*^jyv(|q zTGkb1>D*bf;6;+w2vT=bP%7cCsM7E}{ao%pG*RY-JYnssux7!j)@wWf^r z1DKou@};e7ClR~%17BEPiFGGOi4w?Nc5jN|eIgM~F^>A>{=98VJAd1B0M)5!*>N<0 z;SiZIe#Ts9EoW0X8!|jsLa~r%E1b9RevhF}xJbtf=1PAB6LC7$n!hJf&az=sHv?Jz zJmk36h&O|XHx?q>=EZfAn}uw1-sM9%79veAxQtjJGcj3>=tayweuTPI7EoSX-`qyr zDj>v{_QfbOsX=g&#TW?H-{MAlmI1$fgW!(qGf7u`u`1BH_gwAayMPnB7<8Y9;+%;}E2zi=#@v7zjWMGrdeeP{~4_4D|t zzO!h2pn*faz}m6vzIv2jlyKwKQ#arhVub&?PFbd*d`o^L-EgxO@96_8`2Q-s`mb;u zjRywLho@%zx6^HMBj+3N!WeK|j1G%CX{seTIifbNm(IP-rzQrQq#^=`m#$BH4-sCm zT{`K7s=CPqq%Hyv4qo*lmQ$F?ZSL(+Zcn8?X-}bNPbyw5SBoDpuGKUC=Y9QC>3=^q z_`i#tXpyLHPE2y%C%_2W*Sz1GOI8KNApiSo0eIm^mrph>!uOrebi*uLv=_DC$Xi zRr!o{yGu;crOa7u*@e_!EbTEj0S4n~p|?DO4VMccd;|LDdRos^ZSwOJTjARb=2%5{ zB}lhyZ%|^(D5i{jI_jQ{%xBWgz!0+&NM)O}K;9=XVn|3}PXLqnhs%$Da#YaBa8yKE#pijn46Jx6$G}Nv##jR@B!R3e6lC(^{Wf z9*Xz(NUD`lP7-sF<;+I?e{8*VRMczNKYT=xmTrj|29WNS5C)hb1sS?Sq>+>s$)N`X z2?2qjhVGJXB$RGM5Tv{79nbUJ=e)o5+<&sx_m5eueO=e5_TE@zqF?4jm;9^m4u2lw zzScDHqY2s|cD5}mM9>!%I@5RDnUp%9( ztr6)i|M?<&AmU57UvG2!C8A`CcWi!%uC-ol88$1tMORi~CHaWm?$e_G$2(==eckOI zD$|>iwncyG|6HuMdO1(?9Mv$BCtuaBik4|DxB8!6 z$vA0hj+SrMthS*{jRL!;YSeb!W_V9_qK;2G$IoA%JHN%do^ovnzq_=83HvvYo?IM; zZ(~b-YN&Pie!lWwu5`nvf2w?DIb*zF^ZOthjC420vX`MGSaGL1)M@_XF6O6pHkN~n zoDKI`75XamjUQ-&vN`Wn%(S36y55EBr-k9RF*51Bkx{UCexIgOZd|&}(>N89-Z-F~ zXc%undif#G9#H}I0KkhK=%RHZn}{QQAcE5F2+8XU5m3l+kvo4h`sRE=4Ozs1S#=5e#Vu zu<~Hd$V*PuR^*_4B0*XeKk|^38!)VS+}{fA0gA_bsK_x3n&btV10M1yPtVx+qfb;` zG`~#ai?0G6Ta24|n%`s7R5F7;DZo~d7h;KoD^kq-(;;i_@kj>B(7XK}G5qn^Z@>^3 z^vgrs@&1%k$mNIZ?ePGe_M9(ohR08aro%M(`tDu7wX}2T9UZCL8zZUHY01;!hw6)V zXSJoyAy<$)reF6AEs=w#zn`BM@@Z9+t{&b>{sUwF^RRqsr&9f42{$)AFt|PZbEQAR zOql)9dOQO)sB`)iu5+A$xZbWZ4y>Rl`S!7bg)BwXIdepO;|aarK?lmjI5&7y#>o4Z zs;it{b}WfEKHUBVvOElo?KHEOqaBm{N(c39Mop?qN}FNNX>3Fit{;ghm;5=DFT_lU zt+KsaAfbe~DZXW$6{Wg_$62y&6a^PplrvVV`e}fCl2CO~!ae6u&`l3!U*j3e zC$52D$^ZU2hp5cfA&MxRN^=}CZzAbG{=vbmEuv0}^p5S=Y zdH~8w2ZT|EpH;goZ%&caI z_8}}=+|WkUXi43MT6BzvPDb#WFp_5IQNM^BE9Hkj}}yMGVWi?EF_AmHGG#u~;OIOI@Tm|5E)N*^8) zA>xs|nFHR{>KtHwMoS&n)--{LKxU-iE&u2$P3*+cPk0AU5m^U_7KzXUP)G5MA}tQ5e{0v@}a$?%9Mi{7o-_*{Gs zKix_%PgVWh^*^u$4ccc!a27nriG)vMZJS&NgCvfA(AUSPUGucLU~q1^rQOti6?Xsq 
zLhA0D^_Jkxyu`)_DUU7+?7$WKYc9%)^X&Sq#*gdUE7;Cs&%-a~8jKkG-fK&q?_E4JmHrPy_{GDjIN$ zw=3>a&+V?;x-%Tq{vHe8eM;YE^P4;o%-_GI_03(Y^72Y< zgb((veD7eD;U}&jPJr~&l5Un$Z2A;SJt3iUASKVCfg53UqzuU6nDxtmKC&Fa31Pv0 zu^)s5#g~zFk2fP)!fK?aol1yXE==E4=wSt=pvJ$JMpo*ge*pyGTpJiKt0>HzRXS4g z{a}Vk_*I(PfgC6&AmOAkbby89F$H^0#ej$KW|Bc_S@mdqsU9!sc*2B$N+T@^J*w)3 z5(kY<_cV1$x{o}VC!xKcFqbE>mPz}~qnrM;qGYRKJWLAM3{upN^dmBvOeEh(+PR8y zG8SSV0o$n;p0?Y5wUoDM04spda@MRhj0E|e`ph>c8%&+Jw%pzQUbS;{go%6_zgT43 zQfb~paR$1i-W^PI={%SC8O))Y8RV|CHS^=OjHKuzsLc+^rk!ht9>pVxyY7o($NdyP zMIWkXQ2F4FIisiy(c0KAj7D9}-#!@Ib1?@+G=(eIf?7H`#Jr~)vi2W<1i;To`!7C; zOJeqTCK8kEi;Z3mDA+O)^%OlIb;a6u?a{5sI--ir5(L5FC&8 z%a1jdRi=%vcDZTQcs@?IhHFfsCp>*C5A@E6y;>rw|9yM$^FE_}cJvm)%k68~MfLc} zolKKYJwAQX{}AB+r@Y02)YLy$*;XpL(;t~;ty;{m$~+20A36!#g}WvHCg0wNjB`es z$QOZP2kNK1o7Empb6&Lrxadc6D_U^m8;R1BY<-oDtq&^dFV9_4?Z%6>X*nvvPwfsR z$UYJ`5F0@162^eo~n7 zB(qVzHjia(IIM^zIYhyZ<~ur}hqyK+iQ9-mz2!93L>=qtLi-Bs;GA17e$?@$ROe#pNQIyyK>)0% zhl7AT?M8uva&&`tDv}yoW{nRs@4Z=229_%o27`?#tB4$TBkGd@^MA4cl#@sakThu- z#K%&j-=HM)N(n_!&r5CNyEY`Y3Y&dIcGXb!Y@!R3i-VfvQ29MeUBDUq;@opyRWf(r zu#y$ZpAx<;o(MhUKcl&U8e5KjA$pkWznihbmD5p(`#RA7%GqbNQa9b}?{5LENb4a`W@>RL@8KI?wR=i^b>dsw{vXE4CGT!t}?eFrI9T zjuvq%TmV^kP=$GQ0Llb`UcY|33MGta>>Som1cnGedak>-r^uQW8;CgMd6}-vLH7(& z9ZWB<)IY6?hp%GLJyu2kprSXi< z|8%NmEdJ{qx994nst)!Ok7g6^T;=+O3+9ZsTh@j+ z=$}esV}*9e%!KCxQ%9#|~CxCv0QL zRZCD5^*!EJ8|DL-!z+BMXO%$h3wTo;Y_5DLyO7X8J)u;k4t~1Eo=^g*eb3!VMWCpN zJs;rBHf2S}O&?uO6nRp89%e8QJ36Ndu5KDNih{m}=<@wcg}fsO;av(|mb&$oI=Yr& zPf|?-pq;0|4kLQXsfkUIP)Uiw+wD)el1SQ080nW^S=!zzJ2?+$PqN> zsLQvM+0o3;rk|G}Tj`_V<5c+6x)l8%+ckdf1KzXzGA`{VkHs?f1cSlWg1hn5lV?Jq z?5pdNR1H_{*bKs?_Z-Yb%%?s9Jz_ZE0h}G~9p1(Bg%jZDw#sDs9pFk`9NVUedM(-9 z?(QA`2b;TKr1J;OJ4>62F1fZjT^^cCxa`Bz50(9G4atIrC$(ZW3r;S=P9Pk>Rm7X}Bis_nl7TbCY-KqmO6H7j!qu z|M1`cm3sb$F)k4F6SS3GqRbL0^PToqRg|1tCrW%9JVIE1seX$GiQp` zP&#DQ#`gx|OL!H@?25sd-rgCC@9}eh&2E~x4dtPdCUn~Ln4cgu%$+YGwh*|(s2Bj( zpZf^Ieeu@k4O5R^oYQf^e0gk^Vkc~2IJ@9uX{Hpi+&ko*z!Fn z)Wx|h96)hU<=cN4%v1d=_cYgsICZ_H+aYlXVkDPu!K4I;LKKGTjk5~^umI~!m+?-$ zMiBXZwspM(ZT&*kTnqUUj>PYR6N_36@x7gR^KOtfIby>M+ZvQgt9ldJfLYxLT5w5d z2oow_TESEK1w=$@3n%`tkXO?}sq*M3>Yw7D*9n!GNx!a*g?JLC)}o2TGaXV65pP`n zSgA9dB-`gGgEu|o_==9|t&gd?c`(a^5Uq8r4T-X$UqWy~pY?h4J0Fwb!#*09wyxk^ zJLos5Yb~u+>ha|dt^pyP8y>?Q1_i>&8dlHuNNojCv zsnPxCd}FWj%$x0HwS0r*qFRZsE=(4348apZq>YeEAUs;o{NORuDWCz+dKhrf>S}@< ztz?*jf9t>Q@{OG6e}?mVC~9Y#bD`@j8eGphUXt@7=ql8Awv`w}ati^k5Fi)K3L6_nV_WKhmyi%hQ+*0PDYS%sxeh&8iDW0 z?{F-oWSUAg1H>`Qw#dEPf3tAW<-Yfr&5>mLT*1i%lakv2Nv82kvyb84xJ7E;`1Z%3 z8;o=(Cn?n+CMvE9)%q({;s#KR$Av?#XU$lo6rq9xc5DoR^DRn4nyLoB4B@G4$%SMu zl?%We<%k3*w6ttWK>}0)s=zD(gsn?Wn@+xN5z8`auS0;$i8w-8I^G*%Zb4rx5JUT9 zxA@)rWGw~Kanum)q>WCoc)hx@ytR&ZM95@OJFrZwD#DPJUoo@j$fZU!u@qD-pq)I z@+}ks*U3}oNF$z)(OAHIAJcGJpVz)JRQpCtz6s&8AFO@tY0(Ode68mYKdGSgK%bgx z;`a<+aiLWJNF1{v(nVwP)s|!H+t0H4)-^oy`AaLA25I`XMTVfpj&w%JzUXcvG7KmV z9VI=ow$Vd`NRIk?n`Y06G!6c0AoT1p(=&dehwON`4%|TPnl^Y)v7mhom$AJ7lTCOC z+Nhlqi{r@#k`;t0Viy_~7UxSHzhk&xG$kKaO8RPbx+_nV;1hb%N5?q99QVv()I4Lt zEH*BtrkfiqWX0Pp1{Pcr9AQW$9*7tGX>6Ea@(OOdD93^(S6dW&ws0GqHxUk>u!7HV z2cnPT9_1tpQEK*UEkOEm!5_|T^&CD|21#;>$uAY$tw<;9Onq<^S(-?5CE96-t9Bl( z=S&(+F;Wy}#h`sgDJ`k-(ZmIg;wTm=U*_@?{#W8)WpUZy-q^azS}t#PmR4N8xtdYC z;>udUbtl!UWi3ZYnj+DV0D8XoCLv5iOi|a^{oRO+e>pcEsn!f&`8sbYz!^UBZ1wu; zdy=TL=Jt7y#Q)V%{;qME2UJ}m_ku?h5<#Vr?GN1;CP$Z=#?Q1#xu>#-E@PT{B(>i9 zi3P>Un;rh_kyr^Kb034U*^Yb?mpD1fRlAzwk#>^L1K^s0LIDghF)<5F9-7$@dd5@# z2+;^(h(%J70v>11YvQCw860M$L~tD(ftxfC2Z(v>6Ql$6RY>m_lN-R3FzT_$_h@iI zv;Zq5Kso06^BiIhiYcUpf^g*K#A~y4gD_dwc&6m7SzKu>W)`~TRNz-y9iVFh5}P-6 zsd-rdmN$$P!CkrXTv&t%TzEA3)2RhvZHpYT94ZW&Q7$5n@o&GGnkkEHtn)`ec@QR4 
zD*{iY6b(FVwpVBtMrDC=W?STw4O!ZPNUjieD`t+zByi$YJx+VYznCvM7@}$I$C`aY znrZ93=4@sqSgE3Guf=&ml4Aq$GmpSmX+mMLSTdyFS$q2CX!ymvrr;Aj48(t>VvD1= zmz_$WT4*qC2P&n&EP@T8lp4}Yv(hxRzFmQR^nIw|CjC0vEXm;#nQEQ10@Pp3`* z>z7(@Qi$vieYE*~xh!pIugsb#u~XQUw3R&hu&u!~(Eoz>Or6O>vj>M0ROTRUlK@PL zLFVa)=3*Fpbe{ay&qUTkoit!Kuc66T#$So(eMc_7 z^xEJUMO01@qz};3qkzg+S+z}Rg z<*iSK|HJAkc8M>3q04kGYpLMc={mFZa59i3(n%82ufP;D=n_47YFk=n>SOB(%cB3+ zH?*~jZ<_YToVRxp+wK&&Hppa-4f__d>kD|lA1+H8Oy91ZX*`Qt{{3I{?mq`3=pJmA z2sa-d@bG!FX&)JyJJ+&2!VyV+#s2r>5Z#_$J1*Q3PSFDl4HZSHGKujn$^%aPO~EG@d!EC2ww5RX-+th5tq7=47=Cn|+;Yh6#53T|8W6rL$rAva4QX&|WxWXP?NxG^ z88qD8pTE&o1J1{bDNZi`JkcV{gorVRay}t2Y+-s&DECsnZa{h;t(l05b1_8B+u)e? zYodRND!JT*9C~;`gfam#7dER{?5esx&icedI=#FHgWr%S@HF;=p#ga(>85$VQ^X5` zgs~YJUtYqbl!!3$q^}ze_YmEiG;;nBuCWF^<^(dhCC#sBDmm)O4Cne$q3qWV4h>C) zDE$^VtR&q4jMn9o;$voxJFQ|^QZ%QbW7MP+0yJZ!xQ6|1w$|ENPJETIHZ|)8YVMKP ztH}&?Jkzhvt600GFagPXN)TaNR~~ zNK6@t2~pgy z8`T!c()Dgg#|Ziihk=~9Re37yi+n-2(RdxRa4eleC4T_gnGvmwb&$5SwJgE*#t*_n zz?-Bo*)bVVxQ$_c0%ZUIo@TK`%g|UG2A-{}Uyat_%qcqH2-a_US*GAdmX!PxjP6tb zVd2xf_HP{yhz0c>BZA`J6*7U*fZLJ~;zSb44a5J=tzc`xliRys5QDZq+=Vca3PpK? z?Dj@bq3x0hD+bJG-Fyix7f(8Hp5wzx>?Sej`k|o8r z#xF~Rv|1!q$Cv5pX1!>9q=Y>qDQ65UvH-oNz{D?=COahZOxo-*_-O3xO17!;7460&qDSnG{TBUR zHwuF^wP{r~O+0nWvvB3a$@ms(qb`Z9YC`3CAM9E2>ixkri%DzUdGY9|IHAM%^9l}F zX|h8x+)vx5^V8Ot_<*u6;|0TWA1g<~R7Gy(X}w{#(IT8vzJX{1=(@G^)Qmu}=Cag{ z9%6mt|EE`b;w*S<6S@C79Y5*w)tK5drLEg_Sn#?ECDp4IDM!zqo?!6Z$u=yP)F$&3 zhsv~FNlIKcHwR{K&&`AfZd^2TvG2Tl`1qTn*Ft?|{iTN?PEbC$n&<6`9k-)?wwtFI z^qO$d4PU>Jcl^K9bDbhaI+!56IbT`n_Vk44(C4ZG8>TtoW^eV|_kWH+5bwM8u{;%z zHBBYE{qe^yCg<_p@44G%EAYVF;HO~fv21FP96I-7nH1l`Ou_nvsfSCMThlSW1DP{s zR_PPYshYNO^}TW>*-k-=qDZS>5EP&pnxd=DlFef`SXb!#w|K@a6?Ia0dgnOM)e$A+ z6Za4=L3riE3{y~=aW8aL3Vs@Z_@`v6sLz@60nEc{ItKorgeOqv2;Y+M?XBwK-A^gl zsnK|9nes3zCFtw+vv)f^etv{BQ69itfL&!N(V~+VpCJ{Ak#&kI-WMCG=}DTq1~$0cwRf2y9LK+SxYyQhioRFTh8WG zWSWm;W9Bcd=sgb9o6f107k8jf?rxjWyiUbo*K7Iu$E!9I(;t1z-!h@Uo8z)dQTeff zJU_EOGKl>UiP>C%M>57CP0=3t|H+SJQ3SOxNXQk>PdTLQ#Urjam6|h;5#!iL-gjjC z_fjVMcgZ7uXJbUh7tOZ?c1EL`_vga_q5lhqf?}8|9A*XReY(vhE{5cVuTN8(j(UrQ zK?3b-TpU;NWeJTR(85!CyTW@a55L8(#T}(bRYVDdxLOb}M2LIVBiT+EJ=Odc-v~qt zD$U<9K<7jzCAHP_lLr(|Q-wVVlL8@@(X=X`4PI4|Sdbl&pt0jCxKU?9zK+!vmp?8E zT)E9UH3_X5zkcfwZ>&K|ZL8YF9PFSLp$pMi7#B6l-a64TB$FdPq6^mM>*lwk_I^}F zTBNlgDXN)QHO@~4cx^HpJhXzKW8kXGs>Ji!y z8;JFUa@lCDAvX}$oNU$7!I=Kqar*kqdupszB5_P_+TL;BKyCFg7MbInuBk6FJYZ-vIk#z)x6ya*Cfq8jY{KqHp!Cl;OB;RIAnWnNy%H2DfS}jkT+-yE%|V*6lGQ0g8hkR%$L{(n)1Uv8pTi1fz-d&q`>gQz$wjb@UBRxzU0V}8;0@NC?|zHN zt@ZWm>&4NDT^T8@K8C5o`vTGFHxmQrzhk^UzOG+vLtO`6AFMa2nobumxm*cSZP!xl z`@Ig^*AUaPaSZG%NSSo@_-}~hU;x)4p}Rs?_e-f@X5*TC@zw6#OW`GSraeF_pnz;9 z45vh+-d4>w@zIBq#W2rZ!`lYTWM0# z8sg9{;jZAiqN;C!e?G2r|8svJMOu=cSrr*64HPyMJX0+)p3}6$QTzn_UhYfIdb|f3 zC2&oKHV!rkDK}Z zTQCl$w(UCX{VFW~_DEV%Wj$AmjGY*Oa5G6Wu~jwGprwI3cx=L0$rEnEDin_Bs_1fP zZB4#txUd-$YL*yQ3@tf7>SQ!RjG(!#AUrzgnV2B|a;uVJZ&;B=gI|~LsQ1xQ{G6f$ zO|S$kp<_^9O0|ERZqFL(U@xGyMX_nt+Z^rC>mfCjY`2l&rE{8KcWr`>`~ByFb)duC zyX#49T0`0TTxrOB^CXk7Yx74Nfof*EO<%5JBu%e(>$A$54(R4Cw{5n|=;FSUVDC2+ zG2X&AmK~R~P;~(ro(r~Aw<~dhciqPqF=%CO+oTOfG#YrbOnFhiuln+y=Uy3S+0W|- zr`!K6@}N>gJ1a@o#Y@eb&BA-Bz2$qVv~Sg`GJ02kK~)?rXGUdfNXiDnXcagUOt~um@bS zMaHg+ws-_CYb=5q;HJgZga#SU*CU-FqVmxwwQ?PStQwSJ;=3IA(lOFEQC0%d@J*N^SV!)hP%1pE)<3 z0huA^_k)s|&Uc&5zsqun_iN^MJ zRQA7~iAlEdRt0^^n>xku5QH9-o}Js`P3|d3+M?$v>Dt>DqaF6WHv50=mwDnK-~7yM zc3su+<3MQ*vc9wVFwZ^1TOh7b`Pbsnnd6gEXMdHC8P(06m%ZLi=ToaE84v5Pfgev9 zv;2RGpKo7&EEcjodhByj$tLnu_$o)5KFGB7-tgA->h@k_V6xe`@L~A^Ps!h7^4Kdz$H9i8=SZJXw6ReA!PU+N69GleE7zQNVlk4a5`XANlggFr!^W z?k^gLtnfIUmpR^Dv2k=>g(HiHYmOoBoR3$D3rkBUpJUS_**_W~H!=fsY?j?ys|M5h 
zgOLh@+P-zhXsWeE#-Zz9_Z^eV?yxY43qQHgz7V{7iKMMolgoepS?;G?8Zb37t$m~$ zG=@a0XejALGpGT`3-Ij(?|dQtTX#7k3G&@SyC8YaAl@$ieY|bFvnp#2p1{H|X$kd$ zD4VA3rU?$T1p@raGV#HFHr`#~ipus+?puek-{exbiuCwp-fZdzg+Xco%1#n{D<*kO zO{hP}R<-n8K!{k^j27Z1FgqAQoNdx?3KMMhH~rv@;yH7A0{gLQc{pS z#WQZc!I1ZJJ!L6i*OC5ob+y#f&A~5(Lyms?xsk67zwWOUr!;Dsqb)E#c$XPq-r5_TQP`LD_LZAKTKO+QAfMw| zSJUOL>FvA&n>rfY_=K+6fM*Nd{NIxhq@ny{bBoI3@+-OlIbG|I@6O=Td_}J7x8_dA z9oEu#B$aV=@_|Ur^**59PVO>Aqe;DO6sD*Viw;haU(_@%MEzg%mF#GFh9L1T?8;*g=Q|upH%k0*|-}D&P{6w`NDb6pq%= zfK6yn!_@kezJQKLRd2T-QYhgwI+Hs91xWyYxa=pK2o(E1VWhJ?9@gNgi)_unV@*p@ zcuxU*jH|8<$WOXt!YzJYWMoi!DU#UvNZ~5&FR%nPT(5NY6m88cYl^1oTLPaH!62qM z&(RKg>0sIShM9gsEq!adN|-{`G6|w1nKjhK`ZRskgB6K(tSZDIU*#I)w&aqTxno-5 zSrvZLc2Wa%?aPewIxH-Ezf{VCk=6XX38d#$(prPW2gFqk&c>cCO`%`8VoK2Ish^bR zh_Xq|uMJZdFVZ0#bXU|HVPC?Z^jvy94&j{oDvS)~C;qF*z{+Gwd6CtX(gy59?eIfF zY|PZ&joTo_D5n*%WRRp^`j*e>+BgLzqUk)+?B+gvI$hXM5~WjWl=qdVMO&>l#^*Le z)O}}?&F_Urn$QD65kazJ2i`B~{SJJxcfitEFPzONcOKHg!W&i2;)EBK8G$-9WczPT z^(6P1uh(1hOcFm-kMK)2wvX%u9?J%-?aT`1Ed6*L=u3qlfU?P_fiuA+mQKYu!yBsk zf|p!+R$Sq)uI;nR_jC2Q^3ekk^8Y!RIqvTHGeavfOo8Lgffxv@t(fx11Zj9IUy!U3|WF9vcUG$&GW@JCNwwho+ zad{Wpwg#5EKDY7c+x05cMKaTHCaG+eb`um2(ES}UB)Y!*WR(P!iQvn%?AkFv(IQ;p z{o;Es#7&hx0igJYPqRJ_D5*8RB8+j*=VZ}NFA_z|xU7ABPKAk})F?Z!9m~8cZeoKT z8L5^k31KXu(DUeyTZk&e%r7hiYypG0RzhDbcG72Yt53dmaersrRg27g!#hT2`yxUs zN=rqpDQEDFY)u?g#YldRJb{eeeBvRH1#zcfr?1R@iVI88>up5@N`OjEBQZ7e<&uwj zXbb(oDO;2rFm%9>0M7G$Pz>0E4AGh;94UYlYgOlL(IZw!e?IB7Jtw794W3Mq+du9PJPVI|8dkl05#ak`>1Tc zwCh}2H7_sEXC;*C#*6KrYBQr_IoO_8pKSc7I^H~LiZT!WTCa)Mx;vWQepUCSiReBk zR$}XE#%0UloaMdB-|D1jTkGXbbKfzKuU?##_{-o&ao-X(q>JA^yYsd@+HkbM;4aeH zz3t1LNOrpamdJYYgnLfV@4v|C=*Zw0j>8vf9ZBPYXQy36yZfToOCu_h)P4Y>WkoQsO)KVu;~mM6piknDq5t?kcY&)LG^*GVCgWiE7waZ@+k(K%=4XQa9q?qJHzR)1+A+8&g%S-m`5k$N8sZSJ=09E9h zs$T^-)xK@ga@B^Qk{2Wh;qDW(I{B1E?DILLnxsjo;dstYYh44B^nTS`QT4;`P|RsIYr%u}hPw4*FSSY_e7EUD*dnL}}tG{l$1 z|1cwkb%PtxloQtmnT-wtJ0r?MF?(bse(F~cJi=hL^&s-7{lJe|@Rsk@XgwZeU?jRy z%sBUMrM0d+4gJhC?U3xfg4&~BCml^CxLB_bRKU_0WQKShT7pHi^$gl7=Pnm1r<0ri zode{y+v{$ITn?*EFzCHJ!|f_+_Fu|W`8|Ag&du)F_a2^n6AF$P3jN%BT~YZl?puKT9WoMYJ0Kn5vQuA5i@cxv-D1F|mA^ zpg!SOQz+@?-iw&1n2O`n^b&#DO77)X*IQyAKl}IIO>N9Y`|=TM34E;ak8ck<(9JNZ zd)_M_v0X^2mdey9GF_rsX->DrsXw}~CRcm_My@T0g^Leh85m9@-P$~oue4A8P`zgF zJJyxre@z@|DvY+qUGfb7??S_Vm#%~i-!xNqyKo)sFdTQiJ?MSv{+KlxK0{A;y??}= z{H__V+0a#V+){F$xRLpCL?U%Fy}32+9Fgg>rSGvhW}Ult9974Ku|G9DcAoWYUp{Kj zbWEvj{$3=h_hzWRWc+0EUsd#5;+vQ&f!#^U{-VR|TL@4qA^sHv@zW%PiQ}F)HQu+k zH!R&no`V{y-4E3UOcl^^U2>A+7;I_LK+P^ol;wMp_rMA)oz%7_e=OF=77XhO&w#r{ zpcw_hgkvs8f=l?73TAfG5C@riN_q&7RaItoY?BhW)Q`y-YK`tF+d;=;3=HLs^E*PA zw;s&0k<#Z-*yeC+J6%hjdx9rqE&2dAf(KI4%^~46I)P!H1bH>!2iV)oJR7Bkz465x~xl zWFO5bL77I#ho?slXG%GgNl{;3Mrdo>Oy%p^e;*3<`O$Y?WU64wZSqC0z}PMS)o@DQ zq(`yGY#fguf^ZKf^3y)r9u7S={}XFu>11NJ;nX@s;ds@bWBZZ>5kLu^6EBs}4G`)( zD_XS9xo!(H%}sO2OJZ;Pp@ptpOLzB%$)KaX){iOH`;Xt7*$lI8hzs(YHu~v&H`8X$ zl~;|Gmj=75a#+L7<#U5o#q5oza$XWiXj0tzY%*#{)l=N_tQ!8Sm57A3J3Gnf+uXXO zt+xauU7hdN@z-AntN(aO_F7{NCUL<_`>`w)9{Lf4c2u4v<~hRn!y zH|w;WdA~3xr!CXxO5bC9eVHmNd2`+G9uX*Id_1K+ORAF7HCpZGe2#gq39+^h7D0DZpJ{D_MOV%gLzV;lEP4(ldZLzN*dh09ZMi@^yr;B#fmnmsH^DmSE}Ispkx$9+9BXjnebav_T00 z=o$x-+v8$)vNCnVD$9dFAv|j=tzn-Ol&NrGB~jf%Q#vGjwlFxv4vp~>@p|W^2oj3o zV~FSV&bu#ObE|V#+ozHp#?%VL=MXpOpwB15q3=Cz9X3cRK&{QjSK4+XdZs4;QSg@X z+ALF#54APnO16V`q{MSQ5sf4ZKZ8ju z&N9F?d%$htbAD3YZ9+u&TBvr|r-%p)5jR-$9YHJTmL6{`acGZ16RdV45 zOon@W`1VeCSM5*JfgcF*(4*#2w`8S0w7Q~HcSBx@uaGKi*KmGQl?dcPO z?E_8h$PbKv8k7G-tUgO6qW3}g-0l!9>X5Tf94p;@w;qps=*Ib_uCh|ocdxM^;O(h` z&#$77?j77zsf8hH3TjNGWT(+caZL(6S}y(vtOynMqL4fzfH2}m$VIm>jxvY^o!29) 
zA`ZjwPL)L)EV6|~>bx*?T;P=8bS7RkR5sKsv^`jCN_64t7AHw8)X|iW z&O0pt0tqAxxZgIq)!ri5+$Yn z2zC#7+V>2{66wi*e={Lh`)Xb_?PsH3jIvU95poPI7S*GmYo)@h4x!plo}z;bUg#WP z%o~m@%{PUzw_X0|oNfyKZCl$pQ7nNF4_e;Tc^>g1JY+(Dpx>leYcje2QJhgGc^yXH zvA&A9We|?B5s^kWYeMhL!vTHk71G3J2*1i2DY{LpAWSuJ7hf|5B#PpY3@P&HjdP=; zr8l#fH-ys+t_h@R^Qq@CYU=zIrNT4c$1A8CsypfeXQg}p-ziyIJq5~op z9-;ThirQ7x88Xi25Z2K8S6p zcr#p^GpvbEsr;nPVHgEua|bU_ZI$JvQY4f2`9Nx!ACyVYf1Y!1LC57&cY0gjK8Blr zsYYAS$q9d2AVS2JUM{6y4cYm!Ns|41+qc0&HGBDT5$eyf&#=tqJRG1TBXIif*SQSyQ~3uaw6g*Dc4POqCyoP()6UCvy*0|!uR#R2nQb~Ra}3!AHz8+~?Ma&>YzQ8166N@zMcC^cU?q>hIPn#xom#%e&6}sCwYD^VzrlDC7FNFHPRji% z$!7DC?LPb1In`p*@X&0Cv^L%)J~XT^OizL`c&lG@ez_X6oF0$O0dftj#pA~ZpqQv% zwz)W$_D>Y?EPfV9NGzlv=!KC#ePN9<_?69|%wAgYJ`&5~?DR86vQf`8B7n5flL9M> zoB&hx6&_ZnjvM@2u)BOM;~4p-!5`ZA4INq>(aO^sjdU*!>v?1t$|xLzG$|bvLGN5| zu|TJYPH{gPCXnTjLto=OdnQ8}@$^ab)3QR^su(n*1JyFT*$oC)p(DJd148{9XS$BbW*0;H`F+G1rMep}q2ucSQ!9y*4&Y-0m@GN%X}JEX-Q_-fcN zv?elE7@t9$!3F1PR_~B~fksQ@xM0;LU@1O-9GE9lo`V)Re$wqADMQ*RW{SyirbtTn zr7#^t5fv1#L`pSXsi@U1Hkxcye02GR<#kN-V?kTIfP+o)BS;K$Weivh#X!wtPpsixf}PE>Z}DSWQ165*h2NA94%T|+(EB!-AxXo1?7saVEO3ldbT75k5N^#PKO`soLg0qdAkWyZzj0jI@j zuERY(JMcdDJ8mjlbhHjC(610-KCum$iH;`ZXMdz05&5h)O*NM#2muNclecCXd+ZU` zZ=^ELDJ&w5UgjzSvOYYGgb=CB0e`{mq)|Xt~Cw(qt^l0{nJhX+pLM3;`D_qc^^@Z0`Rk&UIE8 z-hi+S_mNwJ+8>HZ$>a_L>4&w>&oRd~zu;YEWTWY%A@`7PtxJUl#nb>w6^bzUx^;Fg(Q#Oc&xz$xfKOS)S==N`=B2N>g0 zozzpa&;ukEviA1P_~B?GEfEtG_`RwT*@{)@=}4P?gKNx49z|t#bgRhMH6RL)5$RQ0hvHItT(^KfO@gBs? zsZhU=gv0>YwI*zVW|~k)6Ll2@EEz|i9v7xb)&S{GOE0PNj(Q*3OOtBbi_LC~^h%AO z;1QjT7L6C7ePC<}P0R==F>68xXypQL*=RNOt@A=c= z7(R^QV*TcvYp%H_$JrQk(l`K@hx!AMGG#ecLZ_Ci%ah+N6|_%Ui(!Pbf^HX$Av+RL z&G_?}{IIK{7ez^+04MQ!Y~)cSU;!sfJU)g~mPy($k?I0NJqQ0DV&;}EC#j}bgmt0Q zlQMphv%T>NXO}>&QR6}Yw#jQeczyW%`Y=CIFdc0UOPC)+glUsFgspi&%nYtp3b4n~SPhf-;6hh|UrWzC8XqEuq_U}J{%LE|byF*(Wx;A`D9I-9 zx~C;-UO*IXx4TSjBN;Vvf%r{@Pwkk)7;?~{29f;^5EO}Kq{aeXoT<3bf;-1ME_wzR5hVb9W z`yYs5xt{rGKHQ`X+s`-qK^3vA-kkTMkSZ+}fq~+jq zd4Sgf#OHU~v4f{iA+q%p%C9#n)r?Q;FT%hMxy1XNcV3Is0wdX+T-aVCU`gH z*<^&I<)mXAMjP0jO;q*Fbvj&V31?>{PblpypV>k{F&k5?$x3O3ArVuoi@c=jRKFRs z;;%z><DRkIJsyHD90oeO=C(@~V0AJP4nvg8I$If*W#z znwVm(mErSd{dLcDaG3M@Xz{Iv*6z9Kuix>y%$#>;{EwkTl`dnLeeUM9kxSQ;Irptx9Fa}twuez# zyUV!1yN7k>LNI@By6hbyt5WQXyLl_VlPPBTXjh(hTDj%tgT3qjL- zxOsm9s>U#0;N?}ty5AX=rv?a5>(E72i|KtT(WI!lSppHdAz++kmgSY)*2A7W(Aa$f-aglL|xIU4@Hihr+o)idw8nI-5%0P0# zuP`s&%g|N^OeHo!?8ejnhr6PEl%GSff>xqMJZr!jcv#Or|wxzm}tQA6z!_i1~^3hxchha&!6?je{X9 z>$NY(?iJS)^wW*aRtdND_bBOO>ve&okWoyqZ5=JTlfr9Ftr6JBr?Rj682twJ8CuHL z+JGME@Y=Ct(wZ-B#yIWbvyq*}Cc3-GHCujp6 z!bgupr$78hS8ilOxeB3a+Qp*wy>?D2Ym-@VaVek6cDDY_6!&$5xJpP_qZkH!I8{po?b^KQC9wU$ir z;5uNzR&bGy$j^3mbSy-d{h|5254 zGfCxeg%*gc)~0H>s(#|RJq`X@T&j@sk4Av~tpOQx%2HpS)=>~rx94%i*S~%7_zD|6 zuB%-rQ2i(16$9S<5*1D@i^kaz3iQZk{J)ffjU4Y3-hmprLd1Jho_K7Q+LRqe1n3_f z_}yK1ne1L%h?NW`vN&|~oaeysz|?_vaJ|deLB2Xa_X(uOp;N# z-k@T6EEO@e_2JhOSXP^o!n;;NKRM(KK#kMgcK5`$9P!Y|L8V&7<;jCqFcJ%6^$pWI zSaI&)j*?u{hcM}+u=M1`uz2X`1No_psKROs}olds~@}>8|nV=zd!m$*4+H96sD?z9OXs z?9DM{1AtSHNiRM-)Fo7`M+!?n!MM0-6-d}A1A@+=mIJ~AP|#cY5jtnB5AQSl5(jOR zLCB{C+7Akxv?eTFn8yfTg}2gb(tk0+k)Rscs)aIYfT*PNXyw>_2Ns^ul`YH}9i7V9 zIX776$1UkBnG(JM5cwD4NkKXuWu+ROZN82`PW@|E_2D=Kz)`G{YOi1Z_<P4Y~}JRQNF^ zks*(?QWPvJ{H?5$nVdY(iGNPx4Im#wnAJ8OdlPNgTC% z?U*{nrB{%sA_5JT!=B2hZq1`ry+hpJ&k$Z;JNeqq+*`o7DDk$-`?ibv-^=oBGDC2- zqeN8MjkXf~r}9JruJ~%l_&*_Q2jM{F_+fxYO#7b> zZE&ab0kDSZpllRy|7)pmkWv5j1maV<&-K}k)*?Kwy|lj|R&^TwVZJ;eflvHquRdMK zb+y89HL z>p>;At<5NrS@{RlCaWT_{XC)S<_w$p5q2!LHZV=z>-9T}c&)+qzD}02j5^*1uO}|; zUh;h8u|(C$w?&)lk_&646w4P%2V|5frZQ8f1ax_*(mWh^5zxQFRc!O#f0xFE-p5HP 
zJnMTcec1PO-s;TSLoY$gW^-g-^g;YkxBID0ZP(M;R+9i@Xo0AS_dzo0(V9Cfl|kIh z!hcy)6~Qqe(w(iw{f-BMmv1WvM-mXS--1tS>f%?`Q`p5Bc=a9$z2Yj(C=`O=hifHe z+aAXLI!YbbuI>Q~!qb;~K@|KeilOrZ92=%~o zo5aG=tlF@}^j||STV@gIQUwFXE%M5Q%@M)x@cY2bM(|a(xq)D78UrtUewAC%oM_7Y z6`X_OBxgdwW!G5 zLw^8eTM@KMKTPB7J$(5Wuv(PBET%PcQxp5wKp*zfAT zYlAofH55fDbnqjlT-5gTx3l4p4iO4ZiKEX*GZST{>X{K#$5NZ+^+eTv`rqb9zxipU zzV2kiCA6*;W^I}1g)*wPlA@&F2BQ3U(@-xM3gBBzYD3fhxeE1J? zlaXJ5YdWrDZETO2Y7+6iye8M0?h?^DKJMDt6*axO*HVZ^YW8o>cs#qj3xH(^VA~i4 z;%K)uQw4tBdI?Q7#~>j5n92t!MHKMPFqji1HtH@ZJc`IC*`Q+P-6wegS!cmP>MQxN zFTc}R&y4^4vN#r7H(0^F5a}pu7fFCS-mAMM&x`EVYIMjP5N5w4uK7u&Q1>M=smJnz zLZwwLNCJ*xUPjFR7vwKY4|(;03gek>JUv^`T@=(LDRpQ*lxsW{FM-m z5CKKoiXbySvg*9c-qAG6epKMpd%}?wHiB;Rozl7>(jcjAplm_|^}{gzMdG2wszJ7> zL2rv(f!?}E%<%{ol!=nuvimh}u>;(^Kxkl{hdBtIYvIAzAW}8lb;?s*e0*hoqEk5r z{z1~Ljkd!(Blb2@Rh$;0fvnl-N%lHu-f~%)B(?Y?v$^~|7&m@GIJ<~>GE&e0!@nq(C`KQy)xE@1dVl0WHsptJ6Vtl4PGsRnL^`kby z)9UK$xp7}^hKO4HJY&RIl0CG9ZsU65bW&nTbNpUzPUoTiyRN`N)gaUJ)s>sTV2#l$ zc4_Jh|BVfwAc7fV)XPJ>^N;+lz%5or?gn&Bfbtt9e`27Qu=nMxofWHYZy-UVac3bQ zAdtnd@M(xK3YY4{S`M)Z44uQqHW8Vq_-1$66EIEJPu#XkrK_6%pHKezCHmL*m)KE6 zLA+5~PG}Fiy~9!x0lkmGD%qE9@5;lqP!yMcGP#U6ykFb*p!K&KlXXUMn!VurHj;k4 z@1&sfGY=K3r*nNU(RI91aBWy;snf`Ffoa!&(}Z&neszFeAdNj8L-C}^cciJn&}10QrW@3(>{5YJN4^tfco7<`OKL9@!;d+3ZhP4+;Vu3( zjh@yZc%R|&vf^!FJkY}2G~{cP2&$X7`Z$;&ykk+)I2r@1)UFC1%+P$@4&ewP`Hu?l z%=f;)u!i?U z^K$)&59CF-+$t*AP!cWgVY1F7bIHnCbK4O%5+6 zZTkiNLExrp)7eml>owv7qvxV*GYd?avgT>r3XBvUX-?B8*@IzbJz;4kJzLJF%QEAI}!(d_qp9z{O!7V729ds;#t0!Ps$p! z|FW~L+r|2p;M2St<_`7-@jA;D)3H0&{1D+O%LY9mly8b z^Q#pGS-{!aaugPchuvZ3X5WY18qf1g=O!cnmOuS|`GMKx3rk?PO!<9tU*t%RJF`mW zEg_^S67Sj5;6GjXz0cSS*`}8|}3ee%_U_fnRQbzFMbXZfB*|dm8({PX*xi%hG3#L;Oo-7`3qiuXiSWiIw zlsYwnL0gORj6u5j6%BMXA0)?GI%pgJu@Y5YOg$5k@Op}Prz>5ms8CY*EG4Qqzm}w4 ze1-`~xxvE_Yi8)-jaCFbm#0z`e{v$6*b7#4<%Sj3br(-qtGhAE47slvZjtzt}PZ=C)^Sa{OyV)R^2iTioQv_fikEsU4++9MMgtyibI2x_Z5In0FrU%2IK> z-ZBrK4aX?J39T}W|F!dOSC*`BIe8O?&l)p|nj<=7>X%i0e3#S>!b(uipI+7)k2XCVb|xs_K~&ThDbZic-l2O#B4fjwlIvlrgKmg*N>j z5yf{f2pb>qes(|QyX+>?emtL{ay0hgEH!X~y5?t=*V&{_?N+w0)AW@`UG2-|I!~wW z-woBT8q?f8C9zpl#hn}-)ja?4V*K5{6_+XTX6O-SY+?SvqWR8$S~ex2SK~FY;qfA; zLFSfV{b(mtBj&!};TP(Xc#s-6Bq4#o@a(4i_(wVS%IePm_IJeNb-IEIti_AEEan6n z#b7LW_BPI$Vi_555z6bU9Q*H-Yi(2To=jScf_jAzSP+i&+nFe{*oOsSJ5J#E9#n1H zq7yYh)Tb;Vh9kPCEuCvY zO0{LREWI!C)%McBb-423&Bi;pZ4nAkShzVGkySp}APEF}#e*hGm2U}|Me-5#l~LI+ z8w$KH%!cEbyOETGDS`hrxBLnS7tu6W3BGQQB&oKjo8()vN9}$IyGR+@rAq3ccsI;o z0pC0c$1XLoRAxwQ1Y5_`?~>}Z2&sSlRqw}#Hafh{J!R^)VCJCP#8X?rK7sw1KFJ${ zX{Ef&HU*Wpj^kuqDW7~6emUe)G}J~(zTTy;!2C)1B4zNaVTL(HQT1D^fjb)!lF@!P zg+*uPrdK{Vi-hgr^NQl3CH(1@qlw~7wwN9R7pXAo5E-7i@Z+y*qrKl2tXCI27yBAU z7o$21UrTa{ZArrx+&|AVYA2ltTkKFhX>E{;)`Z5mTPc`R39FkH){t1l66nQClZ*1R z;;NIZ)qm^i6`gv=M;&eFlo=@Nlc=cTo9P$ZO`a2)9Mquhuh<-uT-ejoGh;C-9+#aM z6exbv`%C=pAn>yX+P^V$Qm2Xx-AowT!RNoW46lW5IXw@b|D1|GxJ8wwC(L&3=mxx< zm{{$xp`>Cc^}F}`J~e@24MaAX4`kALH40|F0FZu@Wx_%!in=54e06IyCpvGrMpMqX zW0YxzMV_g%);;_+aHQ=2>&r*V&-UhqMVo3FAm@fK5zr(XQKRfN zvH_x`PW*(?1USF0<43D82#)352hQBVy7GHv@**rkI}AaESn?Em0>Dihb#3!9vL{^H zyl$}w*dc~M=Tpib)TQc^19xhR=Gb%v^}6pBT-nEyV?Ij}_BZq4JH$#mlxXx8Qxu>} zIFOBt7x6j!*XV(`fL{Y!C3jFPQp(UFYm9j-dy^C5TcT7b!u)1`GYgpZDR+_7^v6N1 zVFe5h2Z$RkT+J>lgE}sa-c&caJbRFL^jT=q=g;40@Mpm}wQ}(uQU2DFRByr+UL`dV z>>cZpIwp(9jX5orCOG!JM2|ElN$eC7JbZL9qA!bUIN-6RfXo;$*#xfNBoJ7;=k{vQ zjXq)$LfXUde|K_v#J%cLAEl1) zH?mXMi|HDZ6sF*}8See*+!5W99r%}EwlDTb%P+seQ8E^fY*^`9y*7CXz>rUtpRr`8 zAzZ``i4H-fwd=V=7xzc%@#EI=%I^;^^nyz;CNSJ3d@YA|)E7T-)f=!$US&p4grAUa z*30oG{GbU`d6OfI33nayIvC!P9~JHo_~s${cVTtvvfw#(`Fk2i{N{Qf4a?BNqTK!D 
zpfhK}W7{oyuj6&pg*LWykHOWtgV@)%(Knt9YYtxw>!O!54!rZ&18Tg5KtlX0$+PF@ zf@^J?MM8AWG(YkgA81Ns{=dovAaiGx)bjjvb+3UkZ4}K$#ez!z_IO}pU%@MnWXr2- zw#=h>y$t1i^O>rnreEYqog#1m``8Aid;k6v8kL(PFzh;`z*Ecs; zf$W`b_v-eyKMz{5v+~Z~4u{!gL)AHFC>-UVYSeZ=D!nf-?OEqU>cdpZ_$f9-Fce_P z$U1MPT7)#IRk7a%Dt?p@8YJUHssZE&XJdP_6)6N+K`;<#R9i!oNz5YLt8p|VLLlS_ z0wSx@x5QEaIj{QEZ3e^x+bF;zwIao>a|#2G8Os^di28@*t7OO{r|YNUrO9>~bZ*K) z%^X0ou=~38btlB*66g2A_ZQ^K9IU zt|L#Z7>GoeH&;30XX;Dut4|N=%OIU$9Baa?*RB$Ew)~=|xT3N?1*!5Y2x+46Mz3mc z4cvHbD_M>j>#I~czgC&3`@$L|!J>$HdP{t*;dX|~K6DhTll0u|ohap6=5{wi{il6k zkr-oQ8zeqmtzIV^7vr5w3T~}9O{VMVHaHU=Wpx;c6f_AW#8feQn{#{a1&wM-|Ebi~m83&0d&_FP9iwA|S{pk)x@fo(K%^Zn;oy%}D0>j}o@aK?p4 zW62@>Z++zdfx)SCN7myvO_!CFcs!34PJ7q#WPpPf((9us9YUH|l!TvPf&jSaY{Vmb z9_G?zFumN9qPp?w%X=C`0B)&H#he$%xXzW;={_WV9z_a0D$yRh!eB; zsO{9pzSMr}kn3eplohUWJ)v+w*SRW}<;v%fGee?iY2~Vf&?1eVWTPig$dMzZ33i{R z?ex9o#IPXpVvC|Er7N6}84UBy#q0NxL|P}ksi+AR&$mGWwuK~KdSrrHyr`-rKT`55 zii0ecVgn1~bQb5fbt@M`qMkbEV4E9v=KINHrxr@40~GvkBGN_)6eL!^>1N|WyKsTE zWqqPgm+@}oiJiKypo55RJV?=GohPNW4~x<&$u2Kt@G#ld01#&kO(MfUmEp7*fqv%330e%ZtF-MX*NV59zK8dU&8LxGGBig;f^LZE>{CZ1K(I9*l zo0RHVrxa2gN|w0978t5v*ZRIS#li#d6Gm)A^kJ!%iallwcGFTSc?_u%TYr`G%PR+5 zUYn3$ZqOn8JYA*VvPrH0BKw@VsS;fi%0ODgUW|mtt+g$usb>wk^)n6UtgaRW1`pO3 zoA=#(J?f`)UY;S0?a>p=4&H+bGxTgU*U9gK)6AzjKWluAjT@FtVSIHuTUhbMCT0}> zP-eCtB&KLYG>{tA|NK+@7|o7s!x){|mDq8%`gK98968TyHu!|DWAs`f<0pWq0~Nh? z{)um=nN6QB<9;%ZQmQtJ>MEA>+qXN7Rx~w0M@6BR-MXcS6+b5%lDwjHYP)!W^?b3p za@=qGI;9bNW_S7L*wh%O`b_)H!ASrtJS5LF&nEJOyjLHZlbu0f3k^#R zsTuWObcMQa!H*;@k18`M4c4?ZMn#fp8yhY|DoVtwyBS1tvZFZr7 zl^|_^=qT3eU}2{QAI*p} z*o+al&+GxhR}vBT)CqiUyVeK3mkqAda<5VDWoK5c{hY95^NKzY9+3~zT+XH|H6(f` zghtpYCO}F+b*x@4WQ&aeO*uHlwGlPBk2&Kgnx~)>vc^~nQ$BeyVzQbR!Cau#p0fi> z_}p_`6%|dKn#JG^$I1uw`p7b;@ZKSR;A_+m5ya zcmyUai8kJzx(9)BkqN}Fz~bvQCRWLtY^FeU6!FQ3+88n@WX&)s103BqYaZ&{&TfGE z*|=O4RY;poy=;w6gE=660n0h9Fz)!p6fd)a{dCxv&yWIBVNh=0M4cDx^+p6$zk3Sh zq>CC4CRzwIx^U_tHz4UY8VXBgt0c~4b@~I16 zz5K17>Ah~3T6+V7V%_}gpzYI>G>DLVI37(^6M88tGUVU-pdr@Bu*o~PNBE%FJY8{Y zDZVvuIq^w7HK_@6!&qU*AXo0kprHU-zc4?<(1L;Hka}w)O-}tdHKJcrOu}Ye-rMED z`(`j*wY5$6ApD=gf*&J_9JN!&27#gL8b6t>8t)!u?S!B+;iC@8h?#l!qh3PQz0KS) zSc#QFlyW@lUt073Te4&psEx*0yH)+@qg?ZjH-R&U6yLtix<^q%KNb9srY6cCR9DA$ zd-AEHI{{a+AAtH#J`5ui3$zQtqEzabziPelu_(d6i5RD7IPDBut%HT>&=o7V*G*T6 zO;lg^(l63*^N}T{r?Np_MxhXw-+^&l)%K7eL8kl(4LUwut6Y-dt~Zv=e4swII&EsV zOg)7+%=rS}pN;oVaBx@fu+!+B*+PMv!r-Au*b=hgOH{Eqwtf!-Uf)j(**E6hq)g1r zI<@x+HmfDiCi0nPm>psZm5x22lJ!rE65`B>B>9rd=_gQfHVC0H4CJG#H_?;PN!#{O zPbbhatYVW=b*Z6)kqxQFEkHz0q?mV|YS*5|+`qj5NNd*BdTlTAbhj$ew7SD&Mk{&rkKLvI{Bi91hqkW*7N}k0E=GuU!d+W`8DHi1} zr@kp9an312M@96gyfc|yD+>lGP?LLF;kMok9;|#--5n~Q8JVD0qEl(-Vgaw^g2+Rgpwh058E$SKErCrtlm|wqv_9g5t!S z=be>m9uLNNrLNRuCZl^24P1UZ;oxdAji`9fl)r+0LyPthhpXCp_Z-9G#B-C@f8k&9 z(!Zz>kM=XvGS{!l@0aJ6I^MQlJ7-Tl*N!Q=A-h5KlX&dvy|vnGlhfCc(?%Q^XpSzz z?tC{#NlS%cIui7$psoyFSaWx=^x!aBwZRePLyi6ZWp~@9mZYi zE)~o|;K86S=0>#g-nnw98IC$nnSB-4Utq|=hzf{^8){SH-{JJpFe~xavMu6=Z)dBQ zitRmbVG8|L72B0>3eMOs1&@OEmX+%!zAXV zgIX13=|{)s>0@N}_}gqFG%S215L#^#0aOFo_0_YaM6hE+*c|Uj8q=Uz>y%TyQe`3G z8~h+XL%V@D){027z$X=S`Fk|@*}DFN)4DeU{+=q5x}U{G{staKV~z%0KN>y(80 zj{dem#MjI0klOD%g`tk6UqnZcJ5qiK}73+zTTdzr;2W zP2-X9a=zk0mlQq%f6L~lr1d*`bbtCoM)z{fWO?vIj+~D*g*IAQ-2PDSTVW8Z(>!Q|@Ip&PCa*Y$Vg=kH-{OJ#idcK+kQ~Sz}#{G6Z;_&OgGR_Vj6+d(UQ*{$8xk1S? 
zHd&Jo_dh)Z*zVf)EE!*kzWJKjz7X)#gW=yrpt$U-LZ^l>aB3o&UbrU^m{>R#56}DZ zAb^f_gVnp06&d(&!z6Y*(BQRML~Lv1{(E{(n#a=o;_k4sY;w{IC?1#PAhdu=?^9)q z$j+8siGJs$X8gPUWoyb7*CnMF$^niGYzxuCz;tb=I0H?8I_ z_n*m)eDR-pl3}@To1s&cwoJqA5d-_0U(=t&sc4(|VbPWV8YzuWIFV`%K}V`nI^>Td#%CULvX>vpcVAGL@g3$l?ndCM@fj@=a$$O%DIYcG|f3;o{jUg2g$T;Z*coX>p_rct@nP>(v+cs zBRYE{q%>k^eLRZ3kOPEz`Ct;awwv1>AeU<1q`LL5fAo^CQ1FL|GHMLjjkgtb$NB1$ z>tdp1M%2rH_210#>g-*}JdqQ8C7Bs3?7q2t7rXc+$HecM4OIYzlmChn2-HVE z9M5L6k_(c9S50VuODz~J+GZJ7&GW^L30A<*)SU{y%LhgKT~fKr7PgytS0(JBgHP5Y zpUW}JXv<1O)Zz|J44)62ji#@;S8+S`WB z?}E{sD4#f*abv5z?ISaMZX?jtGO4aS*{J`PAN~Vj|EL3-sEztqYd`O`j>Ybc?}_~K z{t8fcd!~|e&%aUn2OE&pqtX~2gAcCu^2Y9%d{$kD=P`K&ZDM&_Y!=A=oW#v^Sw+9} zI#W0F9X%^R=)=;?QHUYT#o};F^I=B9PC%S($^-QRQY(U^7o*?Hx8j*v(%Y1)>4*r8 zO)iQ`dCLY`jY2T3@8epkQzxDYAi%aO2;$er)Gko%ec-T931YZ`jXFf$6t zw5dH&D3dm)n{x%OWf46DJS1(Ha~1`g&G(Z#M&4L@(2duDb8Fe+2nZ8LQcdObB7RVy zUFgx2B!sXcl^M|VLK^OuGoBFSsxX?xi-1*_r3FKCl zZh@2ir`5*;t`LH;ONk*fF@sFTr`s`_(8c_ShB*_asDY>-WL98i_esiK&<94;$rbB`M|}=p?S`j}P)njK@Gz5T!up=2vCZ>AKcWV3Lvk}Fq{acKp~uQS z;#=~@H@vi{%)!nlXx3gVJ!b9$!rC|Np^}FhApeoDO;dxyxgWMtc=WL2Bn2O`CjyFZ zi0)584#~kfj7bt#z zoS7MG!XF>yUM!sOon$9r>+L94IW$fEi;ZaSL#RvwS^wP+uhSW$6RKKk{^7KDj{{!g zm1%1&(>~m?oM#qvM{BI<`40(ev3CV9XG^u&$^q=>Zl$B7vYp$BdsHY=Au7{5#vdkFsht$&tlKVHx)f}Z~G?H=rZ{vMb@y9`A})>K%Wy|22l+s4;@ zenWaB#4!0vq@N^6{9>LEjYA0al@_Ph(K(u>dy<31S#^>o&yWOBtq}Npf+9NJURt#s zB+JVgWe4l33376H%-oDA%V&?#81YTkE*xDKHez11R3;TVAmY@7|LTHnJnfM-SFuPU zsJ0B4>lhdNh4!QpzA%zyR8%)Ku%x!ZI9ZrwkawkFmT~P^Ga?7OZGDxv66pgK=fLO8 ztr(twm^~mr3m~ZZxw1h>s6|qkIu#;egxDsMn7*8M1}ld=zBM&PYk3G#J<0Np=mN@% z*rj+jWUM6qgn|0lPdrZoKM_NW&@zW=-nSVZ+W!5mf;CLNpb#<`^-e$DCf{XqYrVOP zsCcf19R(FvSQu3X~k&Jx9mjm#9{vIZiPMM zoO3nRNqeqjPtGo_K|PouuKMHvodFNXaIV#DbR<7$zj3PUAvfso*mn9i_1P0a^<#Hg zXwX@0yfK#XJbTqL(tHZC$nNaSrG-A}=weZl>}=4vC)h}S1%6mAdo_2_E`Nu9*m{Bf z$AD?*x#02h)VK|}m*Qo!Am6mUPbo*qA+TUpCx|b2aAWXoFrD{Yph{Hz@813I?*fpO zC+t}XnLwCRm=gpj(S4=m(7Ea3pPo+htA&KCU)DDLxqAwey7ya3M$V(rZ>Mq3yBELg z1$U(c%zJ(n=cC==)@JIT$(Mabu{HFTyb%Poult~okeW)+7s8B`ND5A^=zGn96ivvr zxFXL0eNv`ef(F~&>qdkJL0OS20uo3 zT7fm%w+Jk+=a5!yxWlCvsW2FA=K$kmciEuF!;xKQ%}^%eYvy36V;Z`hXL<4*!#?S& zE_T}fH>%`p5QK9E%Yf{UykP<(+U0ABH;sMo3vopz=v6}=jY>aDDGc5ast;iwRdasM zIEEi7;h?7e_1#Kt&B{fy$&m5vi0iYSgcm|hDlN0DqeCFiynz4!{ag0`cF7^>20FSu zt>kf~Y{!eY>Zk({H!cdL_niM2FZ7PfYp+qvnKW=kC!^v(EMO5Pzf2 zX~o!9ATWd72y&C1ZJikw)f$1qE9Xm6WS>i*RVWnc4v45MI*e>!qSe?;V8x?ciR5T0 zI`}ax~L20*Y+T1C3*`i(Qye=~!P9V6PdUlF? zDT`&$a6)MKdU4R!{-6u_iF;}1>_T#rzV&=FJb~!VaATjv>M?x)gqpH$$qE_OT;Xh~=W6y+WLOl` z9pUj494(kyLHd?L>TqCydze?)_nect8eYSGMx=;A3Mtw&72OjMMFJ*+@4Ui}m2k!r zlEX*iW#C4*rn!Flmjirw3~}oEx8}j5dBDDQjNqeuB6dVuw}r!wUYm-DpZn4s#TN(m-ZZO zon_t8RSWq3(eU@}&m^3W_YLO1pS&Tu@F@KMT70y|o4; z_)F2@b>5RwF(+WLQ|WOJymYk&DA0fo`N;Dqck75}kXk#iSX7l7E?^WX|YSKkbtJVV*#-ORKjqWBMGT-nIv?BY7gCsGHR0 zHS%ZB_J|zEfDGp*l#v~e^)suNn~t>I3R6OTeh_^TDot@nPdGFLag0p-J^eHz1p7h(s;d6Y^h?|eRJ2SU z(+E(1y-QW{JqA>BO*~7Fhx+&Cu#C62FjwF&dqb~p&POwKEQQDfu_3WOfOaWiP!e|N zD%sP$9n2<(f5Mt|Y|K1k!P_SaU6+WNQu{FPi4jKhRd*c9Ohm*gQ#_N%J8KZ(J|cn! 
zB$ahHxvm|J2HtA~e)y;6AH&4cimGGmfqn20=v+{+dLjCTGs_6wQQA~yiANndBICbL z!Et#N&l82W7p6P$2=28-wkZgn{e6z!ZBbkc<5E#Tt8h7{G2%vA*`1dDp`&?(0h2_JT(omPc; z1>epJ?hU?0u*wg`kfqFK4X8Svc+^Ul zzG+`YcD7(SOv89fSQ~X}=x+6MMbr9K+BpOfdy$qLM^tJ~J(jxsEu~B;yLx&r7UVJ8 z258lT&EO*51@@oGOiPS9isqJmApRjejkd&fePl3ao`APN0@bn5$2}=Pxk4kJByKtg zqI4N#C0%V+HBOpn63=AMjS|s#VqNvO*Fd3u(*gMYNQo(BTo7Nh9 zi_#Oe)hg7ZG;h5NQECPfV}WghXY!eetLh1iFWar7>7OuY)2pB6oA#F}6x+d{7Tn5| zMxa|UOEbJvc8LzB7N|JaQA{9pU`Trl7(;AJ1E1hE($A*Rx#2N^vy*`?T2i`5Q0l&} zmvZwJ(z@CX7UDIEVzr}>oYgMQ4DTl1C`GwIy~@1V%4W3(elUa`RZ^i)Ad(RtQW*cT zxMIH9CfMXxSV#ic9#-rve+WyEFF1M};km%<5E$8d!E0 z6w`*SmNMDCvhTa|TnZ1xBwgh`lt2NT5MlKnKl_A**nTr87e~GqtuJ9xEpHrzlH58e{ zwNLL&!aKySCFX{lXY-V{*A-1QP{iA07{V$KOTN5B+mB4@C2KvsoHYoubYC7wdOdv-LD#;Lg#It7kqSqKG>Kn)xjG zVhsV`SXu;!P1%56#e;=@VuVtRW@)%38(C4&i+eU`mwDMFFhPtkCo0mtunCbF@Q9|Z z6xqY7{4ytnZBAWFwgsb29tFQVz6NKIj<*&u+_|U}zlFo{Pa%(@%t?zFG2^CLxkG!* zpvn1UvaCq0@VlwEnUMstsQ=PGG!c;5$YJfMWqq0G+gcaIK22T3tbf+$``6yBQhODDCju$R;CB z`|9gs;&%RE=&3cnoRhy@Pzr+>%Rru~s+=axcdkYuPzUOhceca}!^EdVN9 zKf8B=RnzV^b5bCt#)<9(@cL2)CX<-DQY$i@)abmJNArH;!$J?Mm`7WJG%4v&oJHsC zoEz5HfJ;Yy5oXN)iln$H5_n{mfA?=CPS+$PUSy1dMmtgY>e3E4dk(pp2#AS^d9Y*( zYm$6i4JuMn%45 zT$l_{=S20za@?L=3y{W$gsRW%dY#YOXkpw;9eh7GDslG`&wRs@(1!TwX`ZG0Up-G3 zZ#%~#Rppfrw-9DG8^e8vfnI$is_n`Mv-UXJLqTpx2k%>2mCHv7t?`oU6aTWQ&XA*5 z>{rQUijvH_>WQwMd(Mi}dQ89-vg44a2=dE~2WEaaGNHF5+RkV=Paxhe^%p>r2?tph z(%8&n%*c802uhIzI+@~fA(YHk@TcZwr0lH1y7V)a9|vw>#B0kUupbG9Y}2Tv>6X{J zM!9hHdfi7tV=rD>7N1BFi@m`$bU}9ZJ+B1);Ri$^;7cB1j)k?-@dH@D zZ=7J~M3I?&cdq^cCTwU6-gOUCrfBxT(FrAeL()S|750{#jD&TmiV$JgI;%xiVdAkq zQZK2v>HTDoqV7t~Gh3pDR83|ZSJgOKUJQ*2zmIq%L6LlvR{VXApFJn6 zbQw2pg1y>=?iUFYeP`v5&lrq>fudeB&NR*QaQLsmM79Ahd8*qKlMq1+D~Ye%7f!A0 z8V9QWF4S}HsNDh=-UzNbj&)}@WLpk7WC@V|pdNG!80mV+Twz&w*$;Tg-^H_}sm-so zQGES*;Wv#5@^sHn(wr>sj!u=pj1$*l-jMsm_)BpbqPrmzMuq#}(Gjupx z)7;NeRb4^+w5pQmuZCJ9u4kr-?bc)P%>6aq?fic+oZ3+>oLw*d`}^gi<(XP_-t6Lj z83??iS}NS3QrM zSjHeuE9Bve?dJk-w&`H>{9If91G#+U5qr|@(V+(4*mtBL=~Vvc^l_dX0Tqej43_bi z77qcLUN9q68D}}&?B{i`h6x@(nKEpuDzuv9VOCOWMN62DSLA5O9~O`^DJg5vsOD%! zFlW;h%jt*sqe$0xAD_r3PVJ41frT{@<3Va&pNoZE!nFFF7#Ecv1fx78G&0}u>OJjg zVgzak#(5rWy0lHZzj~Qs8+4zyLzTr(t9pu-I-x4YK#_C9yqFh9LOT&bXYdQ)Gj_fJCV(%VROc^q1XWg! z*7WK<(}tKai*+llxD-v&R7CxZlZ9xbZKX$*!?fD->SSJ3R4jKcIPGf4&~>d= z9fC9S*v4lVXX!iS_GcROit_H*3z61}W3H={VIh|`!p+$KUW+dGn-~KJhUO@$Z_Z*g zXNRG00`bT{OkR%A@+Gab`dhc|6p}5>$v7GRZQw0W8CsfKFY3>1KIp56eoXY_UR&EK z^H(3=^Z_y?XkNd*3ncrzc`CN~NJd-Mj?U0TEg?&i^KQYcZYW#9LxI7x_+U03Su7c1+trz2D-qKY)syX$0SrJ_p#9)I}x z*0)BIS_kmWuk}LCl^cyN{+Fli4}vr#zvs~?y;=XUDCrg~?8?woE2(rp1K z{{(!pTaO-qKlM3g2OE(9jzi86zf{axtPwYt37l+Opy#W(t-O7ogK^(Yk1vxkZ;}Ft z!!0fTW!EK&ZIa;ic)!LUXetGkXjqPcx(lar^-7eRg2vb{3)YtVVmV)wo0155KG1RORin+mt$$h1%#yqt3Q1bMA(zQ!c3%JGTd*H> z(S3MmKkI$D%fI;P-Z9_(|DE3eq`SQTK&Rb}bnbIk+BwlSbpFq7NeQ#J&EpD}Q`&+Z ztj{0j7#UewuWF-ycg^@vE$N4MDt`2OG9-G2lj^ocRun!9ocC|4WBz)gobDBA&YmBt z>df_g%$fn5Aph^6^goUB_uO-S=L53bts;uKpUyH*G%*Xv_B5^1sh;9A7tW_l@waoS zP<8dczq<1di^zbfGc4qG@cJ&>)DcEHIBXoVIeBycqws-5!tuk)ozOYOSve5iWuf%- z>s1O_r4RaPLE5|DKhjMTN=OD# zL-PJmtZ2>qbWmF7`0c;P<&w(xMhtr~nQ)a3!EER^1HukESs2k*4O*GUh2yGpM^LiL z$dH&{^5S_(CE)uE)K@ft%HqG~I-sjBXPn}BF+d|n>~@$XYaiOI^7#$aUd8#>q8q{wk0$-Vskq0H1;>!! zoR#<~zr-G}s9pcw-{DxIQ$Rh!8%`Ig@+$LH=jSSKhi3<28U)8RPq!(y{S+caJ1=DC zA0qu`aT88a`6C3laRaP9Mn;Euz|qeiSk}u_(me#~p11sg@?tM>!nZa}&0OvIzLnC2 zmwynhb5x&q7{dNcMYyilR@+?~y7V^cg>LNRnrDhVl7E8x>eUdT>I@_Gp*L5D)#PE{ zd4jhTEX+V7Yi+Ep9dnJH0meQ$?yz*puqPj7%ZWV2Cm9rejGL2RQ^xbM`Xmt>m!!q! 
z0mpXB6#wauF68H*@29@xL8DtVRO10_auy?6;-cMK#>2qrS9k7X?)2B=e)3Wvq{w+> zGQ^ZyhUawvBaXg2sUcjoK?DjSPx#jHlSx)dzPs1SG*|;dVJ7b5s|a)3Nm(d2!6)P1M-f&0F{`Tz0UilB%{d$ zuS4@-@*4(`Bw${&jmn+OR~bdOf z`3oF;IaY(MPN!~vaniAF#&yok5POnU;dw$?pQ}V*kiWw*V!n>CxB0s-ZGyfreevvW z2$ya#q9NH=e9E+mr*y_Lel3B{5>~!h>9NvO_*hL2YuZsUPg<++d)7)9&zm}X{g!W+ z{ct6Q2<0t z-YHc&70t$x&j2CEe|Xt|@5vSqk6R~aKcqT4I!n4iDnh+*OecNsoeJGCO}6W-x|!TK z`DbN%-@njp6Ck~!3f9#M>s;ltY<6>Ep#M#L-(pKhS$G{sL-I}MI?>YS%=N|C0>w_w zAJ>$@XQUQ=`l{0t*f71&mefOa^H{gI02VbYA?JMi#ub4Y)g3 za~We6gW@m=8y$7+t{zWv-1;O;3|L>6C;}Vc7@DYt$dtz#%OyxUy?}ctQ7~_>@fBD( z^fH|>dP$y8897QE!0tV$R{F{h^@hP#c@+LjlWijGZG$mQl-+T>7Y(h|H;XpUh;_zb zPJ)mSMh@kcjLA+fH43LlgN~KAKL+SkUPjgMGoqAddmVFnQ_=VH>12w2iV#qKw;q3S zl2Ju382N2~P}0Bb=?8tWHhtW@FuIjmT5)Q!104=n@FBYUdZo7hLnF<{TC0!IZb)O@ zLg%cVPNGUP5AMjF1C?-@_kDvfOK1J|Y1v+%Swe&m4AXrU#kX3$rdJ^SxnU?d{0XBw zXHlU@h0Z93(Kv!K!FG?TTpje@EzLPRkpV~x5LryW$HC42+0GpHSJxqWb!?dIv2ae) zE~AiR++unzA;@w-BrQnx5T;^}`}tmw!_U`U3AG0yMUknJ2fODpshvn-X1zp)(@8_% zWZ8loY9D;=0Kg%%C%I$A|tEi##w$6Q<29uRb3iddJRPmy&$%};SVmlF>Llvnrit~9= z^z@&=ARe}~QlVrSk>uGp(c+A=}UR)Y4F%EstK>)ib5|SLXzodtP(^WDvni^@- z-nQgpcBB|Bk!Br;8mV6UrczyW;ucfHnFBk~TwRe)-hA>36VYRfJez=i0a^>L=eq}y zY?8{Mh$%~FiJ1l((}S1t0|wLT60Us_m+igM_NG``Id6g4Z}b)&csMJ34_vZTs(9Kf z661!HSj>Mi?n|7JoJQNb7ZJ^Nc+-8BAnhLA$hdk?SEZLl61@XE>;B0LRki7<(k)E=OI139==qO@+|Kg-WlrO7^ zQO<5YV#!`WLW!cRpSO|Mo)R;!c7J(Y_?=PMh=V1nd1T*GEAWy!;cFZ?jBx4Mx%=mV zIKX5$E|UJ+@Z~YeO?c;B{{!3+ET`?a~!bIktnK3*A z2^bHdF$(RI1cNJO^$qvq1qj7D`4uM=78{)X9LfJ6t66{*gL<&d-X$;w9yNeN!RgS? zTxcnVDZ&+ebbg5YA9-r|Bp$e8cfQQtmq-e1+8n=wmw>`Z+9jtKA>7BW9+>RUMWkzL zMURcr(B$rI=!gvJn3iCGD7PI>&H;k%FsK}=E>a7aCsoY03g5VkAvdxFd8iaJz`Y6> z8TApvGzHv+xRtM4sU8xNV399oGj`<10Fm*h3wyLrlY*wIMQA zWmPZoerILzMdo(m$Uxzd_qyzTYSljF2&sJLSrvHNt0hcLSl=CEPXexGOp;0^*b8V& zU>9b&%;%r3cI2PGmVeO<|a?|S>P^mnWSFyJC8 zYUrDYBQH9SIc1Hb31|Exe)7m#6D=`f4tS6T(d#<3mu?N0tsb`YHOi5Y&)X%pTijtK z7gHnq%>T<-0AS+Th6g!^N0#j9YDN2|eaotC%ci5{jX?9dV~{)@t^B~2!vXWlFh(`n z5>I6sq^AfCNdPB^73b^m}z$ ztHDF#Y?UZ~dcGb#BYD4UK()u)dDipl`hZW8uv^rJm$%m}3CE%>S(tXVJB4~Rt&Gz7 zeASVE@!R?<`-PN`KRg`|;`#(G`gWb$cJ1|#GrO?ElXUBs4r&%perHnXlxT{^h3nvF zG+0Z3-8h`qJmpig?z{BgU#fOJFn9^XVb83Npk=a@^jr*%WulQX`iQHT7o~yUHf2A27gE%r zc!Uqwz>3sFQ4CR+D;rOW;;!F@T0qCufJ8Eylh3@}Fj!IdwSyY=7X4=5<1VvG8Zc+C zuKd$w>N#b5gHaL$g}~YUGgrs^7(h?kVs)lB`7-tbjD$ze#6Btc2x8Sq0DFkyeOX<;wq&r)@Ucj`7#5pj)(^3Eo97*_l_o;(>Gul z6uH=mi7)Q_$-cLFhFdJ9?L=wPAMblGHN}B-SIc#o-i$lZ*rov!yM6k>udwzl^0p;7gM_a)y|-bnR7-gN-ofxIuONPH>mgc!px zc6v?lBxP4vVVO&?mCx)(RB+a7dn?~eA1}MKP3&}ZrKv3ew~BuAVIMDZc&<_2^xOw_ zjxR6A-X0!|53D?#*M)=j;Qw`#@)5|1>?mc0$mqO16Mv~wbNeyuU3JOdb>AfK-?Q>> zfDi2I9XuT2BYC6vb6HBb)QWI(LX)oL_II=AfW6ki408lZ>Osct2n`Qs>(@;MK>lp6 zoNf@Q);JPkRxf4r3i_0!_6QsTM0^G7#Es6#(&fK$++jQm+J| z+&EuQ1uk&W5g`3HVGXG+I8H}U^hk^7FfFwssDpxLYlu?Njuc%)&#M|LcWoM^_qNyNYF_M(^$dW9lAAjAz zm26J%z{EzP?t{{x48uy7U~fdCLa{2MbZaCK(6n_MHVkQfG1bGSr|0iAEVMF0lJ4&U ze9586Uefkq>J#{fNVCpk4jl6c9EG)l1V-m|xeW3&VjO#-g^ywfj@*UC?G+-Xuh>LM zHb=~BPNxyzPFhX0ITuryfiAF8+`H#XC-5s`A4!KPbB=D%{WJE~^(>&8)7;=Ioz|?+ z?e&F_?|)ff`~w*LKNbX_>-4@no3!98G|a_)DC*s2l2qe8-@%`yck^^A{$u{XoCMqV zQ%k(?!eiJat4m0$hnjJ+nZSl#eL@{KgJQRyo`0sK_%~6vt+Ukq;Dk4AF+K8W7wR&r zi+jqdV3uF$E9K+W;-7QoGID}mbR6W_&|eh2j;PeYmxh@YDf<&i(frye%jG1O;Lm&} z`8XGdi8$fYUURSbf|h$eVLi|Eds2%=6!85|6Fyvh)2r^e0FVZ@la@42oUFnAJw~gN zm1;_;(Qp3S9FUNSX!%n>PGA81s7ICPYrI}3X8WGP?^gCaQ(*N+@RL#Lz~-?0CemI* zJ@;XXNgSP#@)iKg3BMz^xy}nGqu31yAA;-L4O5c8Zt8<62)i}bCtq0l&g_Zj&?;ns zp5}a3#*%Z%Zhc&zRiq3x^oxzY8vxr)pyeVZoV3@1MdEY{%Hp^?C*kFt(nYN2hX~g4 z5tYT$bxwEp%s;OUGay}MMOt%Ky^xs);j4XJk$eRh$HGICea}KhiO&)?ll_2b2^udTd&o0;@(5UQ2CLOx}nPK{)Asb3LpC=wB5txR@I=NYg 
zEqmXnvS?nAtzDg^231NK0x>HrHGk9+qL>UrTTNs%DfR~?Y;qMxySfvH?hQ$p7?rQ= z?=K}tks1P5e@jV8J^XtaM9MEy6~2GDNB)P#{n!5Yl`n>3p*<6k+BUssvBPQ2H_LhJ zf+P9g+7945S^k$XMd0Z<=BpXy&I^&zzAjN@f4nqL%>QZ^X8cu8F=-$+QJ=_DRs4C!sX9j+A-R$B8fWSQnIEi zU6l;Le@zo@1Nb)d%Q@AbM5cEfaq0uPEy!3PV=Mezzk6OvrI9h*>z5|e^M(FgCh3YU znG=)HkszINMF}pX_w2p(<(n@>`ZC&?@1#bM9%Ni!j2GDI<@ORN*ti4W5lhnV0jXe& zWXf-5`_-Ns#$*_;wDy~YuKQe~eDdE4@OU$LCYgvMh_>Q)I!4GXq`>JPixW0eSV|7O z0Ezh{A8@K?h>J<@c2|nMu&5g_%Y+Z5xt&TrSnIQeX**&ucp#-#Q9!EZk|N3MTl(hW zVRsqlWlxA=QK#;{&x^~wdiMN+f`jVjYdk7Ny6+$VuMt#_h45c0e3z;A+>PQX%OukM zbLOjxbSP3H?DeIU;hAatCXAci^)}KwiSL=cuTp*@Fh}Hw;K4t!U}abz%IH z&mKb}w+{2S!NV&_3+$+pNMSGS6K2h>)3?HPOpK<)LxtCwm0ynoJlQNwzw_gA(z{*M zeAD0{Qt`^wxfZ$B0&`G6&HHt1ARR19CXjBeld8H^>w%>ae;!7`G008tk(F%`DXWnQ z6>a7h5G!aRAT#z7TK{b}Yw{)OczL>(kwg}`e<&NB$u0})Gt<_J!dzc#E6sM0H%yO( zwyqf^HWph(r<7mQ&EElb)lO%XrLsDBrtWJ&4po_b* z<;nS++m49AnSKUe*PJPWvo6az2DsHgLBzYOUGV%*6huueYsR`>p{GY-V)n(OSD)@+ z{m+MY>crj5nk8Z^HFCkoxXQW?RVnHpx|E&}^*PZ?@jG|#KYRDj58Gozs8O0Hf4}Sd zk+8;n-saf7Rfu&v{9_N=>7h=J`8@{bX`cF{Hoke+OdaoY<2jcFFLE*O1{`wSn1#}w z%6Bi7NVBzq_lB#9A9pOp;vDoLBM3*-fxV8`W!VY4Dk&35U!y24Zp85cpJ zcwvLK4uy%a*xg$n@OqJhX6CQW-c#es$2{!S&IJjXFiN9(1lx+_G;=otcP0MO>nxV9?l?2l3v#l^qcWd&y{* z7WYR^`*#nT86RqqQqmI84=T!s^xK{sWz~pGBeK7BcC9gPM#&m(Q}EWVv)nPB=qwaZ zz%DMViwP*Ya8rp{9&q+$Er+Su==-WJk#SizwThg%LGdG$!clN(sJCMGfM2DFWP!t>H4sLb-&WaE?CLG+$~z+Rz39`;kQe4Bjl)A7o1vppZ( zQTjNojPN)XbaZ>pR#tHH_uL?}&@i&K{C*gJKb9AXGomS62urB)cokf?95~~+wfV#1 z=JM!d_yax@ZD!tcvU~qlI2%Z;(+{maHyffN)u*e@)h-KmE@tizbZ0h@XWv9#|DzoL z7UnR^JC2YutiPZ2V0{sF&(9~%cd73kX@5Efx%5#_@DjDco9xltB5919u{BCV$CF>#CXF%~G=31D)D~xFXwQJ8J0yRa zna^2tc>Icak+3*T4>z+4wQ0oFhA|fX)LnyGnkc7guoi zNwkm{qS-uVs_=`dQ%t_@nU(z6~L7tsuK zgZ!pShZ7-U<-KNAnDDLGdK}xaWrn)&svh-S(9JoHhbzkx|JsGg^R<&t7 ziNy`TBjd`_yb>^q`k#jNuL=>uM#tHVyR6+v2yHV@-;W>lY4EmhIpgLO`QbuiXYn6R z>waBOOCriMREBb7&-ziIQ$$h+@3VL5>o}g<9etGzbkzg*bolM6*it>io@Xq3u(eSo z@2n-+plQR!GV0SB)qDI^wX@j~M`rreh%_@_23%!@Aj+bZZaUI@6jBRn+$S!At{e7~ zW8T8lZ0SJDEG&1VNhix>m8sFJ5UX6B<;G0zSD?9F%NU!YtJetM_(gHe%*H7;ZlmxY z8VE_nx&pO{XNT2l_~nZ+0bgjuBZ`gFA={|fSTub2)Pfbys)WQI9zM1F&QYL2jDxKWVFycelOES@X_M@rY?C46<)u-LgZas|APCYCQ>r(f z6Ihi01PT)68WfmHB)OxxP}398Q5b5JC2TG7c4ksz1I=lQ&^1TEuvu_LrcxQY!qg3n z#yrO0uGZ!(P?kL#!3L%;UaWrq4@Kog8L*()5z&He87+at)X@YA8z~<;-Qpr-MSWiWiHW~PWf5T z?s+6L`-CR@&=_`{c9y!XCpgY?+aLu{|5$(Y{^08u+3709I2ZNH&$_k=7@nuVH5|Wm zH-btizvG=#9GCc9%S}y9dHjCkoA=M!`qZM%ZhJ6WaH4!L3T3x^U)qj+aeAUMCBMM)ZLaVFl8-(3meEVzLm&BC;=1_;%}GLqgOnj8FwsB&17DCFNe4a#Fgv zBLij7pb1|%z57MYyC_|6FSTlUyw(S+u2QvCVFR=!`uKK$ zmw#vfJWBt2okFOlR(D^%)zlGM6eX;&1gfNWea_9|{w*Jihb@_h#iTUfzw+jSYn`mM|ArM$DjCl2}o4qEW$-bzdE>ep!%D-|L|`9lsx z+^mH>qeA`IJ%xVo5JYvYf)~xQ7KH+?ua)KOPMcOwD&6kyICG`(I}@L`W-q=*c&U*y zI<)d7A>J3~l`YSa6}2TnB!a__LCo&MZ}zckPvg+V4izq+1EuhtCB@Er6uHkRtNGBxm4}|4APiNaMRJ|48RI+Ek()u-_&f-??r1Ja2GX|VN&I@;6UaQwR6u2f{A9^WOpZLpkxK5E?Rt%2z|@jDj>NiNxk0{T5Wz7c_asb#aU(Ln#g?>j6YF4=pT4|3DL4-b@s^od(R zp=V*v&AQsnP~uh#_0Yb2g{dPF4$aj*hnUXO;G~s&?Wsqg+%p7eZTQ2k-E}M&l%Vg_ z>@Fv+dg$p>m!pC9)Fbcb9ObdGP@aC!Cs-w0!OzvuAZJ#-BA%>(HkJnCRt8)p7=#m7 z74I@j#Pof;2~Y2SelUP;a&DB(t*OzyV<6j;13EL5OETb{u`VHrMd28b#3zyqwThUm zqoAQxdgbEUpACteoZqN@N;LHDb%M~5uq+fU3{hT#>cn6%a_ z^zm(-?%#Zr^4eu&g`s{1yv!p)kKG^!-TndMH)^rmgfQEh>{rrFpA=6&Sc=8F`NVa6aEn-Hcnk(YhNovWR>X~=*?rNq`okxq)REqS6dg^&4VbDrsmaS!V$|)c_zMWDR-r(_E`x zy4t7Urm|2}ow!w2;VUIOuEkWT-b7tsXt7sz5#q91DLSC2Az}mx@Ue!0HE%U0^FSKA z)2wi(@*CyN{6}gvccQW5crUB-bM5LMNSW6BhG-cTcK!@ea!VJdAW!nsE<@UR%cM%I zE3dJuzoJ<@wYdv=s|ic{vFl4&tO9lJSE^R5-;;9c(|&|VcBzOI@tV^rh&DNE#j-j@ zNFTp)3DnYU!d(JONYvn1lRPUghhz2mscBx%q(mglc$JC}^H@8yRz&C0LH-m(6N@Nq 
zD(4kG_;s`J7!R^>>pOWx3*6~ojE7|7T_WhG0 zU9DUu-K4$zR#^H#AodPPF+6L*dp(X^%qpYGh7k<}!4`b~ko1!5-=^q07lPd1kXmXB ziZot$nezM|%Ifs%f3~j@f>$qq^go`5;5)v1AiYL!YMp}$D&6a3XK#jAr;Cq$Mptf@ z>$l5Jdz);Uubt<-&gOdi`VcT1nQK|n=Rl|zp@GASau!tc4wFVvR<|hIu<_kI)*Ay- zC^9_A;eD~Aq8Mo;$^wRt9nKeo>GiZJkU&x25+FDBtW@`6ipgV&%ww_(f5leHS2%wO zecf#GQ_+sN)v~54@s44sovuouocxj?v(j6oX^gtCcMWltF6MU@nBfTb(;*@m*ohj>!RjiOgO*fHU(d8O9Dhev%>kSb<+xl zoW~%#x3||3aBeZuW^Q`5{u|A3|1|rZzH!7&?XhxWxtnifqnA@(MDy@Q^p008eR1f2 z)X^O*UGc?aQtw=dkm$Ad#dp}-<2YEel~adE{tFZOpJJ1TWwI7>PAlL1qe;>(=&O3< z2>$*x`Nau5o6QNoYRBK{*G^j-%kCn*1J$#I52-z++1XOJ^EqL9Z)B-3)+2tBX!l_( z_0thQFbNbmv?Dare+Hx$g5n^>sMtX0-F&tcKytIhcE@D*8SXWBdb&p2T$zcDaY7jS z=TM8k2lNFmlx&5ns?vet(0JfUkv%3ieyP|v19C?$D8%7)A#12+%r6=9JYi$~aw}g- zoB;8?L<(|VbF+Rub?x3$qopi8#to1?P{Dx#jW}0HF~l+7U+y!W#Y3v$+RcNI-{jkr zn|@YM;yx@zNGmx8YYc>(s^dNM+=dXNy6*J`$vU*M%`|I|VcDO$$xB3gc%Hnu%i=IS ztg$i840qxz=rl&e-j~(p;idQskg_=_*32>0RSB0}3Q$XlF~u;sw= z!-TF^_s&S<%P)<-^OW-{3Vn#q;Ni$`E|4|J6jaL$sFa)YN+RIv5HVrp_)DbmBVrOL zqgJV^zoiBo3Ln;dOD5C!WEIN3P`P`xd{S+`c^T|9Lv1|<1i9>q>%KMVw>@iZvy_Wt z88K}B6E~_lwtC>IRm#c4l!v*6J@^N}x_ke$sAW2Z!f}7J>$o?N&t!Oh zX28zr{~-K7ZCL9~wh7QZjj_XvMv^C&2NZl3^Eqd;Z6guN?q?0_Dmie`^^I=7oGimDN~h zJKyi)7I8L^t7O6zgF!eD3jEVayP|O0t@gU?L*@-QExdz~;%||eu|CV35CaWBwx-D# z)pb7jA}DIxo5!#t3Rg3MUgK$`|GMD9IHp~gfB+fY6;NrZk#13GlTsjVd|8pN0j=!E z(g)w{7CU{%?zPk?kSpLexm%K~2v?e3cBb?g)G_s_Bnd%3AHmYX!gR>jkfU+pjG)zN-9C7B)zC~bo$0!Ru2>sPK_zC z)vv!Nrpk&?Xm?kK3}BfXHYkY6SjW_{{3Ib;QkFG}lyv`6$B~L%P>7c^$6Rm9yCAMr zb()_0B<4Zy>^$o$lh>lS9k3Vu(>7~-*7m-IM2fnYwvoN0l$mM!#LI8-@Atf?OHWgZ z^W)!Cu8QbmI3Cs)4^&r=_+zAEEpU1I^{|s1h@B< z>-94At?d%Wga2ht|2Co0lKaq@;t4M~>M;t61uG}q>*cZIt(~s3iPIXke|z#T$K@&G zJM^&qDVgV|8~5Ye%DOEo$djal;m3#A6*Zl*^hW@_;_GW4QDRac;Uqg-Za#{eXL${cy_ZX=oV;uFR&!L+Abt_X(^| z7MB?E#bPdT;5{wCSZ&4DtQTR$Kr&GJt#r7|V8*QXBMH_i>Edvm=4tF67JeQU2D!Xc zI!x>c1=(LZ1oLYIOA{(mAgWPvP-duAAcsJh7cmyK#qD;1^kDTeGf|K&6vz~s*B?m6 zlCQZYE?@Nq*JY~IIi3WG$dBczT-`Ok%Z)fNn;5ijpw;$}u1|#w1Av<(9n~@ey>%wV zDF<1h8Aen4kiBZw?e2KNR9SS2x@Uq%6|c0)e7fFPQ7vl+pxQcVFwSG3k|~%aONs8X zpB@Q$`DxOCq1_xP2HT}BJuzUT-ypMDI{PMtErExDVJ4v3vha6NS#BJ;A6_vI{5IDc zjwv01kRvV{TCfK#b)jySel8V=w-&7;Z_7M5=Y^&Rs$ZoRnNpE3yd>gj(Qendd_UK9 zi0#GZX=d7Zu~l$#<9FsXkybo$vaXexv_6}k$v~Mmn^;+Q-nNkIdHGTO{SI2w25&u_ z%i$l${4b?WprIODBIe_mHa~NyU%##E)pF)I|7T3d7@y@o4}W*CQrKva6SZzQkZj~XYra_5#IND?NAnKn8{+-WLS*^`OM>xL;c6Cg02>epbIq@%iO0@m znKWABL@=ATPk&@9*3Ao4@_!nVwuhz6D|=-OU(+lCNIwh~ryxzQh_w|xGjQTJ;$|@U z+z`PDvYg$T7KCTB6eJtJXTVJ@D%8#NRVI@?qyu592eG4BN>{%IIhM?AE#$C<&Jd9S zfu<@jjvhZKi8M+C3=)64{G)c=Qp2Myz)3DT_rAi>MkV~xqx?WH zo_!HW?kanFky*`%u`Na4=$Hc_LSxqZ)U_m%flW;@Rve@eWHwpYspOA4W@Ry%fz+#f zPP+^tE_Bjot@rkBXTbPzJ|nO_Wdib_BqSJ{?J|+~grbR~z7x#r@0E3TOBVsnld@GM zG?cQ4#U{t2P*^R`^EM}zr_ccAFv&JZ_@nn-3~n4OyZi(%Y1WO^(-hLYR7Cc5XGMXp5WEG^Fpq*L;v)xqyyH#!mZHD&b_VUH*p0}dI_hJcCi~Wa5 zHt%bD$_!USf7jKLY?R-LpOY$JG+|{aQc2v!_*pr97hFGyD)7!Q4vyQLzucQ(>PiLf zlyq#qx<`AA*`F;QK=5t$za1Js!RRi`-ENfm{By)@bYkIFvyFu7+y-o2H}NWR3cCdr z$KOy=a;~IXd#Y50{U4~6h99^agVc|~n-JU?QFR%if6jCX1}J$zkR!zu*{DtK3q=I&URVwRJUom%)vd>SY^Rghe1HNhIo%oRSgX@ z$;ru&Kdxe(Q(Tji3dly>JUaW+S;so90?P{e6ipx>MV>u+kut5Np4A=rh=-ROBrvYt z8u+PdoRoR|g(bYC?ET|beyjN=@cbVQ`p>`AMwIgEI^DtdX;zLfeH0-PSSl&aI!>WZ zI1l`gPb3++UTk7%h2Cx6x<0|d_DAA?YrC*~Kau-;WBc6&YnYd$YvnROdjfj#q-LmS zCC=+=haFdU%9eWWmhXDri%P$_oZR@kwx~-vJcq~2z#;KVKyl(1?5){tZSErT$^AL+ zSz}SR#@+2Go}Jn`s#_zsv!y5uZ^;PCrg~v0xx)%&UXm)UI7UW8L&TLH^axWtJ+090*4uRbFwWr%N45cGOhmkBp4qfCE;-ae65TGjli0vOBUtofMh* zxp4Z9Q;21<`rQ(4Zo6+=2INE()IXGa+9AZMKRR8TRh~x?r@Y9tV-hHCOA5Hl$4#`g zw|$e50ZBUflNr99?*DXDY{B)Ni;mBiLz4BaL`3ljAZD1A@gdhB`L`v`BgMrnx8{*; 
z?EMNsH~Fnc+Fa*a1LV$TuECtR|MuYDj#F3BQmxaV)hDLPxl>x0aB)#qL3$`dBPv@x$&_rDqf!zUx!DNr|kI2R!M#^%EXFzPcL~EQF-ZD68qdcBDAt#g$EWXeIG2EShP{NA96-o$s+nKxP~~Ob z&``2XfCB~jPrv~N(~4|NIFHpK4qc+6Vx)@~Og4Ks8Q3X~X|h+@sfi`m>A2Yic(0w) zso9&+VuYzIQhxk*d|@m`lUWks%P;C2B5aZWq-w7D!!`vj&!CX37wsi?3e$Kxu%qk^ zp-O+Rd{2$CP&uvpe$i=>1BP;$as;)O1E3*Z2b4=8uq!_MGx#o`-$rlzleRqc54uJK z4yj(#FQT#Xn(=YS=JfEg{?L>Zw`OkPL$0JGw+&?yvA|p!F|7K^0`!D4@Z6-=^khv( z<;o5r**?`#fG2yGc|+%D_fDtzJ>V19b;xY@991crHD{ zLlt1*C%EFMIA@u+S4-QH-NuVM?@4m)b| zA6Qa;6EN1b{)(Xek8Sa%cW)lE%+^V$H+cJRJMxHHg;?KCmqd?9nwFLN>z{T6d=SIP zMk4i793R5|Hnc|{7JFvboo-QYjI0>M#Jqie4ms}prft{AMkwatKpn1Jm z8ib98E1}0#!c72&^EFkpH|U}wW=&Q%)j&(bT%%7I_j8eXw~IbKF5Xlw?gT>S8Wj4q z2b%w;*0@-E!B4q8^Gk&=ny4jzENkd(8%L+A2$`UWeq%QOfZ4B>4+Tmm^Ouv@qTY0fnM2>Kr_z|c-l+eg56X14$1ZsFRU zxCHKpx%qxPy$Fqx@5=t#kdih1?-fmHxG|9X`4xF~eQfUqgrXqNDE2AGp0}WQfCUez zCo1>#h(8>y1uUXpH`vXOu!i$|Ca;*S?o7!O7)L*|a2$ki1+Kce@zsM1J&cxP^x zCus6#iIU3LVFHTx@#@&e-}w&~!#{N^|H38js)o7^b|Es zQMQrT?V@6SIH1F*eE7f+`(qwghV;7a+j7G^OX9ip;FWyfR=+qrSk~-NxS$@!?hCn@k0~!|*A5DN6(;kTFOw_e$WhwYI zF=F=QoS5<;PwL1MH<;(kej;^wsrIio6McekjFy;B4q9g{v3vlrd*NZ9CNxbw^zS8~ zLSLG`VJK3y1#5)x19x~di>_FtaBXd;ILV;F|3-BD73okhK6BKMGbT(8=2H%$NxEISd3f^4Sm|KnlcAVcy<=mm5}E; z?B9RNvpoUJlmApGI)VA(Bq|}xZ0FH zT9;zQ_Ljlv^IGy@IJ-NU&X@lKQ5%(B?dZi%C7-1If0TW7T+`q8|63FT5e!NYP!Z`; z=~UkqjdXW+H#mI@B4rTLAd;gy2TCajBcx-5^hPrpzIP}(R6aj{|M(cV-PgS*p68tB zId_N!D;B_>PG-NJX)VfYDSzQlgOO#w;^2}Y+skG0ZRCA_i4M>=E6TFWDI6j3?_XI$ zUck0Vj0iJmvQq8RVJiz#^4-$?Gl>s%UfTh>$X@AE*>t2|)bCcQc)BZItE)_6b~2ha z(8RRSfs%7cdTThAspP}?uEQ8wppxqN1@uG;%H#c7UQ69{@42SttaoXGO2PYE(U_BE zCE?I*ZMYFDkmRrBb6mSm)ss+`f*RTWXE;2SZ;G*ftIvUb>guthpX^v1S zuUYmNWQ}y3qw^QI%T4Jb5y*4hdq3R@LM#(r@3(jU198CZ2pVnhC`v28$?nH}26G-?TrNfP_SePm z>Eg0bRdY6D;jqops*LHHTNfyQRh%HHl3bY@lfze&o<~06Z=%~~a2iAvrmND$mX?R^ z8CmI!MQi#g{;t2wv#ZHYDH%>%p{d4c^0(~DW_ZyG{$dpUsfXTYX)JdPW&x>-^kNLS z@o9BP=+^@A9EDdM4_`rkGeydi0%04E$I?7BKq#}w={y%dE?KG>=6=*}Q^(-9C;x+^ zeSCAhoPWt|pf4nB++SE-k$SFNLGuASGrn{p=I!MVD(fX+`ICqJB`M=*ye@Rjw1`bR zOpTthWTdnIFVRqZNY8~t0$!g(cJ@3YEpHC@J>eD++Hbk}*f`rD`&w-$byOX_q1 zZxUqhQbN;Uhh3?U)BS=S6Y@AvvCb*TsO{2}S7DFfihdhgYt3s|H`I)W8R&G^Y=8V? 
z7yZxstfpLjeGCjhq>e;uEWY*F*p5nf5uXv(pbHdGsWYISbRqvOOPqLs$yMJmemnHPBQe&*wdbe@@ZAx7wDk`Rl80x5rrc)CX|VA zp9>`*^i)H?EHS7h3?h%{8&dl%0yVee3NdP=`i^V#&u|5jyF8Pmuq`8?%JWndCfK3Wt3i`puO-9806 zr1I2k-&@j-@{S>!CugrOHJ|WQs3-~|7{7Mt7fnMlm|qmymN&V~wDuPm3IFQ~x*GO9 z6C*)Xh)gDGNcnHt7U3qD$a}8sE$EWUNRg^)NlLu!4WIW{9TIHNeZKMWSGWP9 z;Zq%4B(5U3a6{cwcuY(I|KyRw;oF1RrO2%uW74VNPvdrIXC-RP0b?VMmz8NpYbb4m z16pjzg5P8E^Sv8c@JALlXA6F5DmF(P!DI4W`gg{2sltDjp{7@{LU?$bywy><^auqb z9x*8~6O^d-58Ht;LL^hj?y@7HCGKj+z+)%2arBo**Wo>iz#Y}hq1|yOuSTcLN(ol< zKS}S)zKem9xwOM4UfAyx{zJEa#geC$Vr!NYDV%I3^997E$d%Ze8Sy0p-zI)l73w zq<>gTm*i#LxF5x{U_>ckFJGSi24(4 zM2w4&uwbX`z==rA9Bo$}&UUQkteHkEfPh3tJkn&22}}EXGB0J;CpL>WoC?*beo=qv ziN5)veix&fHkMvVU$J(Ecv!~u+Q`_?LWfIwt-ILv?-OHU&2b`K@7Yz2>kr?X%^B2h zWBpR|8a_F9S^S$_Ye^je@W9EvK77b#*GqdNb{F+QD-H_fB-nlv**7ZB19%v0_r;&-NLd!t2&1OEF3MZk}4 zj9K^IXU%>t7X@?ag=OnAx|S_|pHrMJ?<%j^7zcRs@^DFKp5BINLhaS*R}AAN4zJAp zdusUvv)lqOmel>}w%t-zmA%MVkYq3Q$^B{caW{LM;)Lm?^;$>F2s>jkec$MZY#EJm zA=iNQ>ML&c6vLvw#wHN^NJ%ic9msPgEq!+qKGa{PHuXzG`HK;Xy@Wx$*ku zn$lgg`=O;51iO0`H7Q)O0vfl;FJ%v`CJuhaUbHzKmrLHF1=J_%7eB*Ty9b=s(&~a< zz8FY{H;n!2O&}bW(_(~DZZFgEE#;2QJtFCeK06vpnpf;*dmjmsNz+>{l7{9_?0@+* z`f=rTXSPZ1*)5K=;HxXtna8A7WtD8h{vi3KqlX@Amk+Vp-o>1T^u*?>KLw{{o2z!z z>e~l~)A!mc&UL6={UwyRE#r`Gu86X9!*cpht^`W8S<}E0sb?KODOTLfz;s4`y4vn5 zBn^a9e~tV#-`jrZIsD#x+};hlnzf4-F-2WbEmetmbEMpL#x|<(jKVMZ#OgyFMc6!3 zdGX{{f5%7Ub&*^zs#N7Ff|v4my1zUH^JDoPt!}jZ`Zlx^nwYI{qGf))m!raTahm2x zl}khEV_VE)%f(}XU9R2n#h8MN<)Sy^iu}w~Epu9gz&N5@+-u~U)$QTNhNRY=2N{@3 zRjeRg&O|K-#=jJwkrDk+xc7>20gsoaU#W zU|wla zWqsjNc?!b9_8bYTtJc~DVtL}}=Ct#CPR-jnjJms*Gk!h>Y!zzi{#UK zOvSSum>&H`ONr2f#_9&f_aPm)TtSu(?_biVBQy`4kfQE~da}giHbjE^AX9T`VXX8W zEz|S63B?bfaDf7gw8gma(tN=v<=Ha64Qpi)zdxYk#pY~~69)Ld`SyPz-Z;5Ci-PG4 z4VJbVcW-%u>Pm}JaMMa+QW#dX{AN!{-wv{L6cSUuZ z8A-=2@!v1F9C7gxNjh3JM#|Fa_52p)OYnDSH}EHD-(5I|5h?I(HGvfqce%0~Y4M)A zU$RT)U&Q>!8Tw$K)I_96c-9c4MzW(J4uL$^^xqhq5|ksAwPj6+0fUIf7Blo*hg$;DAZ_rGXH_V2}trIfg>a zY**||*{^Fgyj34lTAh9bk+q)~JkQ5$>Xn9begE~1?=b+5%a^XxH7^yF^mfXeg&&g^ z+szcouDE%Z;p{#KVsUj&atNoqR6lN@X9)t@K@Pp;?ypBswlQ&A54cM+%}Dq5gIQ9A zG>bZDE!ioI#6mp{_l|F{rbhF%`P418_CmVImaF~i3;yfvrG+VJFg5Q}(J4=k&1T*{ zO8pP#Z&(6cMC0_TTTRyF`E&LNYu7lPEhST+1tOF-i4NiO$NJ${( zG5{DWiW^{!%+&8yEcV}Pv-S%7=Po-sZn>k;AZ7zfA2~pB$R8t!ysJ7kW82%!fc0cS zjlmJ~hGpPW?nU-KnCXx)XKz~l$>O$|*?Fo9za(TBJ%CFvQ3@P>T1tv#{Yb#T0N>vw zIsqybRB_RJD%(}esG?UbR2xMvByU}(Gaaj(>=CwoZrbqSryen{U?z|l*qUm2$xwP0 zD?h7w9u@y&M%LK1-8Fw`{JYX-@$6LC&Bm(eQU`X7YqA#Uq7yKvJkT!){f6MphjG(X zx;z&V6LWg;vN`tchYYQ z8hzJM8;)L$bKfWbs-oflVjHo@VD4_zAL^({HA`+e(XDA(=)V<`suMN0RH%aM=f8j{ z^!YB@J}3AX$p|H>-7mLq?$G6k!z5~BKgrZt(zF{5Kx^uTpzcE`#bvUB*_m^SyU}Js z9|xoz(?0F+$*SRm-GNOzuFn+p?!wKNBsBOH+Z*PFOGiQ}uQ9N0| zs$emx5*23q%0N%j-1eR-v~aR0si_yW7Swg6@dA&;u~R)#ZP%}wYQ-GdSe4tK!}5vk z#q83MLqmQslupG>^}M{1DW89%8Zn-A3CEQPpC7cNF$A5>(#fk$9l;yweYfd(zT0vC z?ZN!Vh4zhQ5qy1SDsHJw7i?d#M&-CZORFNizONJkjY|xXg*rJ)U>Z0cz%L&vQE}f| zCR@AF@a&)T1>fcRMl*MA%@VO1@veOvu;(o@yl;i~9dCRppTAOzS{WYIp6y7*QL_JM zpL5wsKxlgvrvNF{mkDf#UJw?;8sqR%lFD8@f+D)qBRa!#+A&>7IAO<0f{I;3c0;N; z=+H-ZnsE1ci%Y^u0!poZuy$^&6(fZxoLE&{-@0caT z;Vsom+2MVjk+tsvCQisjV(Ag$&N>H@irw&$l|!FazPaM(%}LKelUDpkz!Di;#5q#s zB=vn4RY$lKEIM*f&>IyKy2>9h{OpU)5%a~MIw9u(5Y~F!gjg9B&b8X!0Q8u<>83F% zqrHlrmjKH^T;&Io>6#uI_cvvATzDxUz9Y08eE>$su7K8Q!)(^aWAC#DR$znG-@pKQ z*{ZbyPFSjQ6XZC8)(eQBK1qHGC#<}F;uKE%Db5TVOOLg?PjB6%jM(DTzw)lobqSE} z%K85G=1mWt1vmz~5&~}ym3 zNHRlmoFe#I8T5LV@Sis8&TO^up>%n!K1tQV8qixy&?lKD_KHWU(DRMM3*SU%hkMAB zcC90kFfUCnIK*5t{X^q5Sj%SPU1_=o_`Crv6YAaOjZXL6pPG~bSV`pgGxR~u|Nax%q5y2(N&T5;6dSl36(ysmjnG}(xK((#r|mO zPRpRH?*io|E6&tEpq$)}A}1y8EzRZ?#5(Do9xD|uYiVZYWU6uog2Kqf$ujTN=5!t5 
z#6Ut|KJ(@h_e{&jAZ^W0aYe_!f7p&1a;K&t@9h=EV*Tm7M6+^mL)FZF(c7@`OH{6f z$)Xn9Xhj?-MK=1Qr2-^cpMAYV9&+Q~?gzJTBqENZLOF^z{>hg8VesH`{YN&oW)OyU zL3TXTa)l3c4UB->=7Kd+D5FCvG9iGQ~5ihfG@&52X6%_hy5Xmy{+!z2CnqQ%yiILHiLWLrj8ym%Uq~r7JC+kk@%4h8WBEw zdin=vL$WfpC4;fC4Gho916rJmI|uYf)z|Cuwax8}5bsNDc6>3j|5f4eD?j9my}Odf zB1!W2G-Msb7HqEH+#lpez%lI`9#?!)5%s!m!80KmB{cRQSJJ>9uLd`yCHR7Y4hf2M z4u zruU}ANAp12$L}TLw*_sVyK%|g?`lGzyu(7XB+ntS{j|gJizeHt%6qc_yLi)Fj1xtI zpUalkP1+S+7sg9BHGWK^r8r4@Z;*Xr8Atw9dp#Q>d3iJBt^QQDoFRV|bP^u(WUp3zO%ur%Ai~+^esdFmvDi6Gv>IC-875|WC<@g7k zQ;?&bS*==Kfw@U=M~*F1k(Qdq&g#W24gR8!VC&Um>~ahkkd(Ma66nJ@3QM?OR~ z?X83b^WbbQK_VcAsV^*W=6oOn+1`4aOhtzuq)kO|1mwB_(Y6I3!&fEY)BWtTUUO3p ze_Cdiir^cF19qfnJ3WHaack#;(9p>w;bhPS>y6xzoAE7i?A9fZd=s}+bYt09OyO?K zHhHI|Uu}|qh&;znrmFlw><7h!%>^9AQTtEmVDji2|BeR;O8~AQ0>AF_T4F)x%*2aN z9X!{^uc!-c-EtP(U#PoL`|QL$5v81{~MYhIiHXQjZ1~ zlh?d5XUdOzD@`F}{6JYrIj>jw82WrHU>Dv=JgN6Gq1>tlw>kCA{XzbB1GL#OQKt@H zxa^*b4gH=L%5fHbNsLiVr+TC{l1~v!uv9d-2bV(nteS)Q+&`Z%zqK_6nwa7Ped^JL zgP@o(1AAKjRQHlRhAVMJ$M*LI1V$S$5SehR#y)oh^hl{b=pkK3`nvw$vSLe5D(U+^ ze%`2}TA%??3iK`~fiUGF>Qq2XNfLCm?y3_KFio6^w3o#We0K5>kN~T1ud{toDT3t- zQgeL0sMrKc&yLR!CuH>$g^4^tH@tZ#lhREd9n>x#2#4+2CpJQyp5WT8FwmcD<X)_L2W82gblXew2HP>z5W)Tbt*1EowIqYM|vraEW)&!F?ug1>Se za}!3ciT~Q-_?E~)%4sm9@btiDl*Ryi=4gF|AW|iYN2g<;`vmmI1NwIv&Yd*(JJgn{ z8IKokjiS^LV<&18{5y2%`rF4A_j;ZRG1QT7CRH8=fSjnH>E;vkw86t4yiLjTNBQjq zj429RUwVYOJT>~I+j43ek~H7Vimw|)sCwv)R@nD`@U(M!hx{5hGkq5%OXAJtw^{yZ ztrxRaFrx|upjz}?pP)+bExvh7PqICe04D0La~8dwq4Xn&OeA6o|NP>d`_uI&^HOYl zLA*-g=t`b8&`8|6cD2ws6}s&hzg;+Ao+W91Gn?OV>n_bJ@28wGKeExvele<+5onC8 zv-=%yG9M22rKVzd0WUn&7c&;m3YWSMZYEIkQ%;1qqBhD@5sn`Qm=tKJzJxwj_zTk!U_CKp#A%eG?@h z=NY!#O91u%FQ4r7cURTQJYJSFB(dc_e|rXTc*qa5IaL_4AEZkzbZ zW0kAV^q{Qa34mH-qKWh53Dv`{*M8;`$B~C2WjV|1O?@%Rsc|YG8k8)5q_F!qcvz+X z1fb=uo)lf#OBkEGaS$H!_f!;e_>c{Kb`zX@#$P#5O`f)G8#$gv2FsMm+7FZgVRkvQ zjsng3=3724VtI~!MLsnzL!5ugAvLdHQkS0S3uuJdlYJBdqtPm-Z1VM2XNut|QsTEF zs-dEtCO2E^X4Zyc)Ea0Hs+o#H`3$J(-i;SMH~5aeE)gOU zy6MqP?lPI?Zuj_2#N+`6vc7LH6znn3XNAsjg78{(RY1F1X1$^+Kkt!qFz=MP>ewgC^B%pz;NCAqdKgy|etxo>|D zMFiuVQmgYGp%a@vhWWfZ;m+KXfD&?ag?YM#pVTFMK>qb{w>LgtrTn7a575Li3e4Ec zX)yG}KXX0l2M)us90*Ea>pq6*YCkJvBio!WY>oFf{*qCoy~LZay!fM@^`B z-8r~DwA6$F%x~PtlJYvbe11?Uu_npOo#pDcBoDBNX*pvQxU)v8C<0V6ldLgHa;0V4 zV;y-Pjcz}ev{~MWr^&TaM#|Rt{8e98p-u103XIhh@FKP|wh^@}c?l&-%06NBfIU9L zaNBs~!VjqVMG+HWI8o&InCjA*e-SiCiTv+VbzH~gCEl~@N0qEN)Mm=>_(}UI9C(m% z=XTN_OzQT^MRFPzq@5L0tx645j;Q~%RX6$ACs$pc=sTYkLz;#5XKj`*l%>s2=#eRu zwcmCDV*wQohwBTP4mXTXGzHF_qF8Qa7@ypbgN$38(3o$htzt7qT6^s*RE6E&Hp>(eb?0x*x3387hBW4>>^xQ$JzV(S*!M!G; zDg=Bqwy85h=nyVKi{N_L{O3E`RZISp@r7{rf~vKG(AJV`D-sT-x$SJM300l!yDUxr^AJ0;PL5!2fP3@I$HK`&wPA4>Pe;kTg~t#!R2-lwc;JVDr2=PE9I*b9nWA+*gk%qRzvUm_u^jmKbzD9c zGKb+dLS?(*7?Cuhh&_`&wsC?TDSbU4`#eI5jXdz<4I0)f=fd?}!y-%GsE`+L(1$Q4yw>}XvhsB$}MNtg`|QkD8mjH>7a-C=mp=PFH3tsmp<+v0>#c!maqKT)Dzsgm&% zkvyrXn0)qglLBb63G{htp{dBNaOlPB1&CxWsZdRg*tVE*y+Fx|uOB*lA}!nDP3!Q4 z(oK>7BqXilAaV&cNvWD;?Y%|G5^UIFAm7&r! 
zVBgg{8#m*;X!WsEq#tgyTUkB=-{A1$`~7=E=1<=;aU8W zr?)!kOhXvf46aDhaW!-gd>KsEX{VdN&*(IS8{5u!cEH|hmbX}$yh7CikSu`ELzT;iua3Z=LCO&a7Lmfr3)aY%lbA=hgvmS3LuFBpQ#u<^?$ z$raT!_vhwydoF37#bQw>GM7tj*=&px@W zE;5#apd_cUxMIr_J#%DWig$;4mBRR?Lhy41xm%ZS-KvLibJS}2>0iayMcm1m#n;um z+R(K^dzNrS$%lo5nH+!mis2>2i^rP_UPhSsO9!M)1u1_s?DvZCtMfE zne2PqQIx}d>5`1uo_Mb|XI@rarmRpdONj3$k-G?0jF&_<8$?W$H#LRPAa39s5v5~y z9f*8$-BNOMrL%ZvW4WSYXS7pOr~EF81~-bN)8c}?V&6tfJcoWqUX8tfjeN7qq%ZgI z#3e&khe%w?j^wr*(LEbYXE!ugnktKozoKy*~BA~Ds6{1>5o7gJCS$nn>@$@dN{5_|UKyXR^#JFD`W?7E_x?53jP;T*gk zZ+LlSUm`ccUh6KTrL+`(a-%QxV z&cdj3U?HN!wT1yq4peW;8cO&c%G5ZY>9*$ZIhvc_B(qFZPSbc)yfR^3v^}??L!(#$ z*;in=WKn24U^{(ZOBs(jxi;8Cg19IVzQ*AZxrJa{}?;sKsHh zk}rGJk>;Gcqj(dl{x0jwaL=1hF}2g0{?Af!lXfyME4G-ZrHFQxX^z*_O7PbVpd1_J zA{bK|*YzFx*r%GD21n>fD87p7UMQT@9En|CVX!W{IimTjO{2`pTJ<9=`mq5w%Fftl zYj$fQyJ%wBbdf64y()hgCTK~Y?YS=Az0p!Zl7cLcwRO?_l9Fwb_*|=h%3i1H>a+pl z@x;o|yT3W>u3gICj*Zn)n!QKhe?nFmUXO&K(bh8xbc&}KO0WFzZ)s7;q^-wuYQvqN z6s0^X&iJKSU=Lv@xL7hC%kcHM@6c_N7^k`Q%=Vzp8k#$+`KZf28}o9U`DmCd9wQN9 z9}q!$#_jy-_F?N$RoY`M&!z0TygdElxrBR5IcIfKUl&N;g(yjRF`3#SZkfN>fjRg%&KG^arDq3pP zY9ky`Be@cKAx+Kct;4BagIjNX7gU>tDLZ*qIyK))F8738?0Wmb*4gszz5Rf?r-I}q z99%F(;5_==Ne>x|OoVq0zv)VO*P``28TQI(b@swEKfVZ%pudtAZ-axOW8vA&OxmVe9g5;uMxmq6h~V1f3m)6m8s|()>kg>oM+)sOYOm&sjnyQpq zkFiN6%zb^^6DsUM`H4gRrr4#7B`LlP>tK`oBc%(kI(&YOOW(w}Kuk|h7P3vUKy2Ah zHrcr(Tna}znB7QLW!(qDU+bo_H813l;T>nfNEFQSNcid+S?>Oor-L+xRaS zjoLG13ai!K=C804d(+OUIF>8E=2mn@FbqNOgS5QWqi=BFbVsb<{8eELrcGUk0^^w*h#(H_gk zqqMZ%V7Z~R;%&d;C(F&YwkAqEgmy+|zGRGG$*{M`(YPuX+EJ7o^1)@2zD;v%M`c(h z5t^pP#HHc>q()-W{?Dd#F-r~?56_FGJs6PeKR^~D%JAYuxcfDp#HMg`#iQ=NH-ec) zU1kHDnlzLW6}>9GEH;7Q#`}fwau@kBbo<9Qg5XbK2sTZ6=pc)m0%*}Xi!$wu-ESf) z6Kx-Dl?hF`*7;V2d@)<Ro5kz`)CQNr?oXr6*@qr8h%2Am40Emnp?dw$*rfo$Z>K74qY*pduD-jGB+AH?kdT z{PbVf7(B46Z0&9so-sq3+{1{qhP3^n|GUn!=`lW9{Wx%Ty{9`^j#NBq9X?r-E}2fu2M*9@LuGvG!PctD+sR#lISKvJs|PHzFU1C0eMm%K_+0y#fU{kh@Z<pEnc;LQNQZIsaC_Z$*l6t1zff#;@<@!1P#K zY(69W?_>jEWU=b;C9RH~zny#3l-r1saK_$N@-^`oJVsbpGu@qP%hWI8+NIudMd*yR zGdy>(Tut~dZT7U%Ey#wjhXAV+DjeBfVOl5_nLldHdyk2pOrcEYjaJv$flHxh{!qO9 zLHe;GkWaaZwa%Fu#5Az;l$7Kdly#0M3KT^~ux-#o+$xuXo@*Hs-4&IilahZY_(M3O z4AQAuT?H(KeoU?>yn@G1UE}&of#VU=Gn1i}fViFA90=ZL-6~$D`@^RHuo029lSE*$ zZX!i@q<>cEV}?m+zDo_RYK+P@^v2=8eERXPEYi~K$V3^(8v9xN&jcO=9ZA&|rskn5 zdVFX9PMnXv?IVKER7HRXJ-Ir!!O^0>EZG7g1Sq5Y7c>*j|D}(LT(pEdeyr^rn0f%Z z|L)<;*8$$^-?h~FOFJTBWp`LuHR{YlU1lS~XXJy$zr-kARLL^C&7VQ(?27&nonGL_ z-7cDf(mr0S?bKVT4fS|%hl-fIYWOczo+WTRQfTw7(|X%@ zuBx%TW1~|V9K~Cx&@A(+&>zOV%`x186ggZHL*$y4TeYBNK=_yH!fX(X*EHXxtA1vA z*O0ryeL8)l)qF$V2;UbgDg1+1{fGjU3m2u!bd(xOOshxSw}Wj>apm!QL2fwwWVTN4 z*c8m5v&dm%G4$AV*I$Ksz}ZEfl3*bIu{QBKO!dOwDW)O&kye+Do2|M3kA3~G7a#&y zHB7h;_naxz=dO_VZ5$3qH8uFi0CE0^EFuA~(!vSfsGs|*ZD%dv3PqsrY_6r~>P{gi zWiSlEHoyoFTI!2Wzt2kW^!$APe~Izub|JJR7?8^|?Q(e8NlJtzOgRoSpIQHtSpM_B z_9ktC^H_WIuBp~v`aClQ230(r$KE+jPs?-%qfOccS!bQmIm*S#fB8s}i%A%Y0x~On zS`lV1>k#YFAK)*9a=atjLwp~UXwKh~xq`%4+i;Xv3tH~9#h z7AD?+K=4H=<*r&*V@eBDekGEL6d!to`^(wC+`(Mt%8V~`07iSV#l11Z8igJo|L+X` zV%hKD|Ht3No;@D9ypS-zTJ*jiO7z-RKd&MynZgN+WZJ)sX(H^OuVX>%PaFQ- zYayfJv3-xVg`3tGsK^)P{Gyj+%)0c-yGlX$a~+irxh~Xu>F<^jsX2!s{pKf%40BGJ z7EDcX7{!#@u;#_aUzyz=94cCBoX$NQm0iP;E#=+M82yO&FO5Kp_Z$Xy?hy#FXlzZP zm^-;xxB{Vt{EoaMxs-paUjaz2CXAViVg>xga(#fTFOmWSUm{?klum61Q6pnftm#fT z3jh9{3Ga%G(F;^9hEmT_f7Ie{3rQP;e4nu~JxBU4cl?!(eQxX6aDAagy~9jF?N#f3 z0@ATEr>AR+<8ZiT*R$KlD5+^+YZdWVO1Y*laPx{@(-hh|0J(qFe^xb)=GLkNa_6fI zhkZvLNCuY4vf3YH>qb!V)}6yw)iEH&HnfYwFRkI#`j%?c7{c{fMkk%*gbNSJfd>&O z#`6QzOAn_}=|^5r)l)7k%>(Lv2TE38oZDob`&@g}<=iQ(+*G87_zj4~T{8pjx-*%! 
zxxQt3y&l=Or_-5c-lk*m0sE~t`7FRu`C+c!D_FlDy)?JW@m8@hUb71f=^swj7!Rtm ztNqCv{%XwMGRWldre~oQZrAypRya#*^Lt}re#s!UoX_6J&4|EU4FbwFr3cE0~x@`3zNu&mlA6Iqj27=8RLa>1s(t7fp?v)j%iP-xUB$j(* z)+8MV4%Wf;Ck|RCFm%b5fa6_()%n-+Gw%p>t7<9ZK?0-OK9wJ8WwNiBh+V+@%6Hf1 zy#*iVB$*@9#eTFuGWfvw8LWX}UlS1Nr?5>s*6)Z7B~9^MbLXs+j9E*W*!f0-gV9w) z8|db#^P~ipBS1dSzoZ3AJW2xFe5+YFVstP7)T?PBgXUW@ZUTOSiA-S(c@#^5TF#_O zUpQW!L~9h@jlz_#Ey)U1qfu4c2bn)4;mX{y6r$EcYLf*}NhF*KsZ@sx_!u0_+Gxg+ zphTQZeB<@gNg$*5w5;pyvvy5_&IJov)iHC_aKz!j_{J;?DHU>}XLVilLXu;MVzt7m zr@Wn4$O^JNHOD?cO5}} zM#elNw8nO0Ai_2uq+d%{RYBX_H}q2rT?mC)yT8ng=~|xS(JB{`?`a;xGW31f zFQoOvaj)PecssW6R{blrpGZC{Bu&Lp0EXbS1a>)T9aM%pzKI~=3Tu~ zM?g()HpskMTbR7}6O=?~iJewT2}fd+0vK}E)qBu>CkuGft0|+t7j?d69(5Ut_mZws zGNnA$|0$Y%Cp?7HpfVT{LsvshC46}gUodPje<3Q&R{61Z!@5OS%ug$T;IUb)MR3SoH@;vgQED zMi@%@a9<7hOi-l#dRZs-xi;|zLlz&Fp^LI)H5@)xTO9-Y6p(8WWibD7*1|VfF->8( zD}~1v({F<(y1zuVm1y{Wo_~En%NXEOY#Hv6s(RI~RV3(#`d-x98b4l%lV-dUEgN{>!rPK?aN8JGwsh8$$s9j*Fdj$ zo3GaKn2bEht%zYq`A%%jUAj}-l`Rp7VVd3@^ejL=UIih^BJU^`KE+-;iJ^eIK>wJi z`$GXsX-#l>6T?Ej#L<}x6`Si~Xj5_{gx)H|b~)_uCVd9C zR($&g+8sHEY$ZTUNLf3LAGT$cXHUC4aMKhkXljL>Tq;>vnXmfH&~1Mst^Hyv$18hi zriz|S$%J2 z8k9OJnWKsv1E*~@Sjj>%Q$4UOQ;q@BRA{NGR{=bS9C*$K#goR_K9*frUVg&0>VBc{ zK?@Cin{QIN(@lJG;Tye{f%%Tl4yi`TC5W4c{6sr!13G@1KB{spQ=O8~ZBx(Tg%{^^ z-N_BX7q-&xvs#r)!+(AVGAcJ17)X7ddYhd!w^UvsgE(}F=XyzI_cC{B|8P^k?igEc zj;65DmisVp()9=rOYsZm-D3o4H`!fC)463XjhVXJh) z5s&ai+rH(Y+hKy1`C|sz8u4h}m5Nby*IGE}Q)}{Dm6wlz$st>Gx%L;zu*|~LQUlDu zO7KFSGB!G3V@mAOF3|`yBX9$q;xyj-&7=856D*eH#VT}Y_PzNP-qNaL%o{#e7l&n9 zd3@-*-dwW-SH?M^HrK;xy;B!xsFt#H_(me84qU4IZRplNqZiQ z2BeFq77}5v#HNPh*O3x>JJO(Hmw? zB-Wt&RLjDzsX!}j{_3aR4&f9Zx$Ci^u{nZL9cfG1@nPwhIE!%~&>KAAi~YWR50W0k zAuU4(7VV7C<&1G~Fn0$ucFR+2eBW+F9*OXtRNGt^9*GX5YHVsX3ijgALu}YIm3-I)<@b0g6>-%< zaBJX!$H6wWLpb_ObIAX+t?RQEbDwXD1}#PhY)FS8Y%C}3tA;%#3q+lUeq;mdM-Zu`r%AkWzV^v=(|B*t=t{kz}) z_}Gh}U-7ue4hyP(CR4x51JHLMpgAmLE$YnuN19tY)oS;o$bOuOaho;$1@_xVnb{2% zl3HLxDx(6vLkH`5?F48Uxqh?nos*9#+ecsUq_ughHNGRoHiD4b5X@jl5nqSW7ob4o68YkyHx zFE_YKx?Xp3$C;LY>?H*yktfPcB&=%@g}A%(FP|RBK{xe9O5= zu0oIss+E{mjJo%tu@Y5mVI4UxX9d)N=kbIY%LXCOS+d~qp8nRdSM^A{0dZMWHr`F_ z+M;mqDk>0=@14O+Dw$6gJ_j>(p+$u6vqW8eS$>Wm`~!J@ac-k-y|(MqNUm6S!DxuJ ze&#l@wsFUScEW6k6C=LHjP_V!>sH^=ui6^1q}bsdXT_}w$&Ik&$Fe8@L&5?c|GI5f z(RDv?&Fj0IF~zC%|A3(){Ut$b)U)0kS8)3&!bvML!e&ODJxx^LEZ2jdwkYC!Vsn0; zW~7R`R^J;@QZqImh-zZVobl)Fns|w zSv84^_8d-L9o*pPhJc8~nbE(!*-B`31Y4#1wa#(qJ$vAheFplf5+X%KIV-Ww!`~ZX zYDT`(H}n|%op$2`BUb7aOmAGd;&T9Iq=yDZNjc4FLhV|JV0p-UZC{BgFF&})ZuY~T zYs)V`$g(_OZ_?X1p=H+Un;TX4a?XJfB5tp(`MAh)q;^4H&&3FkZfTTU;csqHC-Aw= zk5b&`zqYR}3kZsKOT^j23P()`d(Ylg_#@%)So&$ zzuRf~)Np}%M)GHc4lD`(19=F(4|+ZH<@RZ=_0m9{kwl8>$p~u7Mg{e9Io*;s`}Kw; z>K9^z!<{?lSKOA2&l%j# zXvb89t8bS;)6cCSt<`xg>*oG;5v%S9?rL>vfuN^G(zzP={j={Q*_l4a2zz?K^ccNH znG&e6jp88PgZI=c3>L$^>K@#NHxl%Ko~^Zf<{@4>26i%qJRK$P_KpD8l_Hp#q9vFw z{vWGg{TC>}F)$Yi7^Y6TfM*3D6~^e*=1=E6P{BY=5WFjxj(;90n;)1&{mAMGhDmEG)iYB? 
zdb9VZI24dT&($k@X$9zYeUO0bLWZ6TNHyBx8RfIR{7g zswcVkDLycWyeRu6O@RiCSnQ641}|q>A0(Vuo#P;0MJzWUF@0vhfv@QF-$m)nI+Xsq z6~4?^E1C8f`>YKjgW+ciYtrLqKQ)!v_C{l7nOU9$zuA{yVQ=f=oF``UmHPskhz#VM zH^H+@G37y9!#$@|>Yljv`yDWS2rME?92D~*3>EvBN+ix+rsqIm%;FQXHw%D?6W*?f zhuwwxTl+SG8Dm;4}wkulc$<=h~$UQyI=NwN@iqX-w4 z;g^Vhg?Unor&#aV=ba)6=iUgInjkm2hB%CC3B&RUCp;b^vzrKbS1yq$Oj=~(Vx@M{ zFONvN&Oau*;bG+PAbl$1eBg3%`DK&pdQ@h$o;>@NB!i`Cei@5m=9Z@nbtT*+J~n_2#PSCtX0gJB67n2o$N za2&h=J5{V?Oso6&lIvuv)9~x}-Jc>dOI!rX;pzRA6oSF-z(aiJKPLA5Nd&Ev&3Ze~%fN+!kHD&3eS2N{mfiwBb8L7|vAtfT&(mw+lh zAWH~m-Lx*iT4L%=scW(KBJz~f$*pgYXL?jDce+nMUBmi)N0P#-z3Rn-;6r-t2%w)G z_GER3%c$?tOdl$|o=&d@+kEJ0cIU%wHDCe*tLsrpj`{Y(Z&>?AjdP zjKOSO(rGBt2@rCIhAO2ur=6@aBf1aN1omTrVS(xaFrL@5{zo3W?-N;h7!NEcTEjB5 zefIf_C_rc;^mk@#a$inGGE>}`Iqf*(Akn0(_(m9zo^GAC!_9{{zUGGJfUnCkqF9-= z*M)^nAD9I0b2DI7V)S!cpFhoS+}o)Asw{h*1~aENU+Pf$^@0Su*@>6xe_L7TU)r5i zQg-xDR!+a`(*_16_=ld_4zKN$MN*ofj;4>iFSpPjalKCW%jxzTIhLS;E?y)XYx!S- z!TM;W%~cG8g%As^2ANDKZC5NP0c{ISL$06Q*UuIi!Wl^vf5tg|!&jx0l-3 zGlM0~npfkj!IYq0XmjD>A?xb9$Aafy;bV&zQcuI~-XxWcNyaTbql1)}QX;5{J(V2c z^|^4%91lM_Dv?n@IT@!5{osm~SgLv&hwj&mViL&g`UnJ+pqn;eA16*xN9oIfp?310A zHsoF{!E;-U;>#Ne%M^~!fHPdBd!_Zp&>QLJ#%_pSS=(h`@|bFsE)@yRjp9XmNT=}ya!>TDmL@N#~P{YGg#i0WO$xz2#1^pD3a zsc+DyrYofr#g_zOKOM_WPG6i;1@(s1|1tK}aaCZ*IKY;WN3yq2+k8FQ4Je+`2LUxe#XJGLJSzO`b<;~IRV&%Yg-3;&ku0Tm4{Xnh!zpd@p$7qN`BUta&bnb98 zQv*@k_Q>(+Xnr=M2mQ5fzFla8*T-?L`-hNaDAVGg<2=xJGgV_;o2OT~u*_IoQToso zXl_|LR5J=yN$Lh#v4q^K<}&&g2ejvUMZivP#op5(j1*LpCw13FXvjqh zfE2iLRV@FRW0NyDyoC%OpuY_e#p*O9ptC9A=dJKlHUhTXfVlo#4BBXh{NeBU5Cg%B zjeU0rzWSQ?#}$ATo(jU@{3n??j$^t*EFDcrKdh zl!~2{AZ#N!?E4){EIrbyZ9y|p|Nhk26a;H50HG?4kwxa03-niJpv@?AKu<RinYLs^o=`6>~VId^S znmU8nBnZCXPBG9Zwo;Q)`YF~4qHc3y`W!hnr|sZKLxTvITfW@ep4+u9MP?j0UB6ue z+Y%Esz!g>{aT0?3~PVwT$5D*Iu>+(3xMG&cyBCoRXRP{LmRw1YS9S+kVA=rA+=3 zbZ8N0bp{X!L_MwFg&MX ziLvuF7Je6MC?9>M3gfl;2+(88e+)lN_&o05DmHR6y5e)EJG+&L?Qz~5(m^W!wmCa_W7gzBW~WV!eCP*p)CJrZGpX3wZs71D+r>bXHJ~ z;>)6{;^~6Ip6y0$LODw=#_cyerDT8~Q~Mk&m$0FZ;77CGtF=g=Vx4bgbK9+d*s9~J zfn(#Ja%XCTHNgHkU)b{vXMy_nvqtMxZN+THOkWZSq0iQohuu8z`4tGAz4`eJ)SZP8 z-QZRb_d@CNR-BhoB8Rh!ja(bbdK?!{e4JzZ6GXma_mw9{`p4pNTWegQRb5^sIU9*I zUwMB3s>o6q&O~#C>FrdN2CPC`Gk}cFYh=S4qMNY7t+e@v)9#lQNSP<1aY4ohnq$XHgf!gT}$Q~E1o^FUx<5`mc9|tWz%C0<$-45#47sQ98j5Lu|N<;G*#&*2Tw`dVxqP$CVXDeG)#W2E;69Rg!7#`Oqia za!X}N67wWnAq~$jQ*8i`I?&}9JOI5k)Yz>r>XuQF9A`KM5B^{0I2I(9E4Ln~Q|;ryCZkh1hbc<~W-rHS$!+Ak9K|2E!iC*>@fE z=c;@C9v!z%^rTzTdb-7`{s`MSo27;}o%c1u$vNPWN;0Vm9B`Sq=}m+URo>G;xM}~G zuT?`7AW1UTq$gKk^=!|Nwdz!kj06UVs$7|i{)iy7o96w-P1@~)fXozymaYua*Q;++ zT@oo=HZ^ve+gT1fNA4WsRm5#e5}(KAYW7g$BzE3Iz3gU5B330GD2g!3={JXeS`3(d z;_)y9&nZ!@_gM39F#G4o_=W?pa29lX)yN3-f z$iaT@{KlWf4Lq@hd+D-8LT%&xx;~&4&&i=%o|G&|$U{2vIP2zIRh{-m^v3ZUq=n?< z6Dr4M*C*SBV3ug1zAd-mruu2)qPmVR<*L`JcJilBGL15wWsaw%B`zxZ7KeQxS7^yrkm4xasG{8G~krtMt{H--vgJ^?lZvt-^azh`WJ`X->< zh3WrVW)-|-wmu053YrTO);syx&*%^xWAcDaq2TP}6x$snN_2$%D~!}$JSS1r0wHg4 zZ8=Ig4z5gfAH0)G&&NS(xY*V*TCL4w!)-xm6pWkGObdx;HVq{U{wmMc3$6P{cfZ{@3&PSOE)^R?0}B$cJg3Vz$6j`lK2v`EH0)ObOk+xkwF z^8JmcF)(|&i79{TCIae*e5JjKq;N0GtP$;hhsbYYk-|egIETH@E4CuwOc#swvm89R zfZ{LXW`s@t29AP=vp1Y)M+C?7_Y#`f0hj&G)b@s1g>)u*Z7t4+I;W#%yZK%pv4Xa$ ztYVH4xp6mk2ea-Mv%{e^<+JAZq^+J1*y;{?FD04<`SPs2RcCi|}L^gLT2NyCi z?sblNwOGwo8a9Hppk4#xrx{hX6Tq|q9JtwX!5BiUPi8-wdp7HpcRB=w6tGG_o8Z50 zyzCe6p;}yBZKq_t!?EbCP2NAEc}(#iXi0N*u>-OrmhZAi=CB-iKv_T|ks{wS8v!-A zfeyY(D8*qjW5B~2n{qd+8LQ6q3DL=XT`Bww3YdGH$c{f{Ct~MY= zjf1DE*n)IQ1Y6T+oy!0Y8dUC)84)RoEpSX3zy3A_%x>vFD~aLPc9&PGUjk&_Z}%3% zhe#1}6 ze|YQ0;}EmfvF+Pz^Tmm%N&`&4_OGpwv53Y$=vLqi7C^ZvkdsWF=2C}$#(5!W-NPxc 
[GIT binary patch payload (base85-encoded binary file contents) — not human-readable; no file name or other recoverable information in this span.]
zEk3`VUA2Gx`?Id@P6oi_b&;DFAU=>vSFm{@7Y_0$j(Qj^1}Ex zKYc>82e=YtAwU6PuvL+gF`eJ5QlxfeoJ*bpa#5u4mY8Sy?1>jKfvw0tU5V*90k4bn z+?2~qzDt-eK`K0gjqUgg2F+Szh_`k&sZivYVOe6fl_qDQz~;n^s_MVL0|KnHy4kX_ zP*Zv{lCI>+2>$9n>-SH^`5!z)Q?{jzkiJwg=Nb30?3auI(eg)CJ6k?r z9ya>8Ab7>H8`x;U&iK=n@fD9QMH?42m&<)IP3Mo_7suLvVxlf#h3z5uJ)O=!jPA?{ zRrU+LG>hKT+A4EsxiQr{8>7#_Xz7Xr|5cCWM5F2V&p#12z)%R4?<}`bi_b$e!14-u zsMvb@{xQjb01u_QD5UO!!b8plyTh2nFj=_$BLH%fP-RJA0aBMZ{;f>mN0f{)x8sSY z@sA~Ah+kc;{xB^XQ=)=hb6-u2?u#L+^XF=L!Lp2r9ivCR- zJLxNxr}4O&q+-vw#RCobn=IAOav?loh{b0!r_L2Lg!~!%#Pdo79ZSTA^)z=)sC2Ig z6Gt03=Qd)^+8)eG%{fO~ss{n9+)wR zY#R0H*~n{;N4=gNodej9`1Gt7wYQe+=Lu1xyc<|`pH^XigH?I9sSzQ@Sty@cAU&mh zIH!HpTHbJGX*Ri*Ar`vmWjHM}NR>8nb!gQ{EjWeAWZiJ?chVf@jF&SIJKeeHt3xrJUdMu8hIX~?kww+OY;YdIbxjIhGO>%z-7u=PwqlE10ykH>zu<>0s^>4)75$;0JC z3+7+y1hTDPs}O_`u>YFywVFIt2_C5;^0J|ndkn&fGCY2L(=#c?!aT4?_FZKupB}0DZ)Hk~q)3f2ymi!2nI28! zp8KmAs~`#P$!UA8W-p+x36Z@?`<49iVYINfoy&8jls)!UaJzR!A0}C>WdH+5@Sb_ahb>Cm zRi4oq+K&Uw_qlI6_$M^yS0+$QaYzZcpULLOnX_-FW{VQ{Zc1vrmwek@O<+q#rM?-f zshS7;hmc6$q*{cM?FJQRL7)HbUZ4kw*kmk|P{mDC&G?FUFy}sfM%C!n5)Crloh~|G z17Cb$Un{`ExVI&lGqUd~Nd^8&>wj=flf zEAfVpHJw!LSJyP!zq_S5KTT*5cK8sui4@~A+^ao1f-@uzyYjK?F~9lG+QTyMy&(%V zcIjl@FVJh`O20YE#bRY{_WhQo^vRcD%V_9=*ezUT2;^qSJSv*ZcM!b+mYYrJCxft| zQjBE3pS9s1>^@74TNE72bujLA=**X}wM0^dw>}r`)@fi5aAT>{e@kHnYM92LHrk^< zL3Hb$Nf6eCO=}PpG0J_0qZ-gYC1my865-HI*qjl601KF;Ih;w0f+sB)>srJeAlzhB zLd@RzxVL!Ch<7Ze+XrrI>^OU1Gv2|z)UjaMOzODl#lJwm++9u z!ejkmVamG%toTTE_%^P>2t0A-#q-8E6w4z6sR0Uk1TxW>Dng8RD)WOQx>`}s&gKZ? zRN3Cz6~AVy%twu4%E_4nIJk=_LKKg%PHyPYweS*3Wb_)$CJRsuqEAB%Q0BX#O|#j5 zL0K$DM6f3|o<~Y1BsV>_c42>CD-CD9#LMKq;A#W$;z10Lax&Ptia|g;h!4?9d!hEi z4d!T^d4@p{a4T+>m# zg?vSxB9QC>Qk$@fYhZ#>1sbVm0C81Q^|o~M=YUh^3gJQJSxLEvXr>0w$Q}lP=%={T zK&4Hb=h!#d?A5v#5N3lce!RuutP^AwQcQT({UWK;rqZX>*dYY_>cyW*uFZoJfuW~qfc-4@q&K=r2u5IoTr99s$Bm&*K(Cwm zM|3QlF+E=7i5(a5sZ_w-=_78}Z)N8AC!^fdMlv7n;XJm$ur>o-LW-D&@B8B-o z1`U<1kn!D48e+`w1l$z9_w^4!X$Rf|mexG&_jAvQ04S#GiMmA?+yYyy;o-47*D{WG zZ_3Q7?TcyH8}$%z6wzno6Dr|W`lb^8U!5&Om%?VS8N0l)&I^fnVz?T3Nm}?_PliCR zQE&VBjyG)a!=rtJLs6o`iV3+TD72j|BI8_sgF_?{rPr>WnBCea`ua=hI4(2su+*b; zznaF0mhfgk*#27@vR_4F=^+At27*4IgLf2QO9YBH4iyhIoLH5E8%s>CW`8B}U%?}^ zTMaE2eM3z>rZXb|x4~)s`*jPo;WT3wvMj0g^xoulOVee-72{y_t2w-<%_kX`UhxtA z0crQn($%ZnB*TI7c+3Wd2l{pcX%WiNx0@C#g3{e=qNLJ*TCvw724wvNe{(D~s{A%` zR>voHwklQ#feSpDT&I@ofhb`=yL_k)?a15yO#{j1D|YuVTl&8bEJewbVxKpyUIs@Y zh}9CL0|*1>WN<%YoC4_>&-6(85X-=iLn|ggsI8rJc6ZVffm7#3b)K`e=h%S7LlEWj z8Hye_$FA*a08jkwlCN`@stKv%S-mBCd2E`p!edSbu1PoJgH7%QiI=@ZiY}q{uU2Jb zr0)`au6~&GJ}cb3Jy{4unmE+jf9?IsFDWX$|MOI;XypyH;r|w=1iASiTtpJvl|z}j zg{*+K!`p3DP9*yp1-rL#U?9@5QI8;INqxamEPOnMSf%K6(OnGlye8bF-TFWXrNi~e z+GF7LL)k?!3+*jN#pm@KI|cG6{eG_n{^l5Y_*5f7KRPs6=Rk+9etgxy+lVC5eMz$S zy7YY-bX0zIS5kT}eFe0?(`-=|yM3Q@&+t|yj@(s+ID><)2DVBs9LvH&=~%GS42b@)p8f9Dp=USZU6f58E)l8~Fg!RKfBo?%Z%)k! zL}g65Yr;pu;GJcWjp|FxM%;L|tt6fj8|3>aIS>(_r(1|Bvt_pMR6~c|C=!hrW=>Ah zU#iiC+e1h0b^-6Qb|Wt-%B~MQU0p1th#|bs4U!yC_Fh<+RU{hY8ql>y+Z!4}4Sads zCz2<0;BoTBoSKJfz1s!tYhWN5X9<0QV$~q*i{%(}N*_3CU;5L*^m;MQYdBVMOQM3l z5TAQp0^-y_!mD!&P!)!Jlw4TR_5%M-JkJs{M$`uSvR=4bY?l0VSn^_;d#|8peg1uy zT8zPN@YMLQRzH=i1{P1hNi*N}oy)s`_iHQ# zsmT!Pyky*UtbJfc$K%PR>b0TaY%ciwf7X2e=U1FWm#*LSk#GO|;{Jm!1>nS@*P+j_ zB0CLk*oc!? 
z8HxgcA5D7jbRc{2nKR-E&de|36R@DNaIg&Oi5XsWw2kG$V;VfPRDfoz7VWKatvqaG zDp`S@>o+MF3%-0`z0IoVC4kg#x0?(sW5l9s@F($(4~A} z94YT>M6TdxUw!sWWoG%qa*wiuh`napv5zv!GelDS;))R#%yWun&bWEIL~>Gxi;+$B zeln;bFTXH4C)FAH0&g|`R+$r`mi)T+-!K1e*aJk0$BA$Io$K4#rEgGPy|yMaxf%W% zTb|RLozH0TZIPQI_HcY*J7iY86kng@VUO2wp~>}snsW*iw^~;|ZQFOvxh1}@7_6q~ z;{V^AYn|dp3OZb^?e^@i3^kqVWgMpa?ojQDT_2s*{2#8qGOo$D@B1&Jbd2uUkWP`& zAV?}9BGNfzD%~J0wb3D6N{Arcog0jtpuj*nManT@L+asv?)$o*>v}%Ni}QSWzBta` z@jJf1Z{Q0Uxvx&q&nR0~65!d>3$V^sxV~ljKQCvVJ{f;iTLzkWK@G^)({OGET83k$T3DQ)L)vWgwOhqpxf6PRnJHX}!S#I(DqFSueU-Lbs5Pr! zB%d_*V62YxmXfwj&p~V_X2x^opOrtE#5fqdw0zEX!t%;z=D#y!S(WMWBB5VVNxWMS1x-t(xxxt#8UL zFUp5rl__&x&%LN96W915+^tKiaZ9W8fhxG11FAo6K=f%o!?j+ z;MHzl@+`4^##KtnDEW=EbCt86F6RfQR+nUXu}Ag3RdjhG#*oibT*y)e{fJT8c^cp|!6J$(GJg@vsaQCw*8>y?a@3e&fag^gL)D6b5; zSF;+FNitrk$?IQITi(a-t2SmHr`ozz^7l{mNV5siWEi_~Rg`iVxs}~saJBTRM^%1Y zOx0UnRe;3dw<_Y)VZ`ZzZonO(V}r%;#lD@*!-{W<`LL~|9McakM&w~u>f#b4631%o z=|o~m@Z#K=NAlXoHos~Xa}_&A!nPU?@9q39UrbJn9gpX;@;WN68&yu+z!O;HvFk$e zD^si%hwZN)I2^q;|6V%SoNrPnqMF|Nlfch>AKd8)-bT%`GiJ|t8*E(`vpl&X4I(gV z*RPvy_InK%k12$bGN;FlSDG_a;tH|Jw)x&oymuaTw~uk>c>pRXc*S3d?(}v3*(|Z} zO?=@YC?TsFCL7B~$2qb$E|VBZ^Y<`BaW2uiE;Evqow}pW|2Z(U>~~G!!IPz-JSCc> z;U{hrJFDVQ{-O7+W3}+^@mrsc-83H`2O+KYr{OkPUV&haT8jeLub#^t2lb9wa;LA$ z5xA8I)^Kb&9JY<>CPgXn|CYHe(vwPofonyc*|mog+-&f~D> z+Vs(5dyK7n;;3R4>HB|MN_>E}zdh{hzAIsP>K|8by*opD)K{^wrPbHpK1b4+GgViM zd2S@C>N#%r_To;kd3zG^&CV~7{{^pLeCOf1dJ&777Qh!|iYX;9302TSN`-7Sx|B_g zi3p$z_W`kaR`Gh#NWs3|+k99<$G8iOmeeM3{y76oXV3P5Q8k!MRq#b9E>N$qvaVxmNK^!WIIEP)B1 zH3B*XI^#>OU?I;DbU^^or5WyeKc4i)*>{qt5WC8_W#1_In@{5zE|X((w_p-8r*MAv zLDP?Jj>omSx(Hrc^>Obk0`xF-f9U=zg}V)bi$&*qOrMOiKvYNE<9kh?wKLrKs{v>m zgqGyRf!piKfTeiB@yj)mbL(^9O=id^aj#}QN_s`!w0MyCw5KS&IZxSMrp)g^2Z0R> zW(V0GXSZP59tHFHZ1r`}jR`TisyAnS8d_l>y6D*dYG0Fd*f901BUPC#b@>mH?=zoW z9l#|?kI5uNB#S6_0T3!Q6rS-)PM>qn#aAEgP{e-O7LE}PjOjIQnIJ*26V7QdCF zPl^7Bcah{FxEipamF7c-xA#rK-h={Bf(uhABs#Yz>5ss}dvCV}!Ot-egQ&nB`pMAC z_-Xu*NtP`Kv?#=n=F7_kjmOOoemS^Yrx4nMm7f^IJ_7pUpR z?sy_Tk|4@VuLUj2C*rS0-~W6~Ib>HRyZv3ZY~xRiNP+G}X=HlI%qJNy-$=*_81d&l zk0@+nYHI&;lguaN>|^(Fe!Dm1^=v0^T`KhwwVLm3o3`G&HLX)3rsaI{+4M(AcUfv0 z*)w2q1N`khrY5RS5WEl1uZjfZ?Tvz6(oP}QlN-2-KjK?oRE_?RbQZ_r1+Mr`f%TS% zNd|VV2d;9y|Kxpcy60(JJq0W4ZZ=aWOLgdV-4LsHExz}uRkpaKnJ!5G3sOS%MR8bF zT8BDZ#@omMbU1IJi)}68@|B@89+NNi>91W4a)-tw}5zZG9F({~Se9 zUpdBe7~80|@%g`(9??Z2H8fO21nAC%-vAWZ(-V)W&Lq{o>UM5-8L8~3)^}Pp7@_Ms zH$=Azj&I>J%L7#z#hbT^%Zr0#>(hccvR0MlD{qong}6mJOqu1?f2}W}>RaV)+l5ut zJ9Hc8S@!z@v`iy7a#3HCOaWBb=DcbM&yI{2bzr&D3%BBue0hnyELW%Ge0>#{tJw** z5!sqHi5;0Zl#f1MUOs8>qA(j)y?rOFJ+isSUGiFaZEin~#2qiq<<@7>XU+8o=PM7D z4wI+v$8=kIA0pioMz}rrMx5SNI%U1~-lq5J20{@fD0h1YJ?C1&D>o>@RpHl+SAV|h zyjXHnqWHu`B>LAv!VkOhJs(JxH?#G(rIo*V*Z9LRmetsC8k$tOJpS6J44k9LRk7i% z>kFl`T@MZapzW~Le{SRd=$We)gZGlT?U*lqvlMEcHTnMTS_fjp4NW(*lH>}~h8u@G z))j-J!p^$8R&e{Q^^?br^3q)dHtrMU2YFnQ;UmX?ksY?fNW2y6#tAlHn@eCw4R=~3YAmwzuj`7WE`WzLX#VAO2gJ|a?6fjq zO+`TUr^^$244>~iFN=T8jQ;x>MozS|a#~FoS+GUeNT0%X_I;m?@2khD{;?(D2zcs) z_d#Y!w5yOwrKBAN|HxYxSm_j;t?gE|xoV*qs*nhL2{Z=8!wjw8r`VFV+*xXmWPZnd z788DtQa#POS(qY#4*nx|#Eh8z{=`xDjyDRTo_3xGEF4CU6%0Oy0-pgQi0u$6IzgMC zs9zu;6*%K=-GLUT^NR!z?fbt(V^H3dg=o5$fK6JDvx}ZaFGk@s+8p!?@0o(J`m`Zl zn@9==mK+NXbJ(J2(xrDnoPz^+leff#64c8*v%l|%`{(0l1tzIvNuTkh=h_=o+bf*i zlJtqfMv3r?l{@ujBVfRJK<^K%NOumUKMkK4mYj~>pi>`CxOHpMj6W5CfSF{BkUnp* zWCzo#0h|wHK=k>uq%lL9etYktl(haTCQWVM>8H%EqOoTKvQMJy{Q9;!MH-&QekPlD zZoHM_I(VxhuBxTQ8*h%1q zf=;-B0Hjm`7`&?YdDw4mFeiM(8(KI^Z!{?ZSgmaCCUC9sC8XdHmL8z{KFSSwJWog@ z(NS2yDYD!3E$`HtdKU=|yDgZ47JG>HUxO;Iy(#z>CN_Um6$_pCM8ti(KkyA7y%83C 
zluco~c{_N-KT4kjz4Vq7ms{|I@W8TKx+u1r>V8;jQc_tH*?^%-3ikD!h2h>a*wc{h zT#9Rvp37Z4a^ZXqp{;VBA-5%u)>T*Zvk-889r~l?M0@V{kle-U<$w8>>JIA~Y|r6( zFDu&)TBGa@&4L4hH{nM)awb_9pWTR<~xEm$ixHQPegtiG~Kz1$4%%`H_Tw}@U>hm0!D z3uw(KXLoDJYg}l^d#Uvr>!n9YDz~54Wg8|4(%hT9UvyMEdbURP#CuVM>jAf>{JPZ{ zeSczCjMz|GE!}+~+SEu`VScq1HwDD9o~?F?)x+}PP7 z-Ue|4%DKT3@sn{IzmWMA+3HutK{yTZ%#=>p>u*!p4(kVJQ28vYM?Hwpz<0qlKi}sk zm2soa3If;fFsP3NA%A?a{MFy`@3d&A( zgq1N!jCe2Fs*p%HBaf;Z4PIVMDvUrUgvh8EbrW~2mM}r`PR*%DB$jBMe(gS+q$6oB zqn`aE7s;C5G5M{oacrkb2inW7sK z8uyPSG!%Z2u)7>Q4YlVtJ>^`J9{N+ow6x+u(gO*W4Y=AYxX2H=$TOokB>=S1qnD(E z2^0QU6%U@zfAH(1IvlBRtQ7k8GTe65a6Z1=E^pD=lci7tTg*!~ipz(oRC{Hp=lzra%-ddU9}?DXl0TgbgRJ!Q^e*Ik8o2L6IKuiX z>OIf59ns&DdfHC!tOTGiFW8>_zwK>DdU0;3!f4xbvZom8i@M?1^cDusM!qv4AAHO$ z3=sT}mHmKpzx5hY0v&B4{EnXEr(=P~5r=F9Epsy$oKIzgT`f35#;36D*MEKJ!hae> z)qx(U{o?%wfNSuh5CWp(4m899z#dvM-sMyz0JByC? z!29nRqeEKm{4%NSI##7XqO-a)fIs+-mx^;RLM0D2K3&g9Z{8Qv1ih_j{}Wn}L7Nhw zTk2{w68F8u-a-17Q9KVnwR-LV95WOje%J3XvP~-+_}QM=nYi|hSIo7pR3APxxxwu^ zfRTI3Nd+OwaR8BMRZegoyw=+=gU&2Of2Mhz_*?Tmon-xR7vZUt>Bd4`1b1FaIm|JS z`7lvZ7F_=xB5d*n^Z{_ANU~@gWL0N|yBuC6y?;?}vpC$v@q|=wZeHiUMAVJcxuX$@ zu28h_9*p}<*6v*3N>rf)jyc&fRT)F4bb?h}{IKTg^4_hX9x>JWf{tA~u^+zvoquQ# zI(K{ivcCFufj74@IYE*|Mv?Q2J;{Z z>opjG^cQ*9iD-7@1UpOFhS4z2+nU2UurI$$XgjD(PNflp!F3OY1uy%P>#dfp=bK?M z(etXV4LMo$CfAKZoX`aZ#SW69=39=p(JLnLIAdo658bNW&Q+l&$K8a|#IjDe4wdEO zt`Z1#dTD*|`eX}py*fB8?K;@!l#vy*QU`W;9Afz=u_}3xtJOU}?CK1ABYtz9zR1Jc z(Fcw-AH|OycE1^}HTZG#Jz%0=BJV%v{F3vu{}nT}kx_e9eiQR(n0OO$D$o;fQfsW2 z6U95R&fGrZpSdGvR&IM%&}I|4S;a+5qjy01Vy+^9L_F2j zts#1bQIR;vK1MhxD;bVMG$}mYovk7~OnB2nZ1K8~{;bi9!<*rY2n+2BeFs3E)z*gR z?kr<`Ta4KggAM#Kk33UqYCbwB&Vn>$RdkZ+HoYiZnyg~+5f{%;CQ1C#nvN)l`9!|H zbv^C-$4&|6x2WWJ$jbb41~*jan&>s{^(W9gW0)q>LQd;Btt9-&SmjAq+3%N&o*w|X zJkM4KetDu5yGA*4uC1flWraMNus&up**ypSBsZ=-@vQVPmM2yQeM9AT+8EFgKT*T% zthqt$7}Ih%_h`wM%J#aA*V}^j@$~G;9fIAI@1oLHtCy&E8ogT{ssMJzTlgp`iY7(m zQ}eHpz3RxUqP1aD0^$U}a}ptCRID_pymeW#1+933GniH>(hOlGBJ>I!y1M+CnFN8`C>d<58)lgAa?do{j_ECFASRnPywtv{_3{+AX=SK(s z;+IXU_b-5=&X>PY#V3!d%~LXKT3igg%`Q0`E3yBHU&a?suIxX3KytfaB!NWW?4;qW zeXkM3?mN;rTNvc+5Fc2V(ff9CZFm^7r5Mg4>HGe0g7buQWO4Aw1JL8Imu6#gEDz zycZZI1Q_eekzE1WL%uCxyWD$^{hf}H#PadfyKJ z$uzKLU{*d~Bq?wcqRgwZc{=&;0MHEt1c3D|pDXeIbsA4>l;{$M7j&h3tgJ&T9(55= z?6!2tsxIa(3r#|kgydvRw8}8Q*ZUjs=sr|fL*TwzO!%q4gP_KJuYGtt$nD2_>4T5bra^}fHCalDatLG<39m$x#67-SRxTH!i1;;LE1cFT$LFMk zk*YBcYuM>Lacsn=WF8(sHGrP5AYOgn2gaUpR+>(rw`6V!wd8ErxW1PnQ?$kSg_(&G zrst{thYZC$k9?&Tx`n7Mz9AUkaGxf%aK7g`REB(xztlDBZc>_4?--C8;!cC!$&{8S zl6ucXldDbgN0s5%lI=7--lCnzI82;D(({hr{$Y*7NJJ?|y>XEaV<#nkxZhZFIKq85 z*FmaOP!Kd>( zf{0#I^?u)t6RgyQ4Wikef5};tmB`qxuvD{bDM+S0uiyu7t9-+)R5vGztGE;xYx;oP zCwVIDgZIVH@U1R!7;pnl@aZ~#u}0og4Qqwz!2Zb~ja*`Da3#?U7&$}C#7inc>E zyUzAz;coU+l8d4!V0+)-UDtL@^aOX0qj7(HIGTfOix4{@!2VK&1Y#B#nz{vs5|k}T z>$_Ym`tv1z_(*2zv$AVk9ok&9{S+L|a*di7d?4Yx#j4N#&!hG?%XjmIz1Ts)x37A$h~g zWQ_M;xAeD@TbqxG|F!tpiJ%3(FBbpATdu#uq2nWIteFIqj<&Fyz4Bs6>o^WXkb^>f zD~qj?xOi4447q8Apj#KM|L~3t+nuEB@FJRg9sQhET4p%>_?D(t$06*R;Rv>bG~aDg zUJA_IJRg0zI2=z`HqPC*1 zH(~A1yx1}DB6~*)DDOlVf;-$%gW`&gxh&E8(FkY$sk#;Pd#RE$~j&JCA97SI}cyA1sEB7J|Jz`$f-2M}c41;xS?s&a5JDyo*8I`%sN{_>>OKVsM zUw@cC3xwo#O)n5SgHsYcwuik;&QdQbAx6I#DO&#j+V20R9R5BB{r;?sj|-$=Mw(xT zyaZm;T9djXm>jOt_T1v6SAHi;dB1UA^gBPmeuv)hMFLNIhYKu)b)QFW4rcGZ`}4!( zD{A@OAL%5>z0?L$$YujXl{Q8vEUG-|xsG*Iss63rN91huQt@9w8tOy5Ebuy5CkFmN zlLg2Oh(VbnR@0fFMmZodGQC9-^Pts5(L*oC!XVn+QV1|NkL-?o^)JkJI36B|NwixG z^W(Qq|L5@?&nH}+pB?o#191fHJdTPEf1dP|I=kCmbZUnxpb+?TPJl(0aH}S##r;WE*^A5|_n?V#|E>HW&0pt=a19t#>${7=2GT2+nw8}g< z{*$8@TaR$<{4X;dZ;(o)EM1peAtJ=8b$(mdlxghuJ;!EYH}G4u?`+C7p{r=kiX+fL 
zKh6NM?MJeo$1Xa{*`Bzs>5u71PCBNR$#bJ4 zm^)82I9+h6k=Rs@FV$A;$0zcoOHwH^^6F0Lh{eFHQ{)a6;LzX2NR7ssM~5FK^d7OmxTvTx$={nA&pQI1PP2x-Pqr?0JSHlt8yPo{gIR zuk7L>yl;>qqS~@`Xu9l4NpG*}vj3qb_Q?Hh;G*ANEu_lXkM8eS5n|q<HdC3AOJ@+#>JZ zuNnE6bdWsQKy;F;{*M%e*M4pXP{w;eodv|tfVWsgxBrU^Kpzap!`?A;!7_aR%J6^8 zXtT%{5TM^{FtidXdPbQZr|0vhax1{xZQI5{UtZ+Ki`r<>opSEy=w44;uFt56j@QLH zM|xcER{Hvnc4hhHjB@K1;EtkXMmpTzJ{UQgfHlr^lFB`2h~{_jZ5yMQHi|@OO-xP< zJqR0M3XIF{5v)sPH!HW!mCwz#D73ROGm~w9C@lDf+`B53W8C-H>FVKczF1J**~o2q zFn`t5EjCl0opb>+%d__!>MB*Yv+-$P#qT?*nrGn*+q;tVG)*DcRu0`z7ZooXxlv@o zYvrwg;Acsmzq>?paf(ZkjP<2NLIn$N=0oKp4+rDgF_g@w!VRgQKIyH29}POF?y4+& z8qCAnMu6ynGZADDXlnzO{b9o}jTOMh+IkX~2CdsJYIE`W->1ehef)57!L`)N082dD z=OTI0E}=*e?_@-euy0AXu}a(lU*xKeLjAw#cgTdz-ef85oW%tO9tLxTB3yQLCQ_AD z770=EBah;PQ9;Df``2oX-hRK17a251HAC*kuH8cqX8jhp@hjB(ykQ5T;GS30vrs}8 zrWW6BZ%5@-ixl2UvWbc)6bn?PJcIA>V%bx@AEQM+X4k{!Hveo>C#SycSwVB5ycJ)$ zZQqFlS%=K+I%zwSQG|}lhMZzF14VmcaRKNjMNIye#{!-^l_w+am>lxbmL+=s6;EfS zqjogBzW$RvO>G#E>-)kp7BCr8{r~jSeIVKhS!-{tVE;22MW>#<@0!>NRqf%r6_*Z? zo{%~Jk}w}0gM~}D=MA?(g=kHz-0FW(lm98-Pc`jYm+-J9{}tt$w)IYINpnu=qku{a zF$J@wt{SeRfI-z^vAHnW)4)H4p2M!aaOp`okU~~BkN%j^KPW9?9^e9tupya&&w+aG z+CFU|3aTMAeZvG#O@Idr1iazO`21@ol}gpNh)lWEr$h*&TM$S(DxC9B!|ls=4VUFQ zevoV9t$ujZxqagR!>)yoiMlY*Ej0ikrSY`V#$+nDbwk*Ng{K=Zq+v}g2=kojzELVM zwr#LYbjq<2?Y=ffWD6U?M=^TOhi2CW@0J1`FjH|Ttu|JY`q1|qykIEZ)MebHO{M+B z*e4P?&e!miqJ>PP1F1_MH_F`l2P__w(8On^mcrlAlPRn9oM6GOA?{EpNs>A~w2m@h z=MU$3*1)vO!pw;n@@kgzVzQ6_pg)}bKvu7+Tp#`beJc9d*!&S3Mir^nlh=Zyy_ zXLb?U|{>*HU;hp+#=hTl|iYT{+@ zA#?0uYp;}eyDLv9VGBQ8-MY-Sao447w*awl@`ZiSMVah#-s= zBH?4m4F?OUtn3$|PdUUSLNiHFG>DPoz74;TjrPKy>PF!=B_>UQCvn>>8KvW*lYj5f z#|rXG{ejGIFn#lOYSu&vmelydKuyJn^?`<)7 zJ!o&lkDDfGkpeR%mg%`|H3jF!tv4l)9|zV1!oO{~d&Lx^;F3c3C^8b#1HR=ij&&ii z8gj&r9Yy`OheobYoFNp7$Yu{2Q*Y*@fi{%D7CpjvKXYs(Y!fRfOsAreNObiDig z>$FnG(uEJzH_eH^$J%E_&ak7nVy4hrk;TS2laS+7^@mP973u9I;(4J~3>B<27hX*5 zj=v0+Tco*p-~XY^ZVYN`_1%`x3|0TG&2k1Ajs+{e;H-ICK3_JO?M2QxwkcdlgJua{ z8-1a!SOg_0+IOCi7g+8aTL|R8v?&~<4y4y3z?vl#0vS0+=X2tqLw<47H$R3CC+K&& zEWnykJB^MZ_X~F3`CtnciyqtCMXECR0ObA_9NmIgI76@k&VM25Uq8`~p=bLRECp2J zNWOj?^S;G>6yyX$5$N4Y7f^C^utT}&?>P+}e!e>n!s-To7T=1;imVK{ME#M*KiLj) z-U+FSC^TWIfC}%Pn&C*~A-oCc&{5wIlF_L(WZ_ZpUU2|^tcbISFkrgDZF`X)rCOE# z)NM@J^SstAsW${m;o#Dw`&R#5SkMhUh0hG(MgHIO#pH*?SLKo}t*@V~vkQcss-+Sl zlhT4GHLq#?-OW?)zHxsYzh_IR{sTSx|8Z(TK!6}mUBJjR26_~-UK}r>U&`1pPVqW^ z5wFhng|25Ut#$~C4hZ3B+5g(+KeO;^?=XI@cdJ0keFa1}Rj$rx^x1(uB1TfP4rFXX zr}&1b;^zHl^8=)V!Y$q*lN#W4%12T~M4p>@r6nL0rSHHDxlvW8SH`F2=?t4pl$_8e zM2Fu2z<%m+W|PiZ9AjWsc?^|7raY&4!w-PvsVSAyYie?78d9!j76>JK^83o1S>@{% z+GQ>vuvQU0WH5&Wo-ec=5+B>hSI zV46~)0s0n%zm9Hc!(PhBP_w`PqWx(*jC?Aw%b} zDCK8_S+N|SnToN?c#yC!g-DX61`L$>4GYWAYa5LI2gP<17P>SWm7HBd#Z)9P9$BDI zoU!!#Q6e=V+qx4Ac=;h#Nrg}fgWM@+A((!avU}X0Z z*S=>MpRBFgCY|tE`jY8ltG73ixdAC+?lOr74ZT=;ODUh^;bbH2*hM8V?8%^QXaU7x zdy>e&epO-)+`Lq_*?V=+!5cG=oW&Nul0>~O%fTbX(8hUt^e6hFx?2#AJvWx{`aE~IM$XkYM$Uf?Zur%JCmOEvxp!`53$Z<^XwhBgR{oo#=LOK z*Jv(3b7;?DyY71B!C5z-a1|7584=?7K$};z@pM>xoZS>$wwkaQo4SK|YvEmR%Y}>9 z=I=LOm)4Pu`Nq2!JAa$!C47v-rz%Az;bE$Kx22waL7-0i325&ghVnumtG6#g1b)k) zrGs4g=)4z1apM5?1>(vC*W~jTweZN(A0n=w54-WpG|{!^(=q?4aJjZ64)r_#CFgjg z>4_jSOhy-89|Vj&F-(en;upWui&cG=4@#lKq2Q!-2rB{cQTaTU=W3{-gLWM)sDLJs zgKfK|CvjUqqMdr-J*{*B*dwHG)RlvbRI`~8WzftojuQCgmg}Qq4sQvt zX+nu{Mwgr6di5H|(XZ3zo1YIggYSY)&(lAD(VjAD{2EiBiH*L{7Uzx0sU&wi5oXCmg=OEnx7-`7xqZ8;(M)xVhS;T9 zl)O=VTS!7sAqq!9#E~B%#hA24K}cLgF68tQtUTEgHhq@U<57xs#gC+w2r*}QSyl%% zPU03m3?R`P^}ETT%oosk=lP;+w$%L?d2^AgRRt>zg`~Pb$B-#`3-Q~sDuaew22r|) z2aTGklmPnuO$D!nrpXEMU&rUgIPmp4dbVGtM#d1WGsSCeAa074-y=6{PoVbm(olJN z!yEirowjouLY8cm*!=r?N{yMozH5l 
z#>@cCtkTMz9gVVz*paQRxnq;7l%$^t`JI79Xpsca2fgUvL00WzNrz^Kr8o!KB3TJ_ zy}({yiEa2WP$#PQd=yK_-NTIG&#GdelL~m} zk@66MpKVD&ZAZ&N8ziz&+{A!Y?NCA84iFp^v6-b&TE&d<##G5*;1FZ8{ z6R*F9&?F=m;0vBt=prKic_ha)gCLb51`(ZSX5?+)xP!efHc3p8Z~-N$ny&TZfJJ+4 zB$-5@_xO*H6^-BXf;@kD|50D8z-R~pS83BoeM>#3SjqbJ#v{X@h+Ue*{$koT>z-Y7 zh3mug5cdW+&ftr4Wh7IvA2h%~0&N1~G0K`)l#Wn$7C5f{9m&N9RL@|E>qBn$-STb_ zyptN6?+Eo|h^?n>Th``3S4^P2LK*U@sG0vdE){vv=#Y0wY)LEv^TEl`n1NKuj%YV3 z==>~H?oM~Za#Q{wrQ(Mo$m~-lXw+Ho4@?~&09kr;{D~jv*|~vkf1Kpkw)_NPc)m`( z^Vr_IS=WA$XxEa1dEkY!yArX)EzUH?Q{?vDv|O_x1<({6?*c2W{QGuj#3{a6i=W=J zotcjHigec}YTIM^VFbMzKvfXzN4g0+BtYwT-+h>2jVn5?-I@R~06M`yj+;;P>uzT@ zncUU+S|1#S!IACh6UX?(2oYsb>AaC+SB2v~ZK97=RUkcwFB_wefFad+qc5-QC~;F2 z6Hd_({gmZGfVcVg^vjb@k-Vj0r%m!eKXbHCu%`A<4W>&t%ArS(^kDJ!(2xkR@9t+K zc4Lrv>2oF7ZTN4n%13s~=LF;VR`rEuo0*#Z_H@2!sHH{WqVb0jU{HdlAT$vJfsEr&q1hIe==VoC z4vizJO&33mZjQxa=%*5UOWfVWM2kY4VY*zEZ7IYUGL|Xk`IR?z zh(yK5Pl;AKjh1HDU7u7VM4*(KHT7Tz4@SE1w4rZG`OhCc{9LU^h4l`|II{-Fkw?ep zy$>q?jbnQ=Vrd1m9ZYsYin_TN%e())^xRt#**M@%ytDWt_e z?e5KrL|KtPL*yR!U8Qmi#x21TcoG zly$;a=QlF;41PJKYYZ5n`r->zd*6PJ<2?h-Z$mzao-dddeQaCz=no_eo6wggF(&=Zvq?-IjVl;bt?oX@k(o6b%WA9$&7!gi)Zn#%)eTbJY$l{$ zjju*kaP~@KqV{6rTi33W&D)U4eRJ%X?WMw*^m4!dJOsMa^~cnIk)7Gr_fglm?=@`+ zXd>2!?gD2_KFHq>%ryRQE=K?{@OSUD)P9ev^E#{T_4!H!ZoFiL!*J;mp8OCtCiI`w z@b{!j41jEDe;cLK4==Gb=unZ=Bb12*LS&jfN6RWg38!uWESbCBPa}+zETz`ZC6++#V(mi0tCSNoJgCiG16j_Y1~ zI2)H{b}-8%yV#LnEsUDuIJEbH9{^J@qk>dg1cF!C1`v&M_oL&r_7CG{NF7G$4A5mD zUDUPcrWj04z|<+DNrlFIfc5ViW0QBIx@$ltM~x+Vg)b&Puk zuD*9y&nr)KyEgEASfUxBH1xjDz0ttDJ1#ISRT7z_BU4DlzGo;GlYj5wvAKkb!;%a5 zq{V41vz_P5H!0Bxh=l9p#Ty8Yv_G@JK`JU>ftU@BfxbPYw!8hI6|{bXm@T=;nQ`O1 ztIsM|#fif`8mObcj-P;^#3lJuwoR~(*jPD-T)J3!YrExj7F~`EyIAxMq*i$}R)k|( zjfx|g9V}nAL;Dmk7&$NdWsKVX;G#6NfDm3o4h~QG7F^UjFxm|MIZnm}4tLoMtp9d5 z@?;YSe*8>ZV))`ggX`|+iEYms@Kx2E*32g8%sf1AXJY4u2|q2LU|2r`9lr&4w_fu* zYS}NRFiAv1?0)}W)-u-qTVK}Sj7u1>nKL#ACkYgDpmvan<8pE;-NcvZ84JJG@8 zH-05SR5Y&1LXjftb;Aej&a+kUGu)0UL+{m-ynl@j|N9wA5@8v7k|$?(J*-hO1q%xt zQ```-y-@GktmaN|?(y>D@ZAJp`)NWY;nj%Fq-{&E?C`7ONA2lm`pY_cj|jY>CJ%VH z#E3=K-`2mG64Da~X)zqpbqNA#Tj3{(LIyt1JIu_l?DXpK*m$o9xRr7K3kG>O=$j`l zBVT}VsRY>)W#A<$<@L+vL8>>kx`gMRXGhVlAh;06b(W0j6nrj-aCUiM+WwhfZsjJ5 zt-a8o)O|P{s!8T0jTHi9uM?kr$Tzrn41-V~ZsEQDQH}pLq037>jwvE{*O#TBcCreoxZeY_F(=l3;og>>-a%kyLm7%eeJr=Q%H7J7dp3W z1ElCro;lthFiX9B<94+LtAjjIpl%4X0$mv2GSFt?Y%gD&QX>1Q#7>+s^d8qYsDQf0 z@E2iwlzIg7y$sRW=uvZum1xzxDzSV*VNei_;d+jY)08qL_3wToRQoDFdQJ9NNE3Py zsjPW8bT@Pux~Vg(Kj$~c(Jgt7D^RF0J!U(8>Fv8x3}QPceCX1f3qac&Ns{apmvF4$ zIp}+I8#moMD$d>d_5_6o=T{w+WW8)#&fM10r8}j29jK}+pzpgiN+eJA%^-gL{od>C z4R^_l2+V4w(4<~-0q1h(j-v)2^el{`_QErGbxT0g5URcCJ6ABZFrVHr-Ji44ICkmQ z9ujC3I5UHt3=8q4q@gj)4GX-%K1$6;y@3CNeFzdSdz|zl@h>jyhV!pQwRKgpCHF&y zyJQL*e$*!mdW?-^{+oj5rcBe(w*5Yh-_XWiEBO`mJSgnmF^1R}8h0Pl7F|@CH?Dse z=H|Sy;nJn>mkrCSp$W$@7c26Z-Un- zijqOeic$svSk>Xfb1t?{gQfX9qyTrNsJ%mDuM9~#Hh4`#Rvpo9Op~-qF_pa;Z3{{s z!h@mC%@cV}_6Sz??4@fXn?FvpJ1_n|Xg=uMO|9X}wSw*^obY70{&jZBHsS=89P zXp01I0XQH**f)w$dQmZnk~E(*sV%mL2cxB0iDP?|pn z>FX_?dx9RNUf)=+E4vHx7)>zF)3D?3Vbp$tEXisx&P!DhC0qizI3I3y7SfPBxqXqO zCOt$*(BZzuhmwU5M3>gf3 zddv?`tT<=2@)>J5y@)!vk+azysz)>6z3HCHPfzpH-LjN%4>!A=r?vg^HV@fW?hI1H z5@zS^W!UnCuhq|G^Ju@}uu#C+2|_&dyNOjjzdVm+-O8k6FPJOS!$XyGi|`Lofg#&B zT(Lr)Ay0QViD6VHx<5aBGK4^^$Z79jJTb)!W}3eH{g0J-QWajBqe|EFdl{8j&zQdw z$!uieO<8^rstw3)d&ME4-Zz+??1M zd2-+0eaUsUe{QiF=MH}PF=y|Loa~d>SZFo%fJpQf*pGODbAQn{!7Ot`SaL{gJ7$T(o<0 zwKOY@|Dj~N5m!H49c7#=n}EqLHLlMGsodkRJ$@Q+Nhp^^+Q)J+K$^AU;1buTa~qJO zt1L6WePai^rO|8!$cwHmknLls4#G`fB_A&RMBX4%7CsDrgh+AVlaI#eg#!ISTt?q{ z#rZh^sw0;O>t*_)INQXvd;Cm6j5me3w#~q1!#^(S+3-iFi1dvkm@>j~C@3E!ChHV5 
zZ4`eXU&9yR#qmk5=pag$+<)__Y4zyb*amq&EWqLrD86K?^qC#^s>R4^oS!pdtC?K= zVzO&1&dZ_w{Y^TH{^#@#y)k#+K(}}~Pu(Qxv>C`0&Fb}v1#p2fnP0oo07sdxI}kkpvf;8e%t7sNrTGgv3}a_I5Bn++06(siok~120E3{G5I? zux2sKa8xb0P}NMDNL2nae%zS)0Cy}QpTS|MW{_GgME-VplE+ zHY^+qoujpA=>LmVHVg8zat|$htwhTu;459LeFIwu4dNzb6K>0*Ur!k&T~@GvJmP*= zKO6kLB8Z+`7#taWL}Nw>l@xHM2_obaS!MMu*2}Mp6kY2DpIX-@2pN|n1$s^f<*=*W z>xE89zdA>xNh#2VE30vYwV9Pf*~`6N#Mh^d9lt*QF1r?3cuGEarSZ>L^1qtv3lrLa zv1aJdOT10@#&U9QkFyn_3a#{6tQT*TCi2%5+1P_kTVX9_h`BVGZS8vfZ*%vXna#B@ zk*ax+ch|+S?0(y@P*4HGk=#Y|^%wkwHbnFjB2Ipz)AG4DsuP)f?d~CkZTDi;gWXg>T8(g5MDE=P)|pr7Td!*K>Ne%KRWt z*z3oAaTiT;ZUq4f2ih79DU{A;=>&bLGLUr_zIspZ(KL{A?R`g_jN*rWoqOV`DS-=0 zYR}HQCT`&$tA7%rF0bqQ7Z+e+rRk&n^6Uu3 zYuCtd{kCzBkA0d(|H3jel|DgJTxT^% z)NoBxcNP79-$Iu+J0?$1v9p5N6GnE%(`^S9W9ZblltP=p?xpS22B9RzI9UO_uWBBR z%Q_T1BmQ@^-Se=02a!u%vKdG~~kt5S0)1>RT zwEFCMCUe0FW@DbeX=A6y8+H++9ka0`yTUfi75K_!xl8_&e%|TM-%;^lp??Dh1F}JN zq31&Z(|6D-Qt>ZwGzo^PXOT2;bL-2mI-JGy?a z1_TY=5%8 zfTc#Rcp?^NjEKC!GT^bsrrb+SLyb>kB{lMe3umiMFZf?DI@8@6c}=F}bBk_6gr&%_xy|W@d$1}3 za4Xc|dXj^FESX$!O(dDOCRLTzseSnZaaiMBSdBwvd_4E zqj3hIhgN%p`B}ZD^W+qAKCYz-khduxR~5HtlJCA`txd`IX~o^|vSc0og{iRXKzI<9wrwYI%zs@&QQA>y6vxmR>Ak@^wpR_y@lf< z?2b%0?&WqMsCNDjS#KQ`)uVn74<(>p4&CXFq$}53*qc6xcc*AoF9>pmhgf znj&w_zixRh&TY~qOo&GRKGgEWR?C%!R1bjje0i&ejhs0>XQ{zwi@YbGg4VG*tZ90vVBCKA98f*z(zQZdqwApxN6L{wjIjG4wZsNiL&L#ovDSx)dmH~oF9$Bg7|#52Z0 z4a5JkUjH?^;tj~4M_^?9=+&RsoI%JbX(925JuAEc`qHK0@86x+66N*6#)Gm=AwEk| z!RW0|e$q+XZ+Ucld9jFW2|gcj;y7LO zb)}%D?3El<6*CDPcrmt>nXC7dkF`()bfJ#)vV03nO4^U+5*{JN~ffafBIAoOUrBOwnEn7y^^*8p^4V+fB2M3iq6KJ z9-sjGCbQvkD9scuD_CPGAW9p*dOkkJs*-W{bVk$d?V1?k08@`hOs3$^CjCRQ6^0yHP5i>hHp4X4VRk)Kl~0&{|uLTsx z^s1s7e|U0E!bV|bp1YUJc&I5m6PeFyNr7_aM;x)*{b${E9#Svjqu(QCG;mB7eX`6? z=73>mDc2biYTB1r5m(My!(3@(wN7Q74al$m;xgVegF4&OJws%CPNr`!IjnOd5lCFY zOqVq*%XgdBaZF$&8%ONRLOu+>GI~VX0jgD(Q88mDA1UtMT$fSbV|EzBy}79E4udY@ zF5`F*60`xBH~F(U*UVkIjloPb3$l#m%o?BA59B~{tHFrmJa5se*iHD|api@%q=K)< zM(HWX)f;>3P4AeIUn>=3iek=+?~a9;HN2~vp<-ujk7)HdjG;u6CylD&a8SsTL}Qg5 zZ{@%YIXbm~cJ^U*`8TJR4=T^kH&?q3+gaX=heG{Lb66V|ea(k5WPpgUlcm0;R$S58 z#8~D=&{k{fV>hE6Z-+1+oa*aO%uBWH?RUm8Wl5VmZfrXPeZs5VkX6$NT2sZ7eijxM zSZ;2v+HUNb%d2BIuLqVVRd5yX?#cac?GKv%i?T0EyzjhGO%gTf`2DcMubcb&Z_n7+ zI3`Vc<=VGoup{(Y_R)>ZtVa+!ITGpIA3MEPmgdBaJo_2ydi6tT6>lN3nu5H1DjN1f z#OzW+kY6zA{as;Nwe7ekFi%*>rGtH`QUZ(VrC2?qVT&bI5wJ>r#t@-S#wYtE(b*GH z-1w+9aIiSL1O`qU9*tlk;q_HtE#9--<(>-`&Ex2@e*4N8(dBqnG;K9>k6O?l2u(4E z(XUIAj*eiySpTdJ1Q|CKyt1Gev4k`!o8+$Iv3AQO;*&zlW%hJXt0ABRFrt_hOs+~~NkI+%B_ zsm|^{PUaYGTRt*;UHAMwShc%hjUFp^2=2&!UuDgHM&yzKeGThlQKfc0l%wX;JcA)t zlNH+6*4y&g>m=8VUw-tUoq=z!Gk`9i@c+ualQCqB7%-jDU?2mIMuNm0iWk@`qd{1O zY3s5WZ?qz;Vg0ya&v(U=u&#ag+7UaZx!muxo=ohC|FBA{CFM9mOoTKJNAG4=BEF$8F` zMGZecslXgp<#Hzk54qCmc1^K-N~DwBC_UsITmSP>WL1ed3^<~=no|wEQt$GwX+?A_ zPfj<6`DL2;gqqr4cf|@0PW}k~kLrq%8L=#t4?;TpRp8}ilpSc_CbV~lI!t?4;p+R@ z(9(^CB6KGVf|YYRS4s?9CijV@4+`tze_Nex`QK9VF9GyUAbnJA_$3_o!>7H;6>F|^ zzB0=UU3DSQv9hvKUd<5dBiTfz!88{FTE`_xYo)j0oeyc~5@?L%Wf6^}@b?FN2Aa3% zO?XYg-{Sj9@H^SK^I&8w9`6PDS<~$by->Fg$7o}+%}wC0$m9HUOk)Zi{QWm`^7tb^ zVy@SZ1B7bmnqt&-mU)dh1K|D^3pN}Cu%8bT&o3>?q>~1zqJd-6L^lo4=n2p$X+ry# z=uKr3J~hzYkV)uQkDevNNMl%&8D7A6w1oLKC#uPlyz2Cw4|!;-hp8-YlzbY4!vw`> zKM?cZ$ZM7kkKMxgqNzd(@m7&c+I?a8v{N0tj^~| zt7%_n3$SVK@|6ttDAMH7VtB@ekt$zjWXE^oeq~H?aS?l6qI3X~bcE?k)lmspU5`k4 zT`l@^n=;LBeI^pJtbOk^zMv&wYxSZpn^)R*NngrJuuFbz^R-x{i7E{UQhWM9ba)`_ zii&o3U)960|5}sbIRcu5zU)56PYBJ&iR+(dX1Z8Boij=Qx|WAwNf4_q6S-W=n)Cv|kTbX?b0sr(I7MR`I0h zJ|cG6-Z7yoF_UbyvV<&! 
z@||$%(S;V>l~m-leC|o1tLu$L%k117H`6uOWwnlD;JyWBZ|54QniY1S*@vaD^G6#qASV=N?Fr{HD;mW0;TvlSlP%Mt>JUBCn-LXt6N<^l^=#@_U*udeg3}}^Pug3(ZjoQzwzG* zM0wrw@q6MtWimD#gBQ(r$--4_QElK}R@cp|E8n$DyF=5~Pw#(C*YW3%=oDSUxe6qS zLuXO+{UeGVRoDKq^FBYQL|UTV51E+x^?_JFg$OV(fEm*aRNYI~(_$T}I z>ykm%(oiZ`66+u26?2OSdasSGj(px1ff6PQ)0w(rUFIgsTq~ZzB?=554R1Ve znJ2yOYFLx-!)>b-+TYggsTcC$Z;>Rs(@#SW3? z=d0OJ>WqsLwTm;{O5aFaQRp8194To*Rf)*rNz!P~N^tFXTbOQt!f=IrGx{RoMa-Ia z%SC(RLD!uXA~4==zSZ51?vHWm)bNf^{Qmq^8%tF}-$alaFI7fF=hF;Zg@k|jU%0ln zE~nlOtfTmi!En+VYSrI=F@+8Z_gqB(XE^@XCO2J+40_OqgdK((vY@9H*{9lGmOR@1 zq-e2{c~W2hYQry=8F@XPmYhi5^uAN~Id4~DOuq~M8ib;F@{rqcNBc~^<5?OahG7`Q`iVfa+cN;v#M;j38y9V3w zG?M?L-Xq7yqVOZop>DbWl)XFfK-Y>pAk2i7$>bt;>j@t_? zX?n>#ted!^u40dsdQ9|rCX#z}HsD`P7v?gFMIb4Zw62Tsk-F*VnzsH&t|c+gbOF61 zKu%gDBG6e4yCfI~d;nz4g(smq3oEgE%-A*&fGPgBnF`b}35V6pRPa>1}J+?8vvnGhYe zc&Q>nOXxu^8cO&wZ2rK=q$a2pd@WCChV2E1na}|R0-K-Nx$aX}4jqD#WWgt|AxYPf zOGRI@?MtQY|4tA1UKWk`9sTXU?I8%?bP9}pV2w5=r2L-z_j6?Zb*zfP1DiV+vwi~C z{A5sOJwRfU|A~|pTvk7F+-B(2J@PTtL9;KtQGZ^Te$(0N*A3i>JFrsp)5qKF(Ce=8 zrzni;ZeMP;2({G%=SVnh5_~C!h(r#T-cz~3SsN~;Dx3d&Ne{T*WqAO7;VNi8Et5b$)uajDc+g{ghcuplQSI_KH2PsY3!JJQbZEEy= z(@2Z957|(bJnEo077F)+BEB}Pr6_qh2i7Riu#;6(cwB?{uNrti8F%y@s&XH+W< z9#*q2_U=`q>cv_3sdBi0f|%$y@<%i$KSy{2kYqTgDqeK0ZTxXV4j3NX z@~mUdR&3_OvB(KKTDorn>e=1`;_oqnLLenF*kB|u;esIoMvc2FGS3Pa|I6aio0JDr zga$z%yxkwEEH6Qd`VM&U#xe(#O@Z%_C{z8&_UCR#T2ku%YFK(=wjxtj%XB>V zb@YgdV)SXhEDt_=)%qqW>-5y@MlNoDPd_pWukYJrmb0xg+bZlV=Rm$_i(ge~S8ZZU zfoJu!4FoD`aVXv&B#xz78n3Nm*gShofij`_diyMXsM+FUf#8)`@#8pDeNVXmGXA|) zhXKQzj<4i?nyH#vTGLPGK+lNPeOY7odKwq$>F4Y&PDgh{Qf2p_WGCUSqxxf&$tL=w0kmH8!6G zir!i%GtA9unwIGIMLSu6b0#=b+;3DJ&1fI@wF>qIac33!VcL#)O01`C&llCn>go$F z)~4tNd_otAw&g8w{mrc#_k~97gHNv!K4Cj{k}vzevdFG~Zai!rT&Y?NeGI)SbJF)s zeJwamtP~Kury}AzmbvG%2K`-B1nIF*OW1chKc22>LNj4TuiiSvJH5yoS1*eCcTiuJ zu)P{aebr`nLNqKKeP0S$?#Mnkh5Y!uil5N6)QhO7&a3g6+hj(8x@3h2?Er-r%Y@E+fdn50-o9F`1$cDI(AaCL3&=$t zeMn+GkU@Zx?@XHt=81GRK5+)F08FAjkqOZ8?2)`Gd#yVw>8-#-`&tf_RWh5WQT67O5N8MfBLtMl_5>FEtUEb~@ZwEekX99NIx=;>+uZQ-s~}+HQx#Dfdfz zQ(4TD;?vi|`0GjPDhUzMNuRT<%48cXlW(sTuwSTHJPtyn&&zSwX)@p3OPK3E^UX%F zD9pgaHpz_iNyxt?cOJG8z`#nK!CHD!tR9;C*}G#l#w#Oip{Z-JC?-vpQE?l!_9}kM z4Wkyxz1EFTOh3`7_WoiRXbxQg`B^Q5$`C@*=;pk1r(lb4W}D-HcJOx2)FOPV1N)uP z8KUTk<}qqJY|9|NA^hfkR2{98Pke7Uh|po~Vtr98-ne?VdK)v+kVEhFq3OMo&^4Cx z6J==AV*O|a=2SNVM2!rPY5U)l{=zv(@SQ{8x&`JpbvRGp-h9TTnI!o*aR;qJZ_o9r zG#@Qytn&JVdFB1mMc{*dPsAPZ^D^9|RR{7iT=gi&u*L`GDe?RUGBKo$MfvGH0clsy z%SwLl2;yHgkCkM+&u{i*l%oXGgv(lyP~B@sWbUS49gKf^zi%01yl0=;w;CEt+5cUL zVe=jZoi6JX7zMx0+#mPyaH!d+{2r>Xz(d!94eaHrmBLbRU4~y8j`hvv&ArdG0B=EY zKy@K8uNY?Hk)>Pjs)mpjPk>zc+)})g2Ugn-t<_bPGs9WR2nvjA8KJGg?x*>Efn7?5xz9$y5k+WCioovNncm?Q1hs^_wtBg0w%5a zeD0HL4?A9_y{VJb%&Ppaf;X4_j*fTy40TNcq$hX%v)0afz%+p!BQxxKPoAggys+cX z(s-{Oy?2+ZDk1bK=?%E)dBe2kCwb`Kzg%@Tcl4~-okV#@qQqdwJ)iL>4}-+D%6>0M zt2*cOjQbNf-IqJ@N|uf_mDd_YetJ-9-@e47dc03x5rU4XboF zvmlKxxFFTl_pbO-MICM&jKYF1k6O}td*ka4jUqa(HNTiz-1H*<&B|2;w;ZJf#|@WuanadD-+?KpfqU+{3A8;s_(FnMvL-us;`gOT8Z#97A&WF z4)3QGoY28RR=0L(?Ght$kpXr_S+Zi@EmtFM^X)3c{)}KQdJ`Bq}aqT05a0n*;%2ICeiSgD}itl2=KbotdqBfhK4?=;>juD z=@V;1&B6j|#kIBQ9~Q2J(5R|pnoh79{uX~0Gof)XQzT0#`3&(^2zQefR`XXmWMKWU z|6ccwl!6|koJz6qf$cNp436P^BBo%#H-P;bYac>yQt!blu0-Z2(!BB&&d<5(P7bYn zM7c*=LF#tu!ZYQiw-+bLOYe=sRp}lrq<*(@C3q1vqHcGH$>kpP?^q*_oVj7t4l15{ z4siPsedF8Yu_f3Tma#Aj`Vk1u={F2ew>bZxr~X4DQq#6JTvg}=9~^)RdT{4u#^UzK zd2p|rKCJlCR-w0h-U3Wi64p)=61j+u{L8akt+JZI{*rr!<}pGU?P%5yrLCIp>!t0T z=9#&wFqjD$uB3$}<_|GT#;6pD2RQI8+8chWR#~o&hSdS0f#M6b%2kZ)4~4(&Z~e%{ zW>NvjLXMuP42~%s*(QVatnTU8i3F`pc#VYop#ysf2tiZ5hqpg}52ofVKBEGRhjO|Y 
zU_mD_o*nw84t`Pu%icNLx%L|amdf3VyuWO+tPnE4!YV?;E5k&qU3dnHeWMM&crT@I zDnIH8yV$^WZK~2!54Ec?4TUK_lhTQ2V-(5>H!)J&+hqxwhC@)xeT{oGVL8Aww@^ZVPWFcjke8Gt-W1Wzhqh+lm5j6xa9F2EJT{?!(x<+hrC* ze=C8QHU2eY|vfU}H5=_Ro*efxHz622E3kA5Gw54O$WZ z;R38hT)V;O&$^+{R2e!_BtE+lHECt{$!UI3XVR!|Jjp`FmW7|Jx_NrWrHq8wK@UW% zwBDQKjEAk{ygB)y`|42U%oZvvsl#VhJ|YR|h=~}uUeVlMdGp|{)>;^fUy>L^zGR>M zoqMw{mbK^A+i5;oU#Nm6+`2xo8TV;#)d!2*x3h_5ch?;~OgM`{Z2I}|ZX~{~5`MR~ z8DdIN{8`JM523C~mlk7)YrW@P7LX|>8M9eZ`CQ45H@TWa~K45xun)QD6Ztq(~Ftc%}k*cg80I?;fu{i7Cj$a?w#&OVq2U z161oH3dO1L9p)#kfRh6Y%m!LlwKet{DC z`L9qOpdU~4CKQ6!uxvIxgzI-rWS2AipM{|a*Y3Xy?TPXSi^{%GSr$70xWZ{r#=BY^ zM%SLt&XbKk7Xbb8L+j0(eKS(7dJ&1a2B*&?ff-!3AD@*-zlyxlN^2gr3F=WpcQyKp zRm#*4HuFxx?`;o+7ZUZ@_;1NTs&I%L;C_aL64*mk*XlmMH30h=5~oXrx>wVL{ zLaau!Z}m>Gjn#`JJj+G*gdUat9LY9eOA2t|t~s3iUFEPR=&iE#!8=Z4|8^+TFU05r z?zd)$c*cpj)je&g!*Y^@1$HaPaGT5XcOI)^%#L&=9jjMkjccX2ab~=#iTtSprs^8| zYipv-j;*^;7sDubyn{@~N*Xzus{fMML=Kq;C z|CQl8cZ#B>j3|IVGp++6<{FS|>|AhR%U&W=kFS93H z-VIkheP`ACs(_|j)@PA}h?FYsQ2Z7668lGluCX|V5k}rgjOGm>h>Ei5*8@C&1DL-u zCy~gL4QV$5&;(Q}cU~pf6Rh1DDK=48l1eYcv61S(~KU*LBH@-4K9n%)uNMCcH!~*g@6jq zQ35^jzKEJB8xg6_zd=%|1>#n+8&gkCh_1sBT>J(6Wlu2ULS{gA3&vc8trs#t5AW+= z6g*LMF>F31<6$Dp6HwZCYhaN66)(oeq><9I|vj+N}vnwR#&@D&*wrNNv^OQ_7R9b8E zOi=CMT`7{81AWg{TA5Lbp!M4rf?JeXYL7a$xlXo|qR#eP@2f(`NF-lYwZB;4vdvlN zEeq2(>5G_^+x_ve`OQs%X^?vOU8U#1g9pUkP7h-oP_P;Vw|qer&jHj z&P>WHs}l6zbI_+bp2xG)F430WkFMhH{k?>3LftOC^Jut_*)K$bHuMHePlwUgp$(h~!3w8iLF z_bhf0Vz+ENr2F_UU!20I2i6+fCr(E2wT+|$@!ZC$s*>D`@``)Hd_UK1T6qMesM_v^ zKXUsN`EW?zG5TX+fo-4QC{tN6!nWDhXCz!UVk+}PfoM{l|Ix3S>oj_$JMoVp~$_!+a|UpD(3SMgwn3J8l%6 zG%A+LV|DL-zOKjH`%gLEnv3^v?6CR$&cJES_ccU#T4=#Hx8m{syJ1g_1@klLRmS3j z$JJJlERo{F-Or4t)2!t;k- zXM8kfOkMenhN9nPQJIQ8@Bo_T#C49od-aS~ygmWyxn)T+sOx)7UW1ZM`c+z~EZO)_A{AhOe)HtD4&~q~Uxl*ZJ$V74@rrBd< zW2Xv!lGEHf+2Fu%z1q#(L!Y3^5pi7<5ks{Q(%XZ?X=bY(PaiF;OCk7xSP>~K$BF0u zgvz*!A{~=t`Mc_)7xr8ot-GX<_0e#S9dyM-8-D+@>XU!K6=BL&JR44ewmaAELh~|Y zY~fSr^rtt}uu~(2>*KUWw(5Uyn}30kk6tXBeu?r?#jZAn>~*wB={S^lW0tiW@cMn1 zuFBBAvvrd0M_aFU=EPxTRjtY@bfvdn5C#uOMv50QuL&+U2pcYc8Vmyrlk7-u6^}ni z3XOaw#>ztax$MKQB(j&AL5!)9R)xvqzwY4TtO@Mxk1v7M*d|ifSs(A%KJEzRdhao*xe-M%@us!pN!~4Vn$$t@Z5B8ETg&2KsabEaP|lyQ{iGV}J&ekYe5n55 zB}1dWY|cl1_DBrEfZ(Y3$Gk)E-k!AXXy|jVJt`6$ar^k4W%}Qwc?ow(IJD(8EeW10 z_7b|?Av0!NRdTFg6IA+|F|Q|?OL9M`l^evQV!EINn%OYwmvR~fFc1X|WHf_*0W3R( z@6RV?_lrKo$YJjsOez8!0h0pit>PNV&0%$e(dviAa-0oja^-ycMzxS;uuZs^TA)iX=TAv2m^S)l!|9ts6|*y|8`V*rs=C#Hpn7ZcoxH{A&X<9} z7hQ|xoWX;TJUVhZf~Nv0nDL~v1wJnOj6PKttxBE~s9$caaK2wl>n(tntbWN6g*{r} zduw96L0!al-g5e*`qeWsIKZWbuYQK~J=KT@0f(%q2g0*hIE$)nBsZ~WI(^FQSKU*a zDGJ4l>^8Ema_z3l$@9lmTvi4V;&6&)(r*{7Yit+!BAs<-8K#Q1CBUkz@xylcgWA_8?M+)pkvv<6rG|D(mu)T;yVWCE zX}#D@uDChxCV}Vka!Tk*TiTmIimhJly)j_VVPlRs09Ln8slG5Ca!4jmI%ql$wwGPK z=oOfGjJU(wLQKY9QeejFv;S2$EykzNG{i(V<-s~XtCB?8S|&JgiETE5PQHEW(u@1= zoQJM`V2=f6J0XHUGb`(w#c8VLSbFg?wxPF}Q$|1srF5zyhwn?`UZ{9h8T~(9eS|pW z?g;8@RoIo&%AgY-H2=WIjxU)*;qFBoR2X$XJkO8-ZVmT=vviSe4q}IC6mi<7M%N9v z-*O#ve#l_qTa>J9LQXIBq1Zzw1Acyd6Ns`sZ<_Y^Yo{MbomVRj*t&7rfHrs3m*JTX zZG*#LDYzPq5lsPu3G4;UQ<{gUhcLs-hxl9;<1`ncx3R8g+b~cc#ek|?%ws$0(t#bQ zGK!%(4V*giOV6U;Ogirk32lsbHd6eNaooFdt%|tvrMA-jN zMyv+z$L)J4lWo1$S=x}5=oFpj9UmpcQ?(=M=npy!m5oc#50yvXGxsTsPh8S|{V^5` z5Fti~zm0s( z(aiToMq>mtSCrNG>K#(Tl1^^Ny7I6==9xsl)1GOc?8z{uaKv$qcJ6~bTr@m!8O5;X zT(ArGz5^NL*b)zrT1s8z_b}k<5jxrm@W{D+6!I)*TI6=nGV6+ReU)^YLetcAAH|@g zAm>cMEw@N0x^tN`k2> zvMS-nt3Ie%nX((=*x4LQ$Zm_-W`Mj2@4N4WG{?}T53YXO1MQ!bLPT@=wtLyCDx7GS z&PEzmo695;0w})}ekuFk%>4@Jw%nmb5ehW=oc#Y!>C_XQMG1KM%8)$^kaYvX!F8mk z;mUX|_{WW;V|wya8Tnd5*uk!q4KoA9UqYr-!CI#*PMM7b?A&SrHY 
zz5^o^Xag!h!vuzJN7{zW?JoIpMxlsvSh_YzH}vM40uU6o5I+6yESm0JeVLi|PA6$yP& z{{cmd(!Xwc*u=N0<*YB~6-!+8Le`GWd8yM+jr{zd6rZt}2HaQC?`JVJBX$|84fP*T zR|$ob=lyu*SK{YK{3I()Qb4v`l9k_zUvwHi6}4C@oe9*M#X^U>o0A5@(A|$k)NPO? zI)?Rn6ublfP!=+^f_VT99TVLI5{iF z&cB8beW&xU6F*-_phU~#rr{C5uiVYUTptI+Q#q96D$%t?8d0K(A>DUJb4dw}bs2z) z`aJjv#_+&eq?u&01gONH;eP7n{v$Mb;R_U8l^g_n;a_4-(3W_Q^J9~W(1XO$t$Ev` z0vq=wq7xIv*Utk452{%?scA}gi9D8@)L(^Ywtp*$JX9r(PWw=OeL0v~-X_!Tg{H@> z0sTOD;3=x+?O}fS%=kCQh0v2YqJ$gijA7Cx_HHUAplyMJ0gHj=SVj}@D1)?#{}z9g zeKMJ19>I%vUA)Zu}g2oGd~zt`n)pDJs@aHCkD=Ms}HSMXCP{t%g9l45F_6MVLH8K$b(k(?kzr6K$~ zLI7m!he}tSnKD^pkTkByBe{25|CXeYJ+I|2u|a8^yB^0=AtBayA+CGTw32VV!BC7+ z+Vg7;{ZjvA*;JD;5X0e$(s_;vjW#OC(NL>ygJV=7B**kFdR*hj#icb~*a^Kk;I!f; z$v|k#*9sHzfBq-?_;utHLCEg(qO z0H-w=`b_@;C&fuQo!p`yD-)I~PB&8)hGH$K4f_b(JKOdok<^!y4bgtEv&g$+YE=B{ zUA7`(2PbHmFM$&F3xJdMKfgkj_f)P(vb=4yyY%_nFDU-f^OO8SM=via2cR0hh@w0o zfTYTQVmSILz4i?}1zzAWsJ18+MOs}>6q)1OXax4n3)epDLn5l;B&_LiF?B_z5PFb9 zm$LhfQ=`_!hKa{X`m4BIbpJX0G3{*s?TBg}IKy;mA8(m_dVC?32WRbKYiAiCIgcV1!N)Wa*cKRtK5PcayA z72oI)%$J3)1SVN_Nnoq1!lro6u)rY;Rs}oT$_jNDpX&MV(-Ib|2IU+g7sLKkP`DH(GEdoa)-spthgLQJGm+lxR2Q6x7$rSVoeB)P{#h{n8^!q&#jSRH zi2I#l)h@O1HTTV`+@{l2^N)iIiu#woyX043K+qr7O;kxoa4_LMpdcLJsi)E@nwQBB zcsZ{Eumt497%!I;+=|TVw9eySX3X2sEPV8f{E=E-gkTmRjCT@Y2{0kl8>D^RX|XJQ zpK^<`--xjxOqz@o=IK4payk$7npgUZe&st-?zZ&lq5syT4BL%}IKi9uy?9a%n$^=B zy6#?M#7K7u3I|Vdg)W_tTZ`(d3-Yvy6J>v~i+aC!D#yc|^e%40TruX@d}iV3&*Ow< zdxA(q-%c<98t(ro#KrZNgUbHbCu0^FfBz}{e$fWk6PQunqkOR3(?;l5@{uaPu zvY^lb$6Nf+qYE071*0H-1ZC z$Fx>VZZzq_%qAeXP|)M^9DxJtVA~1roQBDV<;28CHYPe+(kJP3;wpYCT^GHdOi{YO z{v7Rq&zileU*O+V<1!Q^{d!d-uS<0c{H7|a%PPcdgZ5w#6Xb;vN{6qfPy16q{fn!s z#@#iZxo^tK&o{f0AV7_@KKI@*R@=ziK`z~4O`1o)BwDnrAkxUF-4_Ui z-A!#4*c-Z-TEqmOXbF0_iS9FI^am^@=RlTZmJn6Yv;Bh7((Ti;kno3L8`B;mu@y?s zz=r{twZqkk_Yqr^GVcwh{|WwskQdA6%--=;LuY$FlyVncSp~YAY)V(hbZbFzhe60| zB&KaYTu#td0WCGiVy%unn` z5w6@_yBD3_=J{>wz4%PcXL3G@S1FiAK%uvhf-w%&7em?YW7r`1UhKE~; zw=VC;nE&zM%rn4HHleJG*aH>1OiLv34Vy@1>@V@AqkF}4hQO%2e z2&ti`?UNnkHY7Pd4m|HMTs$ab;9Q?iXb6uoyU`krT1)un3qP1@u zCsYfWhJ43fp^ORlxlbxRn0DXHZO(4Ky@!6t;n+%wK`XJ==%%;~~*9#^)L z?=t*FZM6X^7Gt zbI)Jj#Cb01%cT3?!G2$DQ>!tbE-3+@54FnW=1K!YPWL#kNGTAhZp>M-<+0}iv93d? 
z0m-~;$Ldp!J^`ASc?Q7sPH9 zZfT+bXM-3(J;BTx5i!v_f+0NzI%0$|K$ved-HLaz^h9U*aXi&Lv9%$H#g0VgD;HUl zyT`5Qwmq|7MwV3s#!(1B1W)A;_vU(4Ktv61;78!yezFfA50W@ore#3Rr}F~BoQ5_^ z`0pGvZd(OKGWn(r6ih@lZ6$Qm9lDew>a~TiQ3DQ&JtoOdPUxhk0F^NTPJ>E7TmV}E zwX^RZwx2>NxsjWm#Bi!qV5*v<`h$zG;mCQkJ}D$Xrj&$su7Nl0r$uS6!^(qso-BsO79tsn;kuxo@MA75|g<;%*N4G%faHfUpC>HtSvSsP*0=ga5wYFW zDtBkTJ$=njld;{8*N96w(hFSR72<76_(~dQs{3Y=ET=csUQ_U?i0~itzFS&#E($Ks zMm+mYQurS*+`H#yxHAY9V{C476+FiV@Cx5fce$x|FzW~o;Q>yyp zuFuNGrmY9F|9-#r;kr8X%dfVv6nDDmgs2^N-QEAQ%{n0UaQ^)+ZS^us4fn%O=a`FD zuH(2y4c8PAM+(JWpBdTXYp*%k<>XD3nMrt@3QY!7YPf9?xuy$m8zvEd_p(~Ay{E>}xh6B$H#{A1kB^??N<-jZ;JaV95&b`cCWiIotP69&=#5HEsA~=Uaa&)| z!~EaoJKuKtr<22b5#4`ODuv z;oq21FF=xM@)|~+NG3T}-Fbb>)fnlXyHE37F-_undk^o@zZs+V9fCPFiLlksMdWZJ z6;qCXK>CNC%>*&FWYk(yjp?2~9}Vi*ge{#-#)NM*(Db7&HioB)UHuLe(&Ps+vkdRu z{mnfQKjOUS<32(nu)*_=0U-8h>+#NP1&eOEptoOn;ZlzYnR$`N6U^rJJI@slS39Ad zd$;IP*dB^LhN+acSX2-(VUxi%K*<^wqI<^WpD(a&B7SjUT?ZV5H`Z9 z1E>$%8{wY*{3Z;CA(f40yO%H^8SVbN=W|c{R-lriS=<#Yp5X8bf2pY}Yta{C+cW$9 zK3}88DeZ5LiBTb?Wi>z*S`wwF$xKZ>3kR&H_gd+s;RO}7$Kcx1yX9yN1oSHjv0CV3 zQHi~Pq?9&(O;$Vfrxpjq*w$s#x#;GnH(2EvwJV-S$>bZZ5_grQ>SYRm)5-gH>IdJ^ z^zDZB$^LlfehrWGH_)mBxbKf4BfgwIZvtW#S3Il3eCZC#e4A@R?9i6R-8Gd6AouBxBB5)!* z!D{=Aib-lwJ^e9+-}{`!4~X@M^jM z+Xt;j4tI!&p@a_!8UPW#iN?o5`3#S^d8`m9z!o8Gl6r2$pRsAhh`{-P{oPmC-=z8A zggr^PF7o$?OLVKPj?roe+f&Lj1P-yBArlZ>YYch4c_m6sO6)uc-2%CnvrPc4omPFP!4ei7LH&Jci7x`f!c*4> z7HpUS5I*X_AMo3Savtl!yLxYBd_RCy+GL5!a(jyVAD;N`=JUhq^5~Ib&fg3I^*IAm z5sBrTR&HE~w=O=N;U`87$<(HNCzFbI0J8$`6XT}~1AMvfPbmuo^WSa-t5{@bmGD#N zWTq(aJPIzZYjC>Uk>LdN_upvIdF$S4s8EQUYbWF(N>jO$Xyl^~UKpIt!fX$LC+35m z&3&LC{4y7MLYSO+W@~ry7!mMzfx@2Db}n|YcGLg9VMx^s@j)&=pi+dtZ8EA{$Gx^b zp>jY=S26j>`f9w@`oaD{diYYd_0eRu^~w0<(@joAf0&nD1L{DMe&xUwkw8SHBsHRp z|GXvVi~NSj6;b$|K_@?2Ie0;4GnzR;u989R@RcO=9;Xu93|9sGIOXWTXz4~~RD8ft zRzt$yX?I*Y5>IDW+M-eAxfux^bt&7cD|r$HY@V@0_vC&vIVuY!X$q4&bL5{O2+LZH zb-FaW+`3poh}qS*iZ5+OjA$U5&v&O6vF_k+r+-foIMgR@oL|h7c!z)bs6Y=&=-*HK zJx6Kap2%iAWB`BDXzOq`*!5rh?l^m8e?8~{8p#iU@ABOe)eVSM@(0k-^U3hm9E>s4 zAIzMYT!OIFvQa0_t#@q)$K;d?h|&fL~p;I8;)^Sb8`~Igxq@}w>$`R5b(kYy}dByWRGs(9@YN}k6E8pL@(&;9`QG{+9SG>iTAv0o~h@WBc%M^^Z`6=NPSh zBc10vOGN!-UV^$&;Xuo$z-GC*R@aurmYnDXn?!;E7Gk zTiTe}Qc2F`z+2JouS)SPsc-~rMar)t;V{SFMz6nRKjmUVx4yu6d?Ux**#g|r=z21u zUc8^~mt)D`zSkhk+ORu=YW?VL0Ul8W{H=lJBimc3ok8X3X|E`~ux?xG!tX;nU~W%A zfcj{8%k}tiySN2$@wV>C-o!@^YXPMQOx(-=`X@53W7{l8SEe>`ASO~C4Z(BXY8 zyFi1?{sg?a`O`=J9M>VL6rUL%hqI*t>)jJ+?DY1hOI%6^iLV=z7!Qh@7{3x$>O|ur zDZkI{o_?6(24R5{-v&oVIN@aBRbnq<1^)exJZjp|V*d=1$BOMHu)(Do14P#5?*{*} zB22?Q+8D^Bb7%E^?3{V|QttSI3+_Kc@=t2+#VvW9^KF#+hO@`Wdt;+gb5X^dbVBg__sKFGShd48`kuRxmHchES z_QoPe9DI9%bzze!PcI9a72GSb*aoo*bQYnv$Re4PFX*k&9s(Ml;=JKZW%EFec z6o0(rH$J^=4)${jO{g=f@RE_vfY@~4IB^iGx3H6;iridH9BA)k;D)#sOR@i*$VD+m z88R>WT+_Ber4#T%6hLg+-*y&kqxo}1eZf3;qvs53d+iI@&FSZZ54iwZSptf&mQ|}* z+j%WI5th>a2w`qu{|g#%tn7Yb6_?(+_%v=}{P43@pUs%|z|DD!X)e>?d7ES>L(*M| zI)oK!CtKT1^_gMX;jUM7newI%GQ50|MjPYkcr7Z*(keU?u*$jykBgX7Wy^TMCM}ys zO;)?(;#{xUXsOBMM9;&5tYw zOrw+>mw?wzOlk4qU;0|m8G0ykzd~F*;b)+1k2_JtI!*@OL-l>Ig-o2L5?sPJ8I_0y zBFyQdjqUK1+gD^|<2MDd1W4}9HcKm9*XKe7gs~?!%T#uA6wxV?3YZcmV zI@g%b)tA{S@E&`=(N)>07vI|1&3&(T(?O{AjH>Nu0wLrNbn`BIM3ELn&+ozNR%2d| zzUM$EutcjC^TWA=sG=j|t%)RWj9ZsS%hJ*A2f-FtpL-NCt8<0-l<}S$j<(Qs@FkjT zyQMn7W4vSUm?bv@Tc6~n^{>Bhc1=yASdPfj&;|0$*I(rNdu6vJ{6M^bH6ODdvikJu z4)xYMr^f%H)_#ITX>Xzqhj=KTPzG_Fn2-Y~IO8w0EW+d_nkOlBIgDZ?%#JTs2pstr z_>sy-tC#EDZnFc`^0G?**e2CH3w3(@50h8xz z8KF!uH<(GXh!sq?W<6)w!&S!es+8Au#mbjbzd+Bpg$t8L!&u52dsf^pj=%YtKs-Dv z=&=9>u^c(H=FsAV90W;yTh=Zj!$i!J$C;KIlVoA98Qcq_8_xHER43;5q_2FlO!v2~ 
zNa#Io;eEDHSsf|jZFgTv4d1L9Fdagx>k!T!auFy7svz7QkF7u=HilBXp0tW^29xlM z{+uW38dZ+rBECfe_{t{&&+@hRllAAqO?vj?>63cFXV~)5CH;NOXu>mf;YGggP$LsX z%fINq#OOd!V5>*f(vOA`P>MXwBYA)8xzp+j)t$2egEq{L8@Pb)dJw(Q9TLTwfzH28 znQ&cX!3=DlT1?S_8M%?^Ce zEb84RZwH%D`SfAA#)`@-A59Yta;zgOV^i`&X(;f;Ifj z?Q+~Sm4laEyT|RKZ@EYDhD|&~UQUisSqUt02bZyYY7Q!2@<{AT^c$83VL60!wQFDr zDb1O*djx04ybtDOXFPa7K_G)e&hvz}bb(5HLxFb3+F#C2{zgSG;4a#v3H&}b9sD+S z;XIgLVL4dJ?#}|?yL$1(*6S&?>l|EQ@9#^@!Ep@#u$7ng)Yb4IOS8+#qMspJ(gzru zUFi~=`xL;7p1}MBDbEeCIHb8bgMWeX5qObQkCre&-iQ52Je=5GlKw&g^*30w0WfEI zBikAkBE#4z9&rjNR)>;1GGiDzg|+m=Yd)-i&lM}{%P{}jC&8?hy7%4esUOU;kywM^ zxLUCrICk2~Sfe3rNg8PU;Gv=(q2*va`-dRh5FSZG(QoQ{ zw3T#6Tn~RdDwdTIERfNpCdO3}plhdHBD+vy#~IzmVxPuRIy2qbOM8GU@Z}Azf0eVt z8_CRVs$@*Xf#uiUT7V2IaRaI=D`PUG(oHXn#XZwuai}jFI)v+5`)5S$X7xftJYGKr zh39`8+Ikf093-JeUw4>59REVd*8qO7JMh~C?@F&X&(=M@%GT*m1-siw5i^6f@pv`W zg+a+<$6KGXobcWb?s32DA84S$wn`Rna^K7FMz_j>2Hek)VncE)0|Nu_rN-jCfGDz& za?5La8L)=VjMpN)d;19B)9((z`=hmnC5XZMkAjIH9(lT9YZ=wFcUuvc=a8W#y+$7rdazMQW}T0{60)-Vx1RJ*UyVr{cmKNN9LeG}N7?3Is?bH= z!wC_Hd3TX|qoP3DW?9-}yc*xAp30LQbAfESJea@~wCH}Yt8nOVpcM}nDIK+ppS1c% z^Cx6H6R+(WC3KFRZfjIXf zO~({-Ns)YkBzkruS-@n&(#o1gu9!$Zz{Xp0nTBgUzvFM*rRe`YP-c zh^9G#?Jy#!r{dAjwl|`l2FY%ksNEl#fT(zF!Bf6`oh;ck`BB<)`TEJp*W=NF*dQ2v z{!66TX0#ws5>D8{_4nXjJIWDIa2dn@=%k_I5i5p%^{~xE$A**!m%}cxw@?aUQQBC0 zE|{Hr<4k&a+D%hD%woFrjhV*ugkxcAp8xuf)ANTt!ic^D+$(WoUrNQ%s0}m12+;*D z0l5zO$?&r1xQi`+_Z@3krg-@ZC4Dd|J|`g&OhhXJF~wK{i}mKPYeXVAL@8~u#*vpi zW%Wf_oCIZ2!bn8_hWm+M#1-qN$R%A$+dlVq+B)jDWt z!Z<9GI&!GYO&u3mfM~*r&&_FrN=9)bh)WArq#07i{37*Ij-1#pL3?GJ^1Qd z9CPREdDS|$r zTjaq#*X~^8rR(xXw%an~?o2giIIgydQ5AM=k{Mx8Y$fb{dXXVt*_t zWQ>ORqP)puiXTDE{`3mrvv&rx5K>QMI){t&`_NS-d>oRPj0 zsS5*K>%;_{Wj~USjE%##oLy;*(9XXr+yHI;rpvo4qysKPMG#+?^FbG=0N@`4@kr2G zxD@zLxP#&+|C&>V#2%-geLU>3xiYdI1K|wr0pQnVG7Z}02lz9|*Exj@O85;ppZi8& z8lXu>ow#4NEm$L1Z-;Q}GOJhR5(2ro)XsZ{RH}|y(}PQ~OmaCg+ptk!Wxb}FA`O+F zMYnOmAouH3?SkCm$axYey9XENd?G$*cfO%-fJmf$w8 zv`TOM?3={S28sD!p9KfBE3&gLBQ5qVX-8h|Fm8C9MSaCRA<)4s;n6KU=PIez9p(R> z*{I=7Px5lFLH&qbTvO^Z%#7+e&+>D-tAvM7zKm0m$i!iM2wl=(XrtX^f zLnpt+&IjGwg7&Js!Fzco7i(c~yJaJ|+hM3llas2`LJh#o+&s^*-WlMq)J`wyY%zYy zM5Ue+@Gdq}ibqfJt#=i5D&m#&jH;v@F+R6kv~n*tvGDQ1EeC+-$H-5^79Kt>?%z$G zf85N?Rivv-2|YW@cZ2F6&!)!JbmomNVQL}{IeRq(rhqnLzzG4zg<%5a2K;L5L zy}H}ywIiX^Z-2`Fcfdljh+rXLS=DvFKmBk#kEU1M$bqAqqW(bs()Q+YxIimw&>iHt zwC5)dO?DFq$K||mD@OW{??X~K#>`)=uP|6f(E#X&_WhL(v656{Ld-n7aK~Jrvildh z|EQ!pI8G_ZfYJ0!lM;vyBqr>d?e3Q4yZr-Rwja*yH3#|3e2^M=+ zX0j<&L5QMt9p;tudYP z2SESu^C8I_F1uPapy-Ybv+9c#gv6|94rZsr`;xCu*|ljZ`P+^8dxHqg(0q`(V79yV zoLisJh*!^o>qYNIx9UbN6CaxbKV?m;->s$V{)y~r1YxnN(dwksYOL5w&DbF1E-H(` z+%i1GV`hOp zGP-H%@2RQ4@p)wVs(^6?R6k&w+V%dndKJb;+ZLMI0=O^se!U$-f2L;Lp>_9PSJ@le z4@*X3t@L6katD9b_)b`;r*h9%7k|Xw8zL(|4#c_MA4P%G`v#=~*G5K~wTIetNR6w@ z|NnykwJUzmI20f(+>Aa-6Zg@=%!O>GmU_snn(_%-?=>l|B4rpwKyZkX!&^oyQUD?Q zp7Il;=MN4DudwH_y0FyPg#yfJ+;HNcPo#UU z(S3Eg@fbLAvjG}`4k42;#=yQp0B{sR3;_0rZvw#nCu{&=vwTqO_T(d`9qX9l4Izkq zP7okPBoI*dae<;!Ee;yJitMuD>Qq!Q*LbTuZXMD=usb>E!2m-x%_Um}Ge>|egZ zz%SwWO4KF~W7U#yz@ydMZ!tQsQu%{9M(^@f4hkn!mMno%@w7w38i{q+hOiY$akd(^5pnHUV~1t;bu z0+@~V;o#Z=tV|^>ynb03ny+yV>XJ_Vz8neIv9GC;=lv5p2)z2{ zE4cOa7;}&+Qov)PSQ~kpm%J&Oob=>xg+f&9-ZpcoCzK~Np+dPMPpj33m@+>uSowWb z0G_6-JG2!lsMVImei1P47K6S)#wze^D`50pX2pc((|LEjPrh{dMxc{ z*-sV7qz=Arid{DRqnvx$kY#qR&{S0p7J6qJgei?lSRQf5a|d(h^`QNf+*Ay4_=mk> zRKDQBYT26$I9by8=5d#d)LZH-utfmNz~_t8)wrmkC^Zq-J8l}^vJiG6b5W&u%&B#Um9^bmMFRqbfDQj&Y8 zG$$5dR%%uIq5gWW0`LEO{kah)%?j$6U0sf1jj`=ZDXPD8zcIw8!0;8Spe_ZEA#Fv& zA^MxjdrM)5TVQ-DkwU!=mb(gXR?ai~+<_!*3>2)h7Lyh;q&{?~| zXh)Yuv7BG$XC^nl#Q+Y&Ur{wa?%@3iNiw;+QYmyp^6D=meHaVNj$szJ=kt>lV_)IM 
zOJ5*?s*HwAw+e#Ny>OKT!*03UUe7lg-^`KrT&t57{H`d#r5ITLpKv3KFIFp6c7M4zZKZLx~4gyG|R&Qhd?{ znBa9X3|tw9#ap5G!U|eQ!n$~hytZEwKqI5Q?LzZYc0p(b3SmG7#h({A$E4nfszrfcvSZ&_a|wd$`ADf~;e+zRpq}o2uFx z-8n?;E#*q%9*f&KX3|Gi2l-nv^k-d2+$OXXrd`Oz{k+b_=~ztIyqXXZ_rveauEtBp z!EOW;y~xOOJwD;+HyJ@cA%9ks_LQ=P`S~>0Z1gH?TwzayAb2xSw2=b8D|1EGjYsrV z{EgFDN6O|GR%jDtzs5Flw`vV_<){8)i6sokk2pLDR79%(!f#9Po5$NHO)k2R=3 z2!8Pf*GdbkfsH{!LiQ~cJrNlHCk}+tx4##)Mxce0J%E3ROeBXG{ncn!hPaWpGs5k5 z{#?SrBhbU)YLRXd658$=TM|%L9~8Jb{0)A-2^eud*xZ!xeih3Ekn;6%WXojq>K>KI z1n&Io+S2&bCDQV!Bt;}Tq*EQaM47)jteKB7U<@!1EdV}UeBT@nvOz04+i>=Cx4V8+ za6{Z+PyPs-NbxVtX~cd_y}3`HaOFR)EF-LP+RQ~DYflyT#QAl<$lhaX-A7YJgipava0#4wj23RvmT7dyoPZes4mSQ$jX9;afpd z!zqrTTJWDd`?H5ODLPo3T+Gw2-sG>TvRwK0!9TBIO*TVkt$pWAjhoMp683fz2`TYkuTMQ0oJNBrW_7cLJ|M*F z>YUBB{f~|4khsJIemUGW(|pv{w;QJtQxZ6R^JJ4PIDTtJq$MAqxG4*q{k} zoV6svvG{0_O`0HGrSv<=XQ@$nf8SO?YpVAI23qw+hH?f?I|r+C`!1GV7AwqmSJ2EV z5wDA{I!^rRF?`DXeDLS+;DrU@B-Kr$_Aff@{&Y{9Htiq;J-bs zqaL)b8sBaAFcF?_Oo4*Hcps<)LTrWakj0Hyo@4!SxWU5?zSAanmspNE?_mteOeZXM zjrCBHlUC8BKdVVl*3Ql7%(hGUq-S6KGpAAX`9;f(hilB9EfB{wWtD_7x3nVkVFinX z6w+i}BXFmmjaJfh6=`fAaFIE9x!g@D8^mv>H5`#|Mm>=X78t$S0_X;#zES}N`5Qz(~XLAza4lMj~lHk_Y5 zx3>~AUZs=$CNdN*%g!2eJ9?P8dnC5iYnGtC94i&A2V=-UYVQ0JTXeBDB}YY***u8a z{A_Wu{aABN!ViWP27C#viW{H|kez~wzb2aayb&*!G- zia_DLR!6h`GzFm?-_1LzmGH5ZGW%@W7WI8Pj2#wD#~Q@W+7R?c?z*RxUdBd)`Lbvg zG~R(khIi^g!qiT+y~A94oy$;9ZYw3KAdS#}liZoTQ$na<-M9=E~+T{jku)ad+vW_-IJsR0YG){B(Xlv?QTrLfL7R zpZ9b2-^%{m`q6g2{YzlK#TA=O0$%g%>4Cu<$i^Cbc357P|h_0Dnl0+K4Xf4;!K1!{_yS}}x0#mBnltEoi=a^(#z zQ33+TA@ou<-Va%x$@!8q(hkfM6l!gPZS36jKCts*tM{&7;I_;BHpBt2s2a%rbe~D` zC&!|r(Av;=S~yGlo1BHS5PLH*m#TeH@m|5*XEH?MpcPOic95p`Co$*5aFRaFNsb*O z5g{aY%wQcDNVSMEGds=q!3BysId!1s6vn@{FM@g2u4+@_?|Vy!jdA(@U6p*jt5w5} z&A~$II%n-z$6p|R3<&(8bZ%-{P%3#XLD0_3A06P!)}x~iiEvT_s!RTiWaQVSS}6rc zr!RY~X+`7H+;saoZLOO$dV)$#nmL+w=O{9tIYU}zCXq)IJF8nfJHYEAj}Z@KlSy+O z2O~zp*sk3HVx~Hb%WY%G|?0+liL&C+Do} zc2zkCXMK#A#jm^|A;wFS#;->qUtm!{)#dCboW}+o6h3BnD-5vNE&CCkZj)?Zknd`C z)Pknr9|;7pp;g>|7xrb_WZJOeq>^YRl&*o2TlwU!8zZyYL4>Up{fj$E0#sbxhn8H+ zc7kWiw<@t^9=QUGzR9z34eiqqB{Tef+U$k zpr^RnZ&La8`Hjsww0BCKbNN1+`q9_{ahKalzVgC`{u8a``y44wY6XiMDa9LQ*P^sU z{XGqZ2yA~xn+HRy6^9#$m>VB%#tP_G0%q>%A!SRX_iLFqP$sLN9yg;M_B-1=0KYSI zgU3~AngqyKd|V`cZ+^!xR>JnrXL>ILKj0E2w$ftH$^^Rd&~iKS@R;Rc3Q8Cmf zi%yIc(c_FaDWHHOa&FXIUmgiCeP9WxU%OBBEDg#yQXUvoqyMOe3w$SKQrK z?~k?;D=sdg@xl|vIfkaHMw8pRvR9(Uci+2O(aq;=Cg^knWt|?=1fBHY^Ezo9#$JxS zs3%Lu>)N2zKfI*jOjJk1yz_fB`hy=ou3ODaCj6njoZiq*U$|3kVmldQX5Yjry%)g; zF}>L7uGItU?}Il)`M71_;(mf_3gIe;lujzKdBaW>O%@4j)#&zWl*BFn%{N8Q_|H6N zDo?4rpqb!C0^0VRRFGL|tMN`B>!vkT#4pMkl?nDI@*mV}&^&LOKcT|#X+wC8%cv?BfKv}Hm+D_l2oNq=*cMnSVyRPH$$ zW)vsQV?U5m%6rrZuncq|TJ<0Cn~C{#|MItyU!B5{duPi&Wn)Veq$+%@Lia&E-@6pV#)U z^bh@tZ;Y5_Zw}fX|9z8O_J7nNd5Hr|+>W#L=rkz$_Dqo-GbugBSm{RH*xn7l@?kqc zEci!(RhxWRdm%LiPQo9Jb1X^hzgT*~&)zmL+iv;@vzPuf`O3l0AhltYt6GkgheH$M z5{Q+N^e~A6OPif_iRJNwQXzHc&me(VYz+b`oV8b_se86qAJl!k1ErNI8%Q4Ws!M+e zX12jKasDv4#_D$c%N6VAkE_`Wi6bbPt(E!{150Rd#1d~hmI?SX*4w+`kdGrT+XPcY ztXbW2tXV}wtSL!L-XKzH1wgY+S0+t)3X`Q~Gy?;&`TM~gfKN-ooAV|1H6EK(va>Vl z6|iRDSY45W(;OpacZ`kOVaj_yn}V@9O->l31h2>$87;3#qn~4HZ9y)H0eo}*nn*ABrT`U{Qo|rX{ZA9X*Yvo%%sN|v~8ld^mgu3auH%|E| z&nSDsq&q5+>}BvaC{h*e)SFA_e4XXj2hUUbM2yY;T1FA+yrdGWBd-<$k)%)4HkB5k=GPuW1$|hXWq)(DP+Ppt1GZl02`}+5u9>anlBsoy zpV*P?%82xUyY6l~EQ~zrj3kd|#f?NBP;vp=T)9B~-xC4esGd#nxBCuzVo$EdJv3jV zk^sWbTmm&d_rwd!)DrH>U)AWB7OGIN&&$m6o#@p6Xb$6&azuhm{19?=?C<{us>q#* zgs>ZMbr``W*rD=#(l$Z93)>$rzxOJOA2XkUbbC9?(JnNzlPan8qI|?$mLytLWE%T^ z>c2x<(sNheaewPa1`4T{RLX)3$n(Si>ks` zDcYQx&lvr}_g@C`b6mb$e=R}FI>BG42)+ 
zN`DCex#CByGBLq>0S^zDj+U|v<+YDx>1z!CL3K!(ewqA8Hc?xPko%CV)KWt^30`{Y$K$k|-g zfobk7bE$P>rdCoBvU>TV-O;A5a|Il|i!_Tz#FxD;44FtZr5ETGwj`+jEn9#wKy|yI z?8Qi%mwdD1`@5_~ZDZ4LN*kdyrol3p+B%fGc=nMcBC7Wy|v5M;S#GhL@yQ zhfXilu9Z;xt&A6{I#(}87snzihIn7yjx=%ulGd4&9GQIj?!t zJCc=JWh+@b4PJdSZJE%IX|hzbp&ZxL_rr?H$@Gwi>)h*9*TdQNv{adwT+c6KWn`*; z3!&X*bh67zg^oC7<4PUrLjoXpwl18iKG`iCuoT$RfHc! zHW~q7g-vRcEB{MS+k+a^Quspc?;^&=3TAB0b?~bndbW-q=#@zq-?p8F1(m@!APm!G z9;)j$ev=AYU~}Hj{2A1#GXn10$6k2Z7kU)hnS8g&G>yrn-AXacV?IXyYq$s(32k+F#k}S-oYv z@?Qj@|3pE5W4su}P3`V4B=0`Rd9l(-Xmu0$n4n?%XUU4=;*BSBtL05(C9% z4}}t!63M3J&sQNq!J==b>6^8u$6x@3aM{5 zPT|uN9{D_b%D}$G5IjEGp;IezK!!!wek_wvn=ldF`&WwOt1u@MMQy@(@EF#oAp*4K zTP#xT*UeKQ*-Ee6aOAN4h}qFee=*6uw#KVpK3G2~HJJe#N=i~sC*?wp+ATyi^4Pnu z1SpMyBcAkO2+%d_^Z6gm4jfUJtP z`8BGc+R|xeM0kJ@CEa9iE;utz+gD%LRhRv71Rh|1vW=o8Bt_AV5>&d6^5E-WZ-RONnNfn*(EXMLBwEroN+!hiVftT^5C<3^;*}$)yW=E0&>o6eWcb1q>T&JMs#+gqu&GRS) zoQ$;}qUV@*XO0&8B{i!WOp~Jwi@8+>=L^>Td%IItqtBY zqnqE}&hv_I`d(53b)MhOYXn)=>>JXB`A&7UE6nHno$rmz{ZLm)Wd$_2#G@9<9Z$AK zg|}tUS|AUHc?Q{xI^=rUEN4a?;%LGV1nSH%S*(;{oGjJb-15jRQ!E2OiY0!e z{5)Y({;h+|)nO%ZC$N8^GY@`j#XoHXtE{x=iL(gy*CZ@ZYj3?>#tE-BcqT{;y05g> zH}%#ZD>6_G`c|JL`BCc)fW?r$$V{nV?5S^^3GU?@i!$9IMu`;Dz7SO@d6 zc0P%tXb)sb(e_^1rC$j%+k56UUB3|&6&|=GncjzofP2(mlKpkcpFmArZmBEJo6>?< zl|R2vWmj_*y!EPf>8vddqVCQO16u!+6ocv%A0BQ{*t0x)i=HU6W|lxFZ01p;ojkwWNp1TSML7t#+xNt7joZRtb+zAGH{~LLiI8ClA zzWbMnB)=)ADII%4Pm&+#aVn-}6$&seX0Sb}TvU0Iu^IH z3W4$yW%?DD;{cXKiSR-Q^d!?`=gC6QVkC<;*DYKoOA_OKei8P?*sk;vkRveAl}7j zzDJ)P{rWA1o?uLr@R0G7rVav2s>v}e%ErHjiAQiirUxkBJ*MgRC}-(qkUnG2_lDdd zuw&GhXzk+%hazPw$Ao070t5oz^+D_=d(1j1HoIC#JTUcSILVN!|Bn{lx{*R19owL2 zC|dUFd6HH^8yUmuQv-L-Oh1y|xHuCKd_Jixon6Co73u|c(Vky0J6;%hk6{Q;-&GYG z8JC-#%N{0pRHsz39nIW>a}`Q(O;ZP4$Fk46PyFn<-K+OwNga1U=(Ld$RE=2Ot^;hM zFBQ>x&Grb%8u&>^;UM8VQ-DL3H{z}W{5SOSV(+nKNg0UW`IyBJVg{6kKb^JB{lwwQ zX$hr%XRxAT+%)AC>gT8)aFtVe2KzvBzGZ0qfXYL!-rCP=AY4`^OmZaILyP;>q2qNd zKEIP#&OSlYvFW1UyqP3YmCovsHmyhuruDo>uy_{4Ot~&02=Q7x!xTX)e@g#hQ?H*e z`nL|Yq+cAOKXhR@S8a19fU7g(sC=`6-oGEjwsuEW+E2Ao(A{5w+D;QNmU^){V!QX> z$KJp0{femstk$PG7H-I5Ylgu4kmQx0Vr{n@R2vfUe$%$8UF3lWo%D*XhW=~06*|FR zJTKd|l`6GfDdkPx1h?F$w#bH@pFG*YyPxyB_gehvJa6u2yQqnYZc^cdsDysqjUlPS z(ei?>4Ce=Cu{LKiAnm|v&BP5$UrC*@aflwXe+cdYYADlERV5|_l$^eqZ@O;n_aC~< zeMJ*TmV{;Nx-e9%;IY%P@0YUZGiW|^EZcURmGqowE4D$!j&mTP1G3FY_B@yx$ZnPR zZtF3@bREGln~exO)r-Q-)A}d5>D`iw0uZZlJD}jHGF=A*{aVs+3g)oSnU%Pup-4wX z_Fn%cG*)$7dV!oc2BK=id5iv_KFoPGSh?yYT~FNj!2tXDPr+trV$sY`VMJpWNjSIL zxE_%ErvhaFhZkiyk-Dt5JoJ;wU{k`TXC0Tn$b8oIdKJZ2aw5y=oSLb^9N9DbqbvN( zWBE0OQ5wy-mwam>L-uTp@jnj3;BvB*tJKsZa48yX_vAxV>z0XSnw~o8-n1;^u@NB< z2$ZucDgu7+o=HMuv*D|6Ju^n;im${jpMmd}47qqym;iMfKcsF;t5;)nIxyrNJPe>HK}(42rt-YqkyN>FA_a(wGV?hxz(_kgM--k?3AAtlM5_S8iXf2F zTmyjC_gPoSG80K5BAXCOOd?+cP^Idnaqt%KGvbO)YoM;CtMT~_5oZq zy4mY3s)BVc!j)I0Wqw_c!FQ)`s5;aIdGc6YwunGSTKra8uUoSODvVXXa=xVePV@@z2}X8U>k~m} zm;2ZXmad7R%cb@r4(n3yt_(s33R7F378Mn*CQHs!n){k3$G<2=6hA^zMt<&F$5kc@ zw2St*&_X;1^5v1XGAU}s@^o{}Ec+eOT$cJPDbOn>5~-v;PvclwViL$D7wo6#>jn3D z(LiG+hzhmrxB^+Nr^Ni*k|w?=Y94&kIfp)t+tj?t@L&0o?Yg^o`_ccx;&=`z3S^MSYYafgmDR99*-#6P{}N z|6rB+)6YHjs3Y1$&xwTab`{B6x?W|{=b9e=3?c8Y&nY}awbY(KF2GA+boBo6;As>| z=^)!+LuW#T*|V^A6HEUfh907N1o;f_>a!4v8=|ieN5mi!BeAutOwYur1NX15nf?}!y2?=tyEN^!yIfLo{Ni7^~DI5 zKcz!aJ56)n=X&cGiTvHp^;v3|xQ4Xw9;>B+#T~VsqXzgca9tzozMx3;Im)M;cgNU< z-t)1($gQP>JvB)6bJvy@KCIRO{Y7c+icZehDv){De>45FK9%CC81VR5rh#Zxup3wb z^(^PY&ue+aiO3M#iztwL$_oA)dw;qRGv(8ZYW=}PJ>!3fwQ?pZa z_Sx6Jq>iOG5C@njn6`+IGgZjEbVkHSJV!41D4@Yy%;6N-AFIH6ahmy#Z&!tm1U2 zQkVEk6H$G~?og9zfh!I&wX%WYTn z>Yon=jc@9`?^2F)p8q(Vh*A32Ki}V-()+xHEvCfzGQU{ha?1Yh<;^ya`h)B~kv8Xd 
zc7B@w+7p)$?}+7tD~e}}H+B}ljqnwI##B7>vUaM0A|f}AprZb0*#yXEjsq)jOO*cG z);IULEtR8aC;ByOix*6ToQy-_izsbN($Wo8RKyVPhPzI^9N#3hj$b#8s$;0bwuA(3v{q@YeEX46w>?{Y zV}>qg%6B`g;LT{NwamT@xkwsj(_GF#t?J&`K16*TLzp-`DEN~ha`y5)sMY7wo-EwU zkd_}h7$MQHB4(L(1CN;4wKfv=(>}hVr{bwLpTN(Bdd5yLJIf~LYQos-V1CE1pP?cM z>*fMA=<1=#@g4j~3qt4QBmZH1d-KMv53vh}8V0SFe)n4P-s1lDXVRtPJl82)%Yp1v&b}R?}2j{DU)T zgFyNq(Ra*H#h#H)r~k9l`>{Nz{6yE$h97*D-XVWhh3JJjrj89iED|=6`Isz=98F+BWyxx;YlwScPLzr_$q8}iZM3u^81nA6 z8l$yXa`arC^Fyv@$*q7OMQt?hS013Ww?zo>xA+B#ag-L};%~;eM1z}rGexc{a`WSl zWiLKkD2hCz08C2*zbS6W63~-wNmE;wJ8lv;YB5ELp?yw;=@RE2lP_x;uHc%8sE}p# zljt;tLBuTjNH%~_af<*me~j%`Qz1}be_Q3_irhkaEP8?~A~Wr5(3618c=AHg7&({T zd9SrE~7SGL+oc!kw*Y6=#Y61Wb4yukB;ql-bPQ?E@M5-_cP{d z6M}6<3;mZXsO~M36?>8x<4f{HD(SVD?GIs=hS2^MrO*+i7xc5{U*;A(A;xOXfOjQD zLxLxvzIR1V0`}uMO3^QEF*ch@@PoTW;yTUHWyN_Y5oiVL8*gSq?wLi2Z#X@iRKWqp z?hyidf==pR{_VL0oE|HYoEusQ^*Yr3oXo65!O%rJYhVYx4yse}1F#vZ z08jS8?-lLl6(!n>z~F4#cL)-E<_IrpM8T{SV6BsFs}5HTjv@9U!3_8L-X!SjK7(Ah zmv3cP>^~qkovs7)>Q1F!O{LE#Y9-AG>J59y&W&bwe?)}&JC{X$&@eq_Z{FMT5pCJV zy+{1GK97_ruPD;W6Wg`FjFGH-|2}AMb(P%n+K2mt$koWEy{WD*_$QjKL1XM67HaB& z*wbj6NRD8?rrqYPK6|4D{gX+12bQTZvr@Teh1F1#Vg_3Kp$=N9Z?B7@KA9tAszeCh zN{b75M^n=ZcfN>XP4f!tti2DPi1l5405`M~#Y%{$G1Fxcnh=Rwxe2r>f2`tEk6-fS z;rtuJO5~3a_COfcR|_aH@Wa9dt(TRm;BJQ12VECY`pesR@sOo%C!1A6Pv|W;O#^m)zsjo<`e9$!b(pe`$$RsXqhAG}; zhV>X@;`0kkpBeIeelFg0fgr-uB2s6c#P0d}g_ca(m5x`^QoiO|_$i;2=#GWvt@=V) z4wzj1BZ9CR@{TCkHOscR?IOxHF5jU+=ksLWn1wiYy_mMvjtbfhC;P9v61xF}%kQ44 zQc%6ti)l3&;?q^uO6lkuL#$9;U|!E2<^*Hu)H z_wB}u99;qrqz``^;5dS1#|e5c%ML5)JqXyWk2Vy}Hs zTNcQWlF2I21OL0B%|QNqG$DNt<)ex}e3kpWFOhb2kQHDm_=HoQ^DQU55rCTTP0Ix! zy!j9NzLc*xqS+R2a}pUxvDxtpgfS-4M3HuutGSnv!AX!h<063jVl4Tj)-{vXOk(a* zqSi2$d0AehZN}-n#{hUrDlsWv``j=iHCz9>C$fb7_fioK$}+yJHS^Y4{)4>po2i64E4w>#itM=-07xO z3tF$4$kc2Scv4++5=$EzA4C%)OMagy{(Qh7VSc+Lz=Z9?8*?5&-xx>Jjdz#I9eH!> zxC(tO>-6mPIWQ1ZcdSxR)K7pV;xV&(`nErb2J?iGa-%;iVjb9Zq>9?63M7%8`4o3l z6X<%h6SU|9=B_!MOiUAn+-*v@+pVxA&gJv+^oZl{i`XbzQir1~wa3XyGHc|O@2{Hx zvl<(4M;_txVWRhChMz;o!HOY^P!rlT%o7sD{a@_Jg&XSWF~(&ZyShc99n;0ztX7Xm z2&!9}E2A+Mx*jbpahnt8BDDYnwu8n8Pi1}5wU2CRsr`l%lIZYZK1oeyu$gj&AuoGj zf_<_D7DiSC@L7wAoK_l1@Y;s%L%3uoH0C*$uzQRHwN~V9)L7ur9nN8|B6~PGE9P)* z^n;Uby>s8295TJn-W@E`z7@HX1|rKii&8|RR_j*j==)$Nc{JjgnmGbjp%c-zqM2r4uD4@jEreeR_wwJrRO1padN&2Gn(VUYT_3L%$yu1ov(pZq>Bp zx=ykBnKtbCCjDfR6xgfxS`4Um%IV^-%hvuOx0gF|R9=Fy#c8au^F&b9@mTWoh*x2; zCUEc&cT3(W=UgfnMji4ieO5xs-QGa7lhZb3YrI64oexq0k<3bbgH4%UHrs{_^Cjd! zG+yE!`-B(y{qFKjTC(Pc_VZLFg^W@DEq${8s9ni+eXko67tIETf(bkD+B0?Pp|Y+8&Zc?(?OFeYT*?g&S%u zSyP^be5q#lP^~%Lvnty>I*)Fp>Se#&B;DM1>7(52?~_pp6WsS?o-xgE3*MoA?VK@i z)Hl>^pO;0Ag*r4QLI+vvOId$67x1nouYHKQY_mOUgb}=6G|H>{PTAej?z0^OOByFx zjoaK3yN}?{N}{|GSO368Cj?dww5;S+Z^e)GVQA+v`qnOOf1Uqh^O}r!;C`{$o zp<+GKfFHQKfI?Bi+xZ_>$mXp{8s>_fZ$g?(AUqO`0H(@V0I`<}Qsc+lb)*YGN6~fn zHxVD7z`gI!TW09PwLfP4QI4k^VXAz(a2)kc52)p&;QJZ`O0of@GK!ib?N>(Jz}y>| zs+}}wPO%5yp9Vp`LfQfJf4QaDHzM0;-p+yu*7dj*T@|^XPG$;jT)422ko_v8jsXI! z5g=}7qUxP6;Uc+V(94}NRRAcEpw;3Af445Rl?$!4`=WM687&SpsspmsS%Wh9Vsg8- zZIS?_6cGMW$BmBN>sI7%uOZ#oe)u~TrgKo|v`w}Qik&k>nRn5}kf=b$R`N<#u#zse zIrq00ajioS&)nu17b#c%I?Y>iWPo)g`&Fr755cpqU+VedZ*0Eee@)K5v|wZa(|%J! 
zVbr-!s9`{G++Ghg@!2-1ighiDcBobHTF@7!T15TG)nsF$#cOE7=|Oc~rVO?S)V6~w zdkZ)r{>=T57dU*@>YSG;dYO@P)8WQ!=#g?Pf48R|PNYYMRLSAlI+A22lfKLLSWc4^ z9>zS`H)lIp2zq>EU!}P!I=rXAi%Ae!AYv3BkUX3pg)kCf>?!cF3K0HhZ0OmlLVv=z zwtHBEu&KqfUGcpsgK;2MSl-3O<%bcTJDH6R^_FnOcD;GzOe?V5;_rx!A*ZBoZOkvS z{i5q>QTm6|g)#(n;3YW!#ru63a+33da>rPu77fLVHx1vKx7)9;R$pCRw+jWy74)TQ zu-EuNuJz8O`h2b~`}>$G{8~*zQ`LwQnhK8F@ z2ma2_P=vRLuALj)%kLWhsH{&j8%n|XDte-G7Y;utJ2TDUqkiV^JHL`NJmsFKd>8iq zOkb97{<8-~KY>lM#F@;4Vqdcas#3A-yOjX_y8x6;onz#jwX zDME!WBX>5R=_nemd>H1rbHK{_4pVV6Ajm|mbnOO)TdYoFKlmabGffyY2kfz2J;EK+ zpRiG#y#G5DH(j;l+%1%_w0`Q{$tka2T2UI@DjbLuZko~x?yx%A@jMK*`eQb~1cOnv z92hhuu+0o3FtbMvIo+Zhr3~}w-UjhTk%Jo~NyT)9PZAlHq!7NaXLG+AM7#uimr1&N z(vMuZvxPK*1g)#o3{(64i;~mDU%n};yp4TIC|1~bkX!p!iIPp03)0uzZ-S&+dl z8=0ihFm_I0T;wa`)=78S&%d5wCebXd>lgMv_95sWvegfpVb>}B!$MY91EZ@&4c8xA z10r|&za+7ndDes+wtE40P4Bngq^(p}dv%Si%C)qXwtAmuTLu(-efw{E^mC}uQ&3H? zzX!e)73dm7y%0FDkov~44e$TjrL)p#gy!q!KcD{V?VbThA?PR`F@LGqKe=SvcSEBa z@WuV<15%CvE&P_=u7=a=+}(ry$&}cNbWGjL^Dtz2P*OzT01&lix9;EVE+Zh+nx*jr z!AmkhVlsXK*CKA{B?D>_MORLsU(_Ta76h1nSz>_%ThogZ-Mtm1d~{Ibi);i0F5bh% zz9q4KFv3JihW53K@WtH)LJEC$9@m@6hyvE@$O)L`YK5?>pG|?z`p9%7uB?%0q|BgP zzv#|_?wy*MhdmT%Udd$AXK5=`VB)9*N}A^=jmO1>rMnvxXyN@a9Vdw32wfAQb=-d4 ze$(VQC6e$ltE4q6T3u?lI7}IZei<$Vxl5235~Xbi${`pAj-I=7~b;Mz;Y*scMOvV>zHIbs0tmOewWY+vZd}0x5{H5qiig$!?~Fxq4rIm@C@X@maegPMb z)=hZs+1Aw>hxe#nXa_x z77{!8e4?H^vm|?W-`donN8U^+<9X~r0n{f~2R5R6BG1O2|HhTLMqwMoN= zqAR0J*-2xwTtz4M%Fje3sBsnn4U-SxFNC%BlVsyl%NT)F)6O7@Pp$vf0`wq8zP}Q( zy4pqGVdEOunpS0(=dXyy*@J4hIc|%#w~`|K%@=nPG7x*uKWz*8Zjv%t z){j0GzNb1UI0hdr`^Z~=b1=*-$?}hCLiwFB%N6bjgH0A26?2jL)JU#r%qUhNiv76p zs65PgPXE9~zbO3`#+SyVb108i)bQs}&7(Vgf6SMSs-ur(9bJ|mRlkNNG}4c~Q|JmK zeRWKFoR-uc#ZYHUADW}4u&m+=@$BWriXy;OZKugk-$LM88utqn3~K)zq}y_fs%CP< zOj>{!^Vt5B^K+vT7xd|#8BuDs3FxX->%Dw(?O*m`y=&~q7W77(KU!n{JKxyqZ_O~j zj2^-2;uD(b#H89K3w#!(>@<^14RBU5uVt^(PgS-Y_g?k}t}7(5<2T5wwHer6ZM)EjQ*24M%(?L-19Uyc~#TVeL#LxBY$oY#t-rsqs!phA{r z8i2IJc*=m9^>))&kBi`%UK)+Dw*x*iwo6usU%v$KHY~eUk>qW*k`p;GSH+iesNk}j zp?DWp!ZMRFT2K#$PbRHj+2F^MFD7QT_&H?SW5LsDrwiL5Q||%)9woFv*G0rvD`{4D zCuIFP&Z*_k-uq^>2X2J~TN*ro8z1KX?~;5|lr2F~HvD;haM;E-4grNc?*N$ZZv}aoTHnxkXOb(^tT3r0WXc|U9LUZ%2Fhgwm! 
z_`$&Y|J(ASzUWY_JLlX#!W_gd$~ zXwaLf$;K)D2gtIsx3Ts)Ug&g2+uEhW+;f8Mx~H8+R5^D7IAwx3%{W($U%*Ar^>?Ck z0?}`0yH(=i=BBb3?wE=(P$u3ML#eApIa;tv=UmNEu z{#m|}kP9{8gA7BJmir5&@d~dii}5z`E&7yN%h?M%Cx&0+1J~%;e`X8b+P2%xT4CfI z2nuTYNqh+y(LHINoEXfOoqxNRDJgOoJ~t^Xnnt@Za;jsfj3r;YgTH6`>z%!`txcrG z#@-jHp}Udn!G8EltE-#(CRgpE;BQU;)~b)AwuyS)_^Klix*$wowGC@dq+H9Ego<7AwhS8%jFXvb*c5WKCf)R9NcxIXixQoL|-~&bq2an+ee> z$FHjM)zu&Av0jw`Q{A&XRL3$zIdluCCHIe_X7#A*we08XY)J6LuvKnss$*}_BpFas z{{G%5#EkQ8v1(WMvGjNT0!kh*o(6YEA({K9y_@1ey!3qIY97W2$5S6p@+&7$TOh$Z zU@ZRN(7p+E(C>@s+_R`8XbLL?F9}P9kCMx(Q0Y=#iJdlY3J3CJJK}1D6Y2lvHiS7ygWX zR)UxRNf>?@@T_CLL4ZF&?%QtCS^U$utn%c`PeBiHL6FF_3rAk`XvfD_%J;6cn7Yvm zlSsK^fjwM%lXFV4|M8!yq%9cE0Bn+=J0~K>#4x(MCHOn#t!UQRQ8~M3lul5_$V^;9 zgoW$n2l@;=KU3NT?m4We9$Suh^w7qU@IDNNQcgayDEbL)H%!xN3;3(zn67Yjin=Q` z>ti(K9a;RzQQrvhS`!aC2uV7OwvXv=8G0B{pxYk2kvxmiU(FZ%Ke#UXcUh>d zLrC-Nai{16wO*Z=K}X)6Q)lJZDf;Qj|AX_=QV!(FwdCyQh6s~fsaz?T2&7R22+kV?6TV`ZEqpJC%kH(r-`By3#jNd1@`PbX7XWkCbxdyb3SS!@_r=A(IQ?1paXLEoDhBrW_~|j zo#;;!i5!a3LTcc+Ds1?Z&#~WFv`=wpPb4Z3HZb zdmfY&z3rl(V6@|tb|G9^#`0R^jJfm{;3%B18wKp2=H|9iZN0zNpu4A_6TUsK$<9)- ze(xwTbl8+xjJSFD38>p~-t@}{Nd{bPha02S85>QGcD1RI$Zt-*pLOz^w-eo}tUGSe z1!h)kQa4ij@wXE_jFAp}ckfask~LYKR%NpdtWhm-G!%g;z5K_!N9}0Nt@C&E3*#}a#HI2(G3vz3hH9{Tho88h=@m9DQ zkY!7d|LN;_qqcn}tGnOKVB#+0)8ng)lJi#S6_$Fg2Nj_LGZ@qabl{(UfA1OSziu(P zDqNT}In2x1FBe9C^3{43N9NfoVtU{&Cc#lJ9{*>hAi`#lrpcc@!>M7^o~t4c)Nt5iBV0&P$=HpKhg;U z52_!C0lTW}<(h^G<*6+n#`~YY8yNoNnE9 z#ADa31?kBjvT3LqPIIOB?KZYsU|adEF=B=Du>|?mQm?&3rx1fpeY?C_UAn^c z?iAHyRx>-Yt2!NOH#B!3>XW z-}T@<)Rex%fS9CT+{vQAKEVQOQLc|E9?S7puj#7DH_brqd0)1J3wvj!mBR5;e6FW_ z+vq~8C5W%ZT@_B%Avms?0)2n+DGH#o!_~UcQEp=GK3Ck_YDJZgvM54_>;U6|)pkea zL{thOwou%2Rgx--FR|Ni%HT@`BThiVCBkTuD5o;CQN}Rx@cgrA=lwlXb3l4B=q`mH zaXkT7%{gNtBPL;h1oHq&2!)7RY>?b_J(c?lT8sl>Zf$$Za^c9`ANURuyuPPg2~3r|b61;WcP!T^3~|FMVgUMoNqFv%Sb@(`w1O0{hM_3YvhZyhGetHFf!GKdG z9LCZ&*(&wqQ=?go@6Mis+h4j>Hrc8U@YYLFPqBpd5lwzp81`52ZDFNx<@wm`;P+`YG1yrcFKsH_I>FS4?uT?=!S4P?BMZ_lus-H4y zoeeyi6Y+BWlIjpYN-a_B%i$;3)*d@ov)gp!>M`gWMkIBJzFd4AxUkOH6U*=0Zw8QH z++iv3PpP-%E|gyICDGlwK9mUliN%?a4s-9^VL{*R1^EMp_tdzMNPbVhwCiJ4fnyyCP^&4bc{XJ_V?0zULT#eO_j^jz;A zx#rOx#%TUk+Mv1M%7d6uvfs{D!^R*)t0b~I&v3WI>n2CLIq)%!`lynuXq(j>DDPC6 zvVt8=LrbE)c@z(##$HG>OIi>kpf2WBI@kQjSsB}mqdj;qmsn+3o}K`)h@<-E|J(Ci zc>u1|e2{fZ8|UOY)A~r%g@4F!ywot#@GFJM=hxJuI@yTY*{7*#&7Sl`>=@dq%;42H zyE6a1zI_&iGlxLbH#~E;lakJWls*!!lwFEV*(ZBAz@|cFlR^eZwPvw-p|7SBGqy5N z@5ZK}hmeEz$vlNS&%vU7)MxLsEzeUsiciW!VI*NRf4{uXWew4nwsqzy`&DW#E!q?( z-cL@Q2fl*t9Y7j4b$3fF&ZS5Ur=H%I6$^h~;WJ)?Mdq>BSqLZuab3>KE{#du6JE_K zU(T2vYg{!)+%1${fvUyL$SODE2Il986{@+Nd*2SK_%~lq^{euRm&$(rB|X^)ZfX?7 z1bgjzcEbF>d5p&?3W1ocbsv_DcjS?=S0waPi_!&IM^VxK9Wb(_#1zJ|BsCC$94Uhvmc?9+(sei8F4K4I8)RtFGSj#V6 z%gxQ*WMk~wvoXPXOqsuIJ<|BR7qs_!bqsZ-l_j$`RK0T4YrmZNb>&Of4gF=x9R@A` z?Pf$|*@ErCyV=!yzY$lln)Uk(f)7LWecyWOiF z{U=i9zbyqW;&-I4FAw{B`BCQbeptO#6345`z=KZDKHE}NeaNJU5fhSP6X;GpN%9z^ z!k!IW1Q`<~14EH65Q{m3Z-S&c7$!v+z+5i4la^+f$S+}JxkPQ>TPBb80gbge@@Y7) zGB^knVKEKZCP==?eS|P0*9puo$_(#s#SheIjwgkoatTXgRFzi1wBw(JKt?t}!yBlp$o>}u+ z4rcZXd%xjT3#^aDwahGlb_s%K{Q($=yQ};tDGGG>026YAA+F{OLP}(_c z<}xLa9qjuCZscb}c8-|idsY%Rco79l7tF`vL5GfzXCpO93yCk*71%i{BN1L|5wMeC zyZaET<6B`Nj#TS-sY*;S^rlYG>34)+5I1M|Da_)!)ixW&?!=wJ8uFi}Id$Kq3oSgO{+L-gwUX~lM2j(e zmuIlj6kiFaE^koGCD7|vCD3-?jw62Q2wKoeEn_{9jrtN2P%S3zTF&nY zjvRQs#)wKi7&NG7CP~1n=eu)ZRyKv#^O=|+z)leIuH%C>5A5k4RO~V4_zXY-$ zhn2hax;OOPGr+y&ebF%pcL1DUD&S(~Er@bZX$&aZBW$%~-HckP) z_X^_LNx(H}&qqNS&g*y~a_0HdwszGau)Fo(ASIBZ*52%I>)!7&jfRQu0qgSVy$yXRe!KYH6TnZIeX;{wvN4f z*Vn@sh02aEHQ?6w@^Q=VC~cacwYLk#)6w9U1Exi0FbLgM8>m^$+2$xAYE%>5J{DN= 
zhpesL0MvUtY9CUJS=+cRpZDJx9g3UeNE48XrE4s_t#I-DXP4FG^3jF574q?Y&0O*? z>8}1T3emq^m;sqpWm})!hl(DH-VLNyRfxR%&vokGPyb&&_2muT{HZt{_1Z$rLw}1w zDjzE#z=aV!ISPLc|6BG}wdG(7@C8^|dJf(~W)O-gu6%=UQPsc`x=FiUOD(!;Ns>L+ z^TEG}4AhI13VYX$MgAMlDnbP4CanX=BxgK1v@DDjRWS0_XTE28(c|i)U19Yrg;t@ zv;lf!cLS3FFd`y=A($iE3A z@YDOv7@}k@E!GAo)a#KApTrx>$pW~7?@eV&&NV;ZX)rt9yGD(N=nIa4pzCDPcF}xX4r^A}AnZ?>4BjJsu#bK3 zh&!sqpJi16a&9X5iDa)>$d+WZSMCKQe9UmJvvAm=;cw3)+863cMFO@jp$A0=Z&O`P zzx}Di3KJ-RY{oz?Idg#p$NUI9hx53A*Ng-XIe&?cnMt&n&C;9rtSyP6nOwF4YA;A@ zA`}(Qs`@H;h{hO?Vmh1q5OJ8lRD3Vi<7t{aImPef*t~(8TimpKiI6o0)B6~@2^bZ&uj~Du?GtH=n&e602Q-6oL(sY=^2=GZDu8du_eYp)|l?mZZ|{MXr*zeLfvUx&|?P4qVj>Zom`h6 z1-${&{f8!}uZh zC*cJ_J_8KgZF?%zJ${gN*g3dmacSDJH6`KXhMb%@}1{&gP zpFF#-p{Bv!>yB$EA^Vh!RRvwkhv~KkPqC=p4BKjTCHhED&Nu8Lq72~zM0f;@_Z=b= zoT8c3@|f)M!E&@i5m{b_Sos&w`RV@6zs%V>(@T8rffflG*53#Dw2y(~% zVY>G-R&L>rei95+;=!WB&pZn8{nL0$f!yZ1^*J*A#VL3MBr>ESPl%4mcA-(OB|)fn zmTtG~?iRzMnR*=F1`ZttLf(^ zHDAsGdvCYo;5VkGul=Hf*CiyzndoQOxNFC~^j{2p;5!~od=dCwVTCO_oiop>C9f2X18GuDFTS+h&p`JigDPxhkP)|w^ zW~-PB3saGaqxV!S(+{Cjv3zEy1~oNeCOPlqlln#-hw!LtAS<6FH82&O^A~KD!Zfb4 zPhORgpT5ts^2Pbo3Vi*l`?Ob4@ix-2?(=ga3e)aBG^=m2Fe0ND|Cbi@z7 zyTq#TWP#*F%!>cfH;;=2I`YQu0W@bPOjGhYn!T{xl<)c5*1T_j&yizj3y1@3N9)-gb>G?-ua)9UtW1EVph zq|DzKA1xF*KsQeWb)J)!d>;{8)t-?xwWSvyEaf2rLg!yK!wgReo<(Y%>RHvTcn2SK(KzrQTME++eB9(f+lc&f?&v+l4-r zyW0*}gMC+2W`-UVc%t{9rc3?Rd*P{>ac%xx#I1xA|GWM2?>3=&&uMwyYm|2hdRkw* z^cTc6ofBd#em-wxd(v+yc27QmTLOmIOo97wedw(4}GB&m}J{-R65>L|H4BbV`Ec3}K!DM&L?fbquW4 zE(4bFOtHihSKxKM%Shl2ure_y!MQ){4*Nyu;>oM{%f45$9a!J3CQB_re zCXgUkS5Pi&8Yb=Zw&`No%<_dqnMexln^gCEf_5eLa}%3qc!2s3vXzZMZ55xA4dp1xF(v||ie&;WEj|c4o^SNDKg_?@s4i}sC|GWe489>O1I5D4c z4ZU0RdP*s8mzgY^Kajtdm#z^&X@S;BZw|-VE^BBhwWU0ZB-Veh$Sr(n=b#xS&)~r` zBF@#24$XcwO;h`4((F_KvIc^c ziD(gqCtFcO@$q7fNh}y8954K^(A~548?6gq7W%PILo*!E`b_hfj?zmF))FQa-*<{w z)d=h}92TVIxvCMuVglVWIM=R8l&2V{&t$5$c|&enMpLZgZf*pvE~*8+uH6LP+Cn^M z`~Ne||BQ#%j7R=6_;Sc1wQWBrkB2MB_OL(a5w=9F=`QEgFzarjQSC%svo?kwK}8kC z(DR{R#S^P0fIV!$nvqqM&+7SQ9MT||)qPO4e2u_cfWykzEk|a4EY6UuhU-`*GY$2U zK-!b_C|@M8%cXuiZiIWqNj$vSLL$zQEz>ulzB`J3Gzxp|Ny`AhD7t+mq?TnRK}Um? z`P5B+dhi}P3|;AqxexyK!A^Z(!}?XzL)gsD*5;_CbiOL{;i^Z;Z~1d8Yk`uxnU`5A zxmLyj;f#IF-Gh)c>=LYXrl6rO=zdx8$lJ9^H0A22>#>i1=XA9493FygkpXK#yFHa? 
z4uL9?M=i?l%F*Jk{Q~{+ImQcb{Wl-SBTve&ZC;XpvL9h<5RMv-(=k9{?@vjr050^}4iZ16+aV z&P-05{3H4fD8?ld>e##sSn&B0_Jj7iRUR2I)5KmZ|`BblC zVX3_T-QUzjzb4Gh`m?}h-Nn8>z9Wv zbu8YJh7Yy~Ju)JALgv!2Di&`KDB$xbSZD?G0=J~) z&w=$_{>%K9pmCz=2RQhaSWpW61Xw#h+!rWx>_EZBC(T?-o5y5H_zAlbODNf9Egc_F zHzegsAI7@RMflRE+O6g%3jpw9Ko{VN#*zdU17s}Oo>6y|fbyQ?QD638z576f5|8GV zerqW#O&`!J;Z3NdpzRU!faO)iu#KoMzC0|)6>tH6iz5c+#Y)7*x|zA$Hh-52*tnM% zR2=23hf!I(V#Br`80;Iv=fD-1CMfW`+nct*#P9IyE;IP^Jc&@lXgq*D^%(ryeK5Ur zAb`6%S1kZjv@xKg>@xR(8NLs{Yh4XdLcYJePJk%SW&3{;1ByBqlrt%;pRXM4I*-v zos}KmHSB)9CPC)qmL3-Qr^nyh^A&?lTCaevph{Gvv3<(FpifhO!7jCtC`047 zIgBOCN1#XE*|yQfNm#P}4ie-C+>x@c8Sqo4q`tJQ$ZO(3n2?Vl#|#5G4ZF~0YuCN7 z-huPjkD~?wc=&%OSqhy|-^SPCq1Qd=fz`ODQmh|SGA17hxJcj0xcvCfsdcd;k!%Tc zY}pRoQ8}p2L8orH^W2l-=esE}xeI%Zb8fb`n9H@IHqeu=u1utl#7s2w#j#E7ylyeyD?|Cn*cmGMxLl4{0m#ZBOKQee0L3fg$!8|!s+$C_~p zV;GMP3-gGWAp=_AsPJsy>4uD9N#C}3PEgqDZwP}GEIKRnN%(TO;%%t_T z`8oc@E0~o`VPX)ncH66d-f#De^u1*3Bw1jj`vsqJNIvw13>zy{>N6Qe4S$YfvTUU0uRn;B%%_n+zNzn-d^@fJ^iejTD3 z#g%hB8Pl@eR~+RH!xxWuV`(1o+CIxeK^%P#`dq8-yxlLabs3A8m}GKRbdo;PBkS{^ z>MEcAK$Osi2?dBPiL>_N@4hSt;9-%3Cf<{%A4>C8kA8RsT*h7R2vAAz0;G3I00ga) z7Aj~SY$wJ!U-^B2fsiD>?-L9NLPw-utY4ZH%p?^3mVjup9)j7sjR_MrdTgX_scHRD0DjPpoe_IK1K zS-O%zmkAs)!I1H|39jL4{P0`fF0Y9Z&Ek1AAvv%D7l4)p9sW-~5- z`Pl)_f%$MuFq2cau=L3V@onXT0M&#k;1268R+rRARGMHNEA|jv`v%);D;hD3`&1b~ zxEaM(bRhqzH2#}&P8jogk@|a%Re1;Pibzw6s2B@CaY8qs`_>Uepr-#08{nDppdVo~ z%m7`$E+A4!pv|LsqnU|}>15p_EsJmgT!s}{x+Oxrfdfw`^Zrwjd!b(r?naA{a{ zI@&MiP!=tx9y6}Sd$a4F@}s;0Vylo$14 z2-rr!QvqVq=x$1-4-^gQ%xt=m$M0`uv`{|S6W)i=W>j&88@?G4Ddl2C9DV^kd~19`5UYly=$xQF2}+ zyn(Gr<-{?6{u~M`sLZGJ9&Ybn5>ri)^CCa2>#PtE}y^|w!@fhI~@Nu(as93tGJ1VQnxd}mDfAnjZbt>z*r*A!qj|*hg z-(Q5hs2|b*!X9inoyLiWcK3wK+58QwzC)I|ICq16lZQ5Tt>l9uMRq%7i>&5q)-H_t zO+!LNp$8y_EX@_!ZE0C+o!ajgnh{TT4MaH6q;1494{E}4OG$z$CDPQ-^HnU=J$u*) z=@9mJEot#Ta#u5}uV>!6*q>J)AeSWHqUg_+Y5w}Dd|Iw*3H&vh{A5l1ekp;4^^ChS z!fc37(U?`}c%td-LRg9a9FejSmfmB3|DkZ^XOfX(jwSE%*U=cYBiBopb*CH*V0HN; zxC0z`B+M3aNEI6ya(6q=6LM_3TK8w>rZ7L#1HI8;5TR?+pTj3odz8q~snCW|@_eed z5+jqTq`m8dMoAo=W86sD2YPD4pJCq;OvLT{t;vwKUsxc*K4g^2w-3~6-Een)_e5dp z8OdASI6>6UYbmCY=4a=ThJ_P?nM?i`EI#VBpcSJ64v5;g)K1hze_yrR)w;EzP_B{t zEmeR~NUJ>l5HDURqW$cN&EOQYiNbQLPKw_8|YA{BKk~HQ}m*HiZv1U zW|956_G-C!(`)jxW_A*?$Tqi5_niohP)_Tx6M!q(H6~m<@m+j%B0TM$*NAvpT8OY%M;Ne$b4;vp6)Lyj_D^ zi3&?hq3xok_ru4d^LHmhlPl$QzoObe!Gf)RTMs)VrfaF7XQFp^E&wV!1Zd8*vk(7~ zxcg4B)etq}0_xoSw)I~}RRt>rhl>!yud;li$QQE|+iL4U2AzA8i-c7yK(zPZmXLU^+$GAtCkRZAs&zPCja%d|2pz2SfGzM3Yu?u5dp{X(6LkMxm<=FACv1< zx?hUh$Gr^hF*}uQe7*}lP4f4Q&(h45o~={D-!D_K%pY@=NaGkP;>a4xJOcIt147>b zUga4-esgJ7_%s8da)x~fERuc+eq_lQx*EM8gxpe@`DE>xX>wvLdjY5h>QYE07y>>@ zJEO>#pS-Ek<-o$hMkfA&`vqe$0}uD9nqLtTkA1VE^(0InUEXlO$#Vrx#S%*I2Hd2o z=_)b+pwrvUY!Q}>d`6wt&engRUE}Eqq42c`>0XQ4s@Ql*$ttpQpjYTfmlwRi#UsyN zZZg25=cRoQKVQdlNYkaS$8t@W^|78rp*_43t`kZE<{qPNR?~pn?6D3SL62on6vIkJ zd)Z!NGH2Y>bZk-Qp;i9- znW%B2OUAvqoK3YwHwD{p$NmM2NnN`++yl@E&=fb8T~<1LpCv^$jO&lxkE>1|oj@Ff z*N?Y9VF9QGJh%kw0_z8e4~gbEk14@444>^wLsJ^;s+OOc}qG@srBnv z+6N`&tW$e|D?AKJ9B56k)~9IP58>W9Pwhwr>|;^yv-7{OYoCg_mrOgHUAH#7#WQ&9 zvxgD|*Vl8riB)*v;3sLdKR4{Gs9Tb1YG#k82JGXGst-2J=s7cHm#TixR^tV?mQ<$wy3sN{wgvL_;ePx2uOf8(Sd~2}ftU!u2GulglhCheddD*@X1B#PpU8Qlp2j>jcL?>}>~&JoeOKNjTD*vo*;&81nlY zc2s;anyY;i>DsP8;%*g^_H}9IuM#+dn@=zO!p&vGGq7DnHG-3X?C0u34lS!qX|NF0UdsQq8^UXxqUB;j&bU@ zk%t6!YPXm$8EvAdPEsy;>*E>|$K*6T5r15G2ij(!c|gTG{S&s-s;2uLL#AHkRP;~1 zPL(6P2-(@~3#J&=^cTp!@*3vASJ;?p82%KJexS$^?s!v$j3$T(Q@ zS^J+-|L>Z;tn)NTrBh}6xw89N~OIgB%uWI+42y_)bGk&Ow|iwa5UEgU?6aoiwc+!dV-k7 zXNkQ|a5hgDgL0^6QgQpbF_j66D4(~ByK0%<72~f&Ld~h>g4W8$k%fx&#?OVA{jb&x 
zn=NIYJCAdnA=jM88&uQeErm2@a=A!S2IZB6={fL{)6)Xy1TlhWz0OfR^ik zhJrNJvuZrJa~^vDP_iN!0x0j&>+%ivos6MpQ3sbV`vuyKwwF{W1rRDa{UuN zBjEJ=o4?omd%C7P*XNCH%v2oevCgBQhxp8a4&`-gafGI)S$UL)4--_1;6ba8xtFVs z2v{zd9f-+`!@R>|hBd+8#dQ6tsI+Fv0B!5GvBRRFtqp2!==`}g0<}CoN`?fG40%Az z;+AS^bH}n!UbDwMtZzgBl^&KfkB>nuBVoB=*5mmckvNV_wtTFXiIJK6j$bCi@)~gF|OBcN4XA-PGA4|H*pd2 zq)}cRJD>L?p68y0wvf!7PS-2KJ3hTpNuN z#d9*6yBn>cDXb^D7!Uq@#m8ocoa8$*Y9?pD&b-C9I}$vQzNRnFxR)Z-Q%Xhu#1Sj{ zYdtlPrC^AWibVFTTS1SlFZg#Q*cv|ZtX{Ny$avV}Zke1mC|@{84<)GyTl#L`U+s%B zDd9uG8uJ9U!kBX6OmfXtF&VVuwT}B?>fBp!-s}j^S3cbiJljJi-j?D~3oCNk9~z%D@S0YkIkjYIYF;)Bb= zoN0cL%_{SZ-%1)AsB<;x^rHaX&{R+@XozfsJK=TB-6idt2fi;XRDCRO{g0SVzFV+B zA!^FuR!eCGjPkq#;Mt8;Rs9uf=Xi_d+?K1w=EUIo68D{U&o&-1L^JH7^om#h`8`hO zsyp{+)c(V65A7G7zyb~4>u#excuQ@l?D* zZ!!K#M$Efp)=es5V{>pv$FuEzb}0K;z9?!=c*!Iev!`3wo|Ihi=*)_ad#V5KTv@%&fpc!|g5KZm9GZ_y4xtT_M_7pbR?SDeJV}Kch>{%?D9*XSt@~r4|3~>i=oayJ}fO?U!JW z5jSMr)kIBFJ{frOw}&60-3K9e*M{zyW~+$T20c|(3^YODjW|F?0yn0=68Ce;PNGK! zBaRvlCDvnLJ-|U)=#UtRNB6QFfWg8~d`j+RTWCaytnsm=CiuxS zDxgbU4n2-AXBY1?QKrWe9)w1}=5a(43ITDod!b*Ytp!;&_(tseDEdgE=G^-UVN zSj*VGiPZok7Cw%e#`*@<#7-`c$TuKD+JTJ&Ttfuc1Odjm{4<(}t@p5Ow&9xmqBtY81krjl&lH!NA>?ehyztf0ML#db07H&&*Oo~*@&>+YeOh^1A|>d9C0 zuJ4TfS%|XW;-K#ja!qz*0d>r{{d_ZjP)YBY-zf?1?|4?ro1h*?n^pI=5 ziajPWt7A?qxp|xpkmi3cR_pn@M2aVG*#W#t^E}V5*L`m*Z6g+UvvMy^`*>` zlo&^H@0oFxHL>{dFw$vfFTb)f;AC$hXy%e^2P&DA#?kSpZ3&ayQ&Bnp#eKeci2v?q zhwjLKHS%kUSVLR_E-8BYk)lh1LJEiz<9eq#NKX0$2SqL7H!O1ZlD^+~cQENmHy?N3 zMT^d#Jnhg(vleizx_Mj^w}@Ts$OgZ1%s4ZHdJ--y4f^g?N-0ouq;Y$asxG^V1OZ&) zW>@{atBVJL@~}G{ikjGA(ZbU1-~~S_QJJ;sGwm)T^0%d-hV+7B3<*d0BU(|*L z{jwJlk*8PY>gTwDZPcXKwQN=C)}4vZr9gN51~jk!t~!L>3{0&{Y?9UBY^kk0Kxe`F zkv(aDSyl614;=>}ZwhPEg*~Cy)XL!pB~rlzXd=*sP;3Bm@2^oAhh&Ey{=@>5Y)(>o zXxB@Hz;o13wf|Qo3AtVX{%2^?;7`E`H$`e?FB9OeCb_lP#Z(_3+h4y)g^w79@I>E2Z~Rk1RIY zq4vqB-dv!$^-(gYI=E@R$s>~=)ZcTEcOFOviLFVC@;ym}+Nw`@d?qlXoGn_zp|)n$ zE7SP+N>9^%&bBj(-#n?~!otR`@~MZTMot^oQZ$X7g$C$8vRf6G_wC(gvl@&1;&Xn% zM!&?n^2BI9(gvy96$7~L{F&XA2Cn`Yp4bXA}FvstjHgCe_=J@d9aq3@}aWw z{}$Yw#Co^B$Z`&B>^WE}U-NVy7}_b?f6n^xU1`{RP1xLt4Iz`WbZs-XX3X zkk{55Yd7JE?nABu#$|9dg&3ZiE0Ig3Z2e0_W)-0b3tD0J}DW2=*576eW2Em$Nq#UIOw7bmGDcD@KOJFq^BZi*u=r$F~Pg z%N}Wqbk5~%GFY;q?QqNY-0-?Cz>i1Ej86v)$!+0_C?E#Ex`_3ZgFmX6xt-e$vnD=X z>6$5a^y$06SC0mqhi!m7xP}*AI?IB34n!i@Yca3c{=SZpFuiy#a*6)+)G{BGCoB%M z#eEDL{l{Ymk=A9xL{a^aoN1%d@mhW+>dwyj)4F&?{jtDx?a z54=g3^2`R>hh8(IQ&M`$5s!?#%t z4~HxKI*SZa74ar0(NSoBV6#R?eGnx(EC(rUS|mCn3n0XzOY<1BcP1DUC3Pk0qO0>M z(3u(xeh5F5IAnIz1&pL`8kZ=2c_%+WLz}+&DmR{tH|^dKr^M?ureJbLl@H4Q?3d8c z7);k8+oSlY0N7 znYedQk0K78)ZAtvvrW#)@#Pljd*Cx36P1SF+ARS+XB7OJgN>kC!h9-NqH7=L5#zN| z!_zaiT7tF;Qx`LwC%4jem5Mc5cJZhDG0wIv{=^o7X3J@)eYR_FBb zBA5ER$`Jatc?#67`WgG21#F+UfX%nn>DB%QzABvuui26EHGxo$-rrfnnOn@t1mp+9 zTmxfO#D#TCpjD$yiQurvwCEC1B(BlR+}_yVYxF_am8OCCqcs^bs4L%+m~}7eAgP}R z%d5!@A*P<>;>JQ}yqDE}M2=B+?mSa5BW^q&Fe^gTrS*=BN#YkcHu8Au=5f?KY>yC3 zR-o(bs;~&VK+MBinG&U8`xJ+-?>^b0+!h4>XhdbhvMS0C zd1(5fYTX^-_((2O65`o-rWAsUKhAe=+?d$7uhaqS9T-Va3ctZV|BK$f#9;%1*fY!N ztBBaQHfB#E6(j6Ua^1hWLh=M=B+d$h07Hk_qkpBQeq*-s&U6}%a0B=sQRO~J{c1*w z^+gySe;9cYQjP75V)nexM39tNsS#pMNcZ9YtRu(HLnl<)G55}jPYk{_1z`UmeB96zAL-)DhvLPB;9o<|F7Qo zEM8IB_nSND{GbKpFmN{jF17#9+58{*JC%tr5UG_k^tR{OpCA8Xs3JXzvsat`C4HE_ z;L+dyEeI@+5btLKXaS7Gue$7|UoX>zl92f3ALk-CnpqRUb9)uo@Y4r-bb`>YSK6|Md5B3;OlEB5^UH8t|@5Cl}}sEL3_T zJ*FKn#F$VzK|$^w7a4SR=h%KYOy$VK;Br=oylZ}3<5b0`%pq-P$p)u9eBco5y0S~> zdqio4bS%!M!NwydVEY{DC76z|DlH^zqj)=ONHhG?crZ9CR>g0P^!}XkBV0%*VkfPGPe6&AlNk6 zDI=poCtu+Qb&SUX{5wEd!&3N%0#4^-N=Zvl=Gj;GUj4YD)10rxXE%W|MXij1 
zUc$~(JbZ0e;Yh1hlKWQKt)-IgtWB%w{a3a$#QxGw@8+22r8x82(pCJ|)|USW9~e38Ot@Z9G!L?S*X@S2#-C4b&}kU$bvZ9KleVjm5H$gn#D&O0*kyWeP%3Vy zeqT}iJq9Q&Wu81+@~b+et){L=kA7XW#MI8TFsino)YDW#+*uKtbLw}5Pd+sa9OxCh z6q!o27$6f8g%H7(?5EiDhD*zCq@Z{8i@WFYcuv`oA-&yXUNs}^u%q00>k8yupv|9h zy|Bsnh8kvX8FWC1T;2v?uab}|${8f@}js6f)16Uqj< z<<1Qayqqm9eEjNiu``L-fa?eS*U@_Gk^Uva%UY#Ze27=2jemIJBh3$+%40qJr9&jcD>K8c$H|6+|5z*M*?^bp)sNXNr6I2qQ<02tv=Oc?jKV19+7`GJj^JKmH!G zgi&b_qy}Dk;(k4Z=dIF+av7h_1jk~xO_;Ub*Sc`F-Njdx{>nLS#3qU_zP zsWH3S#-Z(0#;*7>b(OV4uw!YE{U}1M9(I)XbES2sG8(#cH=RD0A5^~3CWqcDEBnvc z)1@?QWIs%deC^kHD~-V**t;;P+<^kW=B^5CVZMJh|5M8Ur%d0v5(XOfRNH3L`QA%% zebujjT76{lQ|Yc=GKKQweEuyMYBg6`Esp~b05i!(y4dBFex(6a3O)r|!TFZy$0m+Q zg43i36L9H>umDTs5`Ycqz^(um_)EMEcu&`xjCR~|{8sVL6#;b-yfFK&U$$zqxHc@A zE$~7JKDa1mgNB|lZ3{D1|FR@%;&6vQlE%lUW_xz$W#(C|zRF~|Vc8T3NzDPF zob_mx8}q?Pm8PZ0tFiXbw6*jfy6i5mIX1ynfG7996$MMqLuw^8ye^~fc*i%g=W2NY z=L>$T%W|ZV9~EK@c~DH}7aEw&gs=IU);s<(BHQ^9ZY1>b>KR)^c76P#j~<2~1Pq9G zrmHLhUzhD!4}K~U*iW%v-73zxg8#a@RaDGjE~{u8w`#%O1*U)Hw5_J?ca$1pe}e7Z z;08Ac%A}PYo_~4iE?pyj6}WwpakTR#Y;`fSQcbGby3KNUlnH=`MBRYUt)0j}Q(oLvx8gx^S>7x~q`!vzC+q@7*I%e+Eh zUZ$Sfm2^{MW8@OM)pE~$w<*amdl~*WABwGef3wSry=eG(P8wCUclp!7WkC|R9*2do zQ>pW+apdZOKP^Fm8Lz(`@fjW5^l*fkLifpc>g!=7a1_gX-Rdre$s>sP#rk0ndgphC z!LHZ5c+Ezds74obhOiv4HNfnkQME_0s;u|WNio(K8cLXpJ*fjOn*teihzsN2r$5GY zGBVt|1}qux(SXsWW@ph2axB>CY1_8zd_<un#Ab7VQn!eVMd2wk(3O`<9BiO#ewY84N?zs9=4bixY4M?1)03UtMjW8&6?MWxIvt8U z)7t$lMkb2)&z6eRGkaSM7HH<&F!!4do|h06bgPzVAAP*(NvPl0VTW7P8Lx9p4_Tnj zjn=l5ZKnkaGv2@8HMAoBf4pRbBdg~laP z{ERX)CN_>dbo0A{{l?J^QoWx8BP9Z!i@g~ldmY01xq5Y`G(v%ks)o=W3=|%1q+wX- zsgxttW*YDdd~U-WOvGR`x2xwX+!hx!kn=qF417HzrNrf;iptR58<6U);uU#YB4ShC z|0eG3v}kU)x*4CRAT4!PVBezm2@Aav!C#4ElS}Mam#x-aipYPPM?T@ju zRS3VZl{ITYZehUwz?s913r+9-C9d2IFJ;Qds;zQlTkTs8v{6v za1xbe>2H>yhfD(xLscx40w@bNaimtJSy{$GUeb-ST@$yx3&)ZJNMr?j$VA{k$}aig zg`7}^IqSywc4)g$ z|6tH;rp9;Lm*sQ92Ljh&rhA-7!o*S|0JQtq$leJZIwXW~5hoBFF2W*u5C_ss;w~I zH~){Tw~ULbZ@a&T0i>m*yA`Fo29Rz=I+c{}W=K&wr6dLkk?w{8rMslNK?a5)2N>Yt z{QR%`KCk=v%-cQh_TI;Duk~H)VCeB*nKgS-tV2-p-+%ZAqgoGU4kcvyY+60~qQ(yr zch50;Uj@2tTf5_ShF67Vi|)W2lQdO z+cJRx2ID6aL2QS zs?*fkDsfpMT6?%IaXV@9iB9v9@@eqjgZL(Z-e+O`(i!eB3Sg4ZK(D2k5l!A z_Dwn%b$ZwiwQg}Gw5F}z{j`!3A~`j}VahLPIzi#*2hF43G^vXj@p(Ime$eV#bYSR& zOv(7Pb{O5#vOa3v>v)5SL5V50yJNX@QinhKV-Zz-DdVxR67Xv^$?Lfd20uDb=Rx{r zp6p&P=gLyC7@%-;!D#;7tPDGJeTJe{G4|Wn&;uRfGQAyD+ags1@!3KJ_S8x1Gjiog z=i483TP4!wH2SE&r<4NhDF{7NhYrz;Y&?r@p_^@0fHGK!k?x~u7`Bz>_+14L1@P;Z z5e6rQAO`QUrBleov4`|Zqb+8ssvfk(=w+*|hUqcpJRW|OGhh|%=(0?R7^9l!0CTfg zZQ=*U8iv9YUz*U0yulfLe%UHceDw^sA!SycLP@DiQ@hr{Qu~pdC9A}tjIAnp$2goJ zWzCMpFnrNXD!w68@TI;p(Z@6Vm1mC`Gg86Avumk&!62VQh-F8EG%45LG#88c29__q ziAp^8UAeyRyQ)9VS(JF>gHGd1oXh`9I_$(q)3umaeOd8rt3M&3KNl?aL(qYt%-bOa z`z`F1H(@~PN?IgCvW0yQ{iSIM|ES(Ov2QDV(12QRbqQ^$;E9M&VK5%FFAs0I8^2*- z2s@Yj{wsk&;7vKDsp9xNsPK~5M~6c0T-L&ZA&uo^z5qoJeFAAj=10C%7IJ(GB7~$9 zmJN|-5pDFCAb*mmu^xX^>V$nr{cei-;4KJAS&P8mxtbzxGw*$Y!5c-E{8Q)rHSe@Y zXVlR+{f-Z!siB`(@w3B)gG zn8upYL9`7NAj1M9erO0>51^t6(@MY0aH@%=C2F1&^o<%2iJ1C$sr#N4i6U%omBw?$ z!ow1}l-SLM#oBTc!sX>afqvq@KnyPtMZ4jrp(e+&t0{k`s&>aGF+cGH35lr&GRrw= zb_}3{Wb~0?vjVzJ$-ewN!=wJ}js*&{x;F;cq31cBA064QW5yBp<*`^3hW;9r=g*QR zuLj^&i+{wQg{*1l6}d|=3}uh3_!pPjxPB2{=;wyImU1@o-u}3Z6l%zw9bKGuIoq;D zU|sw&7X-m6u1>D^`AlVCr($9R3@Nv7#j0sL4*U*}sXp;;DP*v+ur`ZZ?q^(}>xInk z=GfAo<7B3;>2nq#hKXf95TEd6P?!u*clRJyXaD$qpa^)_5219Mt8lvl6DnTCALTpj z8D;+7dFX7n|NJlcw@%6xnxKwYFfy_61X-r>CerD5wu17*J z4;@~=!SR}mBoeO$-xbRh9RM6ZPwA?!q}tb5XsiG@@cXoS?@5xtO+O^XNmdHnsIA!TEQI=&L=S*q$Jfh@UB z&qE*B-Dd4Ldfq*}L4y@M&yO}Ot7>pAuio9W?1jGUGm`+bv5VMc5!tVcF&eOQQ=w~M 
zx{@I5WS{ubbz4-J@>{BT^YKncY}-l=I!%?0_lPMS)nx?MPbVbAx&&S+)yOvO;wUSe z*Ky}&N?qqPpZB0|MfKCG963TqFWoj;U-3M(5 zNZh4Qk;$xxLxeUus={8`;KLJ=32Tsr)PJ`Rl}T$Lqs&JK@&I3j*ofQ49gI!8XvC|H zi%H%4PJ1yZoIiMKt&_WU-&~sVHC|7pTg2(!76E{Xi?jf^CnOLs2C(*p8UDQwxk?=5Xv8hVlr$;=fJ}mY;_tUNg$`_GL^FvreB%47IZ4;bh?S9i%`5 zloDC!cyynhzj(vkw&HP)JE;yizQ|!TDRt(P?nK8xD<|t9FMKqZPGfyCyR?!Rati0# z%9mM>Tdtt{byJ*g=N?&-KgD{uM-S57 z)#kiF>*{6j-Rw~JRJqrhlhnIp|BWZHXzbdZ92>C*| z#t4@D@msqVA{QjhA}@DP6knD!#C6L#bNzQ3>JSN&W-f1LrTsZJ+PG77YP?Vg3-VC- z_NYpy+%L4CC}vz1PId+qPInJ$+pfK%i1a1&_~<%UU!^P{=O2wszvLXT)>swDZm8tt zc~7M7;*M9H-t2(5Q9!M%qiX-)p~G&W==OT}COnvqUIKh5;kKJ+X{ni#7DFnHWbEtL z=M@g>^vXAe`Q1~U5@mk1l%JjqF7+54F67^u()Zt@`5zWwjugJ_mPnjTe|_>We{sy@ zvQF`!HlJ4u~crfS&tx9;noY5rk|bWD3s!jA;#nV*<*Dtse=Y`8qQ`ThBnlJ05MesyD9gft#a z4SbZ1E=OE|v~r*yg?F7a0?Yb}(wTNMAk3VKD?+dxo}@ROeq$8%r#KnjXRT%KEPMa# zGW!~Vc|>|Dz%I{sXi4xvEQs0AzW>pbe031z7x;s3O9MOE!7JX^!9M8AN&jGav?~PD5O!qlfTQ+AFk)Wj>32)He8f#xXDhnB%(tf$9+LP&(|i|&fv4S)hV zO8_nLRQ06zfM(z-AUjtMN+biI1hU(X#bxmGSQI!@S-nc%}!d-q3w)Wtlr2#7F-pUy3_VePIm&k4_@3C@)m_WFVt9j~gu->SO+;H8Q<; zmoJM0@OHLJ-A{)7J^h>bPr$^W0!3f6KP-)}z*MBKqhLm%!~ixTdX?Qc1iU}Bn2sbk zVE;a$P~c@%jh&q6GZHgj ziUJFZNVgAy02PgiVA{^fV$(E%uB^(hfP^k=-X&zn>TG<_@E>RTn9Z~`Gs@1E{qltR zr*v%>(Q(47tc{D0SgUQ0*AySbWu#Qgl9%3tXnT_Iu8Cg8TQ<^t8;^u!CI6vRvk3oe zL2TkJ5Pvo?2**1E&yeCh&BBEB%HV*HwvQpR~oLPNB?}L?3I`1~v3L_CiiP2YhkAkJ3LYB!Rh`GZ|3xgwPF=luU^;g-eM! zn@OI+_A_p7>L#7z7d=1$6UtcSbh*3?2)+*mhFjj6FX%TwK3!5o8Ow% zJJBLac{LiB9jCT~^$T!HVpS;NY34dY+n%mk9hz;m?x6MlH_Bv}<^2aPOkjJk$+_EV zdqj;P#K!GVipAn0RWGu&b$3{V4|bikInvL4-4#T|x4!#vI=?UaPFbR)^}jwu1N~Wf zQ58y?c>(daU7sG>ZyP2%K9rmqGK7ojnYi`(!aJ@uGm{DmEG@tIdi%?rmJwAZcALPI zykGprZi}oK;!ZaB9cCDR%T8NMb5Ro)WAb@2ljyL6eHB zJe%ARE8okmN0n7E+GlP2h%m8{csb`l)E?8@k@|fa(T{#oV&@h=vH zH0MZ@^4WQ~d2}K3XWd2Z|5gS6VF6FHNalBMq%3}`H|(JObN@%b3+KoDAP0?5@TJGiIo58SPvjzZ#Ch&N6sG^>GA zCz3BxIBI#55-&9ADb>DQW0HnR{5@khxdufqQf1n5XryJb34CTyc9ksfeAOgC&$QFWhX|RUeT_&sqc^b3}6&M0#iT8)z#>!}&sVSKX+vKM) zhTsRF5j$)e;N?}0hcx!(FfroM2ZnZCh(&PZVt+$H2n zRt<^gwM#COJ6Ky}NuuI8&G4$Z?{WfE3MuLp8nbmT_&1veBK!F-==))p-I{8tv1i5_c4P+IX=uZ?Cy!g7hpjc@RmH){AFjmK8kZB$^}`y zkO7!9?d~Y!x5^YBk+QzNed4MCxg&VSp$5q;xO>jaGEqdpTA?#Kgy4=7JNXc_Ez>kO zbc0@WB!z}MzN97-fCkb2p3uHrme5jX(=)PASK!k2gRjHyO9rS@<(nFC-FVMSp;e7) z4JaX1$6W-wP^q&#Ut@^kIKGi>n+`Op?_G>$`fP^PRip9O;a9uMa-~QedW_SKuLM`vDjZ_Ek zF$wHNVZHW2J}rY0(8|i*uIlbrN$GuYCemk^V0sfO6z~ylod3P@BDg7{6#g$2&{eDr6xbb&)u>vv zLoDn6t$vNaw4=|^_Dav-LqvwCn}y`XG9F)pU;oFIiA0!}6{ixDxOlQt)fDe7QOTFl zULOmOI*x^-csIFLZM0~o-P7xE%UVd^)yqL?Y*9{|8bdk}!`fP}laIj=O)%oZFMRe&cXHkjTf|XX z*iS6_X^UJhGmeI%Gqlw0cKMFZ*qUZa3R?tQ7G?!X=1YudNsPn5336p_cr~=eN^@^0 zZd7%hy!9{uKeD@~1VKeC()gHY$c&AQi&Pt1YUe64g^8_nlBbl!`c|YIO$RjxX-bmAwWKzg_^Gvif){QTOdz15Th;xkrhwq83-y4Nf$n^ zIZBnAi`&%Mv7=7R`_zYf7*0Ql6&CF4wmGNWQS<9awG(-lchvkS?TD|3B!g*iK5_<7 z;Q>k3!V<=OL@2C;*ABaVP$oCOC}e8|Kn}X4#)zD9uIog!N3sx7m>y47lzv=O8AuYu z2;OQl^eB8G<}+^X`~2dUQcYzE=k=d*0g%$}ODn#c5iL3O*MgiXFHqEnPV}=>gE*68 z+1eJGqwtp*IGLGkcO{D}0d;lD12Y|c#XPqYzWbkT57^WrV$Z+hN?cPl34KK>C0wfq za(FZbq150w;+sBaVGN0M=7WoBQ*T!Y&-0F^s#?gWIIb5NBGY%--oi7f5B`|(Y0gJ9 zoV9zga1=@%Dta&Oo)wpdacXOgoi!;A*q;wcs?CUjL{>KTlJ&()>wb_XPo}Z=C1u{( zK@z^v*4W>+v-c@;z~ubx)mLBk9e~$YTnFDu$KD>d@f2X?h=5<#~PbMC3atBkeZuYFU|k#_aCnRO4NPaycIw1d|TB&qi;$^VIuTu zHFJp{?V~b-j6)tf5g|6NB9pnRvZ2*fgaQ!kq>Ekv0wr|l}bKue0ld&y?&Pp)vtaERhM2hlS>T*gk< zjo;1Ni^iL%cL`Ao0=7L*%;$M8QM2?llZrG7(|S&u^>~$$OThwyaTnFyPh*at9`AvF z(JCW_*|8llWxff>2wJxZ4+9=onYtQu=AwNDI-#`y<}nIql?4IjGRIg%ugibb@reJ&mmS0?~j%4G&nq|>qlXyDFDG0?`6{W9q&L0*< zEFmmdy``o`#eifyQM~;p)Og7K;Kx%lqPX8Ys_ntoL-L`C`0;I;`Ko@^|4;!AE$dOu 
za%7-!to~5G*r8#>&}yH}&7ZadvK?FY-9MOo3{eFL+U8mwk2j0!2yX~%LZ%51|A;}g5ec^s?1TyfnSq8IdK*Zee5c0i4viI zXYY>t75l7<8R)9wP_EhrcF#iL6D$c)+%(6uljRDJ2}-O4TDuq3ETT4q(_EocH^+3$ zuF6gO--L)6zn6u851D^jFyL|Gk{EHosbz&~J=9~R~I84mQg=E3*2rgJou(RbnQ<2fJ&AM^O}5FFi$wDcV0a@VYv(hdWLR+|qIf^-H?@}S}b%@&tTR_?}k|dA@ z%DaG|7z_)1H?wjn_I)^GZ#?5!dXDEC$@BG-{m!FHw}B`*i_QE-yWHeJRYf2@7>|mh+Q7PwGyiESKA?1>ULtQz5NJCV z5mHDO^wd1LvZ#R1VOtPXbct*^qu4Isk~TJ$R2=}`$xu~|; z5q)>~1j4HYV!6)Xp%Zwr#ix0cHuTO@fu`inh|P+>UUBhzchH=o*zRhO+}Hf!J`4Jw z->gV}5@Q{QI_}F{Tm$T|8w0~NA^se{qylh(MyqPjzj(Bcj{@9t!{tDBk5OTCsolElRJY&17LVWCg}dInYL|1&n0dUzmhk>!s z4+N}^+y&Z2sH=OX7I1&CqhPr2@I9|$v6zS4lL&kh?4jdKYF>HzXNax?XG zx+B~HD+sts1vrb4f@Cs7{R*IvKq^@Fp z;6FvmkLUN`=%c0wBh>Ct*=AbSmtMSSBlbMHE=*q!w%=p0Ky|=6iK{_;96BzLAxR=+ zywn?QPUnJ?Y6S3Mp}iIynjYfT#k5ZRF>2!U_cn?S@GKFF72`aOg#Z^jL1!08t(B9K zPzSi7dbv({@+1~(OrjP@>0I7ru}~JW;68%hb@}U%CmL^@DtVFi^pM8#3zsl<2vjDt z25<)m>C$6_+(Z0q%}j!R8M@7RrT2ZNaiX<_csP$ZPYYO&t_7`iBMwstqg6;(P0J0+ z${Wm3u&*8kYE`Kuq6F=NA~GB&@17jnwprz!R@lYl_tw|&75imztf|jk!3joVY12`t0m5e8a91HNsVy&~g zQn+cp?-DQM(-?R77IGW~@3H2fqfC&xQ9d`cOQrPRV4JL8vM&**0Z8e(z20`njIy!H z|3*X9d=R$4XK~*1(`D`j_0k^>#x(pPw(9VC*1SI-W|!QK$kKZWM3#vJY(|jjS@jxi zPiwB92K0Tu4U)b&-%l%TwL?$r(A$e#yT5F5Lm~Q=ijO-sf7Wp)xH#WuVyLg3&JVlo zyuj*l;E6L@_MW5f*c_N1OSsQ+OynPD2nK7bXM$edT*BxpjD$66NqBwt7xcIGu#PA! z&8W!Cm&=1q0yic;Iy$QN$NhX_8%LMG4EW%rZF8M%LZ-=x_S_Njbqi#x<;U~+sT2== z+tp*k)GI6JGE(YNn)5F;^|UzdGr)-257Cnx80x$uFB6g8@sef*B$M~7UGyw@ ztrEM!@NVr2iEcBC9SwD)XV&$j^IJXf9SxtBrS8urfK!M7gLkyt(l@Y1BT5H0ASZ8-O&1I+barQ`LL;x~I#2~pB)gNoMGfl#J3ZxWim}B9iQ7dCmy{NR&u_{3q5Rvht zdHcyi5-JDfXGm|PvxEA7k^G_m!-g5BGgI2S5@dVz$V{hLA=6O_^Yeyn0kNIcIMi zzC|gco(Z2A%tYE+OMG*Q!{7cnXk{@Mk1WK}YUg|LnHKy;@^0V+`TNJ+*2KHdec~N7 zDG9Qm2vPk=A*+4{i>$NoVd8UTIg|_NGpB8oqmJ>r=J1AO-dzDux)8HvbNB@!_Ii@b z-yRkj?m5=V(m$ynFLqXpe(b}uWKQFmJgzvl-D9qS(UsY-C3koRKabEn4IM`045%&^ z;VDOYB6Fy}mk|A3e^pRyC8ZR^sk^Yj6-Cs@c$#j}!yd3CBAL{V00*{xNRq zW4G2je-C0hd+VSSer(_om_nm zYHr+M^PEFfU`eNfD75o~5<%AvD2ub&&sI^a(aXxku>>{4l>0S}Y_R<`ThFQ6!o-oE zh4)1L`*hb+QipI4c_BEKC^lH9VdDOUzKUd_esv7lqO~9MHr2F^)agz0dx6W6;LCWQ z9Jl%DzDh0wtS~y-;Z=PqILF$9cl+sxCE|AJIJ(hkvFC4x1i1D-97$0?C*Twqc=XvN zczsErQIfsU*lu{oB4~#7e<;xR*I&DwyMvxpsRw}(+l@zU3fV0)ZVciJk8>mNuZ&RH z|9@_%0`<+oUdxYN<~6;IOZ^O$E&M@ex5gHK{*dDcq65*X(L%aZWq9A5E?f!79KBWr zVgT;|HZp!P2223}#s|IOPr9kNcDD4lu2<=EqTJQ4012;nr=J|~#~^+6M^sT*`d|4lygGBR z;6I(&f|XSd>($5&DAtcWd_yFb43;AElO!QV?Ij?(jB^h~Xj1R{fU;L2T|YxGsnCb4 z8OJlS!U^ywX07Jg(XT|LPjqOT) zy9_L;q5u~y^JjCbt`4Zbu?5MR%{Lc^6afU3L{gwm({{vI52Y9?&G$r0r1$G!=6Pd( z7y4G&W75@VR!9w=+UOOtIigk>xt8lOWkD6_moBwsEDaJ}^!ZEvveAVEJ|69R*6{*y z8s5|cuWV&D3qG3hgzG<^h+w`G>jv|+U|i0UH%j!Jc)kz5!IMgGHZ%gm?aNvp_0n33 z>pS+74YPp$dykTQ#-AI8aCA8FFOtwF0jdE&- z_T#Ql7gYM4?KizXvcBTcze&06J#P`Ph;i_Vi-M^vBgBViQw`b8fo+OEI5EyOPs2C$nkMxLnY7Q36!xZ?py-N_l=DeM;$ z{UP7QpwJh)Cq#QPxh5)7sr%#g(+!rI!|S*mq{0;w>|}7HLg(`i1x=Z1)1vEWwwS=! 
z?amD1{H$P;W(}zr_~GutVl!-Z@M?PrImo^vqGn`Rbw=`#7K^NYJVM=5WI_em@(VC> zE*H`*&(i-I9{9Ez_2#<=GP%dIzK~~;Cm)o7>8RlLjU8WQHhgHJSgY32;Z#~+*-+6t z^~fSSMIk4*dqC`Rht4_3q`qGGE=y{(4Jo^3Gy^O>>cgS)l=SDPbkyKBA$F=WL`tsb zsK+d~Hf4u}1Iu$JNwUYjcrEsb#6IY5fbZvr?&6IqV;869`VW=oxP#@1SIc|Nejxsp z^>=OiU%ftDv7Af_9RA*Weq>AoP2GM0%5J&Ia_Oon3Rjj51U{}n=<-QcH#si4?Hj}L zVLB2c`D1cK?5=*etvrjlpmiQtGO$zZcV-O3qG&1nYjvE^ei@kB89(fl=^=*8ughrs z73_K#@2W`NXBz1=nzi$ZLEO%Sn~ALFaRGeL6Qv;9TI75+Y} z&WSIABa3_xPlL5>CFkAfiDJ0Uf~ms6tl!E2I~%3kHucs2umC0g^PV_V?nh&O((`M9 zeje&Qd}KT1yH`oxgHvEDxB$QFQTweK;!lcJZ}rjx?FtP|zHQS^LH=ykyZ)Ydksuze zTJN03-lUqlA>-iny1xnkj*#kCDtXMGxzx(u#ufd0XaA=;C)gDD*bAbILX5RpR*XWa zyT6!H^*@}Z?Js3SeE@WFtPTPwy2^=)5})5+YCZvo92pLi$Kw|Q>(LhY9ZhAF7QV^| zt&7V%BgVl&Z^j8h&qpIfTMGH0M9XZ6hCxbnV^->LuuYz1r*OU29#TbEUmd`@l zw`^?I4ZRp;7+n33UVHr!dpS8bZ|xi-$Zq?1o-mMIt(ReHvb3{GXlqD82&vgca{4F7U<0N+)RmgRi4t zg+r!{QJ!!feH8vr7t~`PGKTmec4S%&en_KtB}VJmNJyzy@`8N@?ZdvrSt^y*=gP7N zNdXuph`ld;Gtrh%~t7+BKq(sGX4jwTe>;BcQxH&@SI zm?{3|`h(lU-PJ?NF}!|S6LAD4jEcH=srZE)uiy}ccw7P4)DLlK=I@KAN;y`_6JI|e zwODh3lY12SF{7>c-Hw)oWvPYauZWw+m+{cjUaflWd^xT<2J1!RaYI@4jKQPN0KAQ2 zLhD(vgMZ$>NYD0tgUikveKViI2RZ=*b^ z`!zW~{{mB=h&Oaf#x^|<0JvMRJKIdq&nBC@yO|P&eVX1XUVG3}&st)or-y{|!=>Ck zXnlOs20s+z$IS>|yDxhVt~Z~3CQKiU2J1(goA|4gx88h13`k@Oe&j)$gxoB#xUJ%R z>66$xr7rIrB* zCyVhxUI+{R8u*gn8jl$phOGyL9}$Mq=sp|J`rO81-(i~z`P!ddN-+xv_J{40z_~q) zn#kwt;%5!le-VoUS-8bLhViDiXdIdnIzjhV^$u%HMzb>ton-b|$8tjOl!uKr0gj$K z7trGa-P%VE7QWxs`uR=vU-%IDOqG$i`-f&+aV`~=sf18pmIqBTLBuni0j{3kKddPv z*h${TNcBK09cy!lvyhA^;eMi1eWDhmZ>f5n`L!Gh*b~rL3qqO|wU*(kVw(kWT;{;% zodY!)93Ok^sBt*xN<`Tc!cD&@Z@Ra4Z1pV!^>ybe$@qELv+FV$B1}>54%CzbTX|rtzJRSdg5}olOrW}Zo0>^s^MsB@I73HUy%sc+l@#p% z`N%O4W*$_FTOYssvXC~lGcZ2m9j5~mBU9Z;5z-KPmQuyd_vJ1cCier|(drAJr+AwF zlxV3zNBq>VB;>Pe_$MnflTXv$lWk+O7n{SDQ4ycMfyZbT=*3g%^E_YF z#zxIf{8}`rrX=5Etw1V@Z~PQ(kk+o4mQ0xR4H0LWWscP#*<*fYN9;?XrGpnw@d) z^VUu6IQ0n040Q8uFbe0!9-q?@p^$h~E=5HnU_sIeOu{a~|IS0)wQD8as*@T80dEi!^keuRvoPD$CR~8mnI4C3Dke;bpIH})_RfTq`CT%7ysxRo}>zy&n+Yt#sZZFhrR5I z8FI@kHSUyrk>IsOZx|f6)`}QhSG>eFT)3^KIz88Pg}u7t<33a)SN?o61syUy_A=x+ zgh_3I3+%HIsaD)GD+A*SaFp71jnReZZC`6Lgtfw`(yG+r9|SISaToWRyZKo@eNLn8 z@a@N?NBv>WEKt}PdNe*c-{^Jz*NnIu6!GxLYyU5fWi1bEdrCuDr5R+2_;_zg*8P8tno%uHEJq94f3+EHYF>op*noZ(eA8!|#o4PT; zQl4TxYvgSR^U3sD5im0^bVv_ioktHK8F=y@SiN-#PZM&Wh>H2ENYVm3(=v>ZAtMpH!2t2;HL!b|M$-|`mFhT^nil{3HS5IRMOqOdY4h2M9DAn(8T_tOX*k{W1gVwzxW z1Fyi9LxwFQ9kghcD?>@t+qj~b`P4KMx(rTpJ!ZL^A8_2z#)|RkZTy4SU92rkULSxvzz>+Z`pZw8S}Ovjn+C6*05x@ zmSFjGg=bgLLHRw;Np?KimN8d06@|H*lZl&MCGTvhms~dJd}`iqiA$R;!Wyev#?}`1+yRidhg3JvW zn?qps6UV9rdu>{OSN6{pHM`#sTPfV}mr^%u7DOZ|H8_-sLK7xrm^Uf46SjMdy-rus zB%Pwy6TVf7KW|pM82Zz%mO1obt1r+tX2aFZgO3rSCvASCrqUADA5rH^62)8E7*ZHo ziD(CbOwaDY%P`TidRe~BS%=NCB^L(Ur&J_xF2^jHfc2}6?#if_pRF=!eMd?VLsR?* z6HJ<7*W3HGzwO<&la%@n3u@K1XCHn@6f-K6ke@8hN4Z^OD)o>OuI3ys?7lRQ>3B-@ z%*#oFLuT!!kZ}=q?RMjr-XoNroqD|Fmu|XM|4vIq9u(SWYv#*VNnLNw+$kiueQ}6b z;7UF&G2#F)i`tA!OL7k@F!sY@0IgxsK zjiv=y%qNRzTp0g5U>-RdmDCW+o??wGdM(gdpy%-)N}iFA%2A@&irBAFGTz*fSc2Y5 zZKl``U6N>I0?c)r>K=mu?L`b-Y;5SCe2pWf@amHJ!NuofCTthV_8&SCZu6l~%YtiK zrf7)afPLSFpVdpH-HH{yC5Pl#Nw^ON$$f$Fg$t_BDi-@WOstVV6O_b!KBx-0+ zH~vk{e^OXF)~ty0!+E9CENMX6G2&`1(9SB`3)!_T&EN_6KH%Rzd8_#GCx#r^vs4sN(Ci?YE%mCR|A7?(;pVAx|7AG1>EhH`18Fg zR3%?2nJcirFL!*+hH)KD2?!n6iIB#m@xWu7vXzW>S&3Jle)OV@L{@YL(l&KESS%zWpU) zv0TQ}N6@rP>_ly(06#Vbl|1MMzF*eut=_SqDpUE-xOm4A`aote|BuTLUrws4F~ur> z2&Y6YNJ)GBu3$I$*KYiO%3C!|1;7V;-259i?s${hyw~^A@`S8O{;ZWeYJ!P|V&F65 zj?a5fG!;e;KQrnD{A1@2|+JZq|PndzZpISgcq-gaW zeTB#_Q8WbFCA_{GhDrPguE;!lj?;tgj=nua%vfvCKhvilZ;TO}_G%w1*H0*3!n4yC 
zeEt*J*A>3?r{HP)&8`MHsrq^Z_VX}w&L?Qk-d&!+7Tl&4!Z&x<%+Iu9qMv!+92EB} z(l|Y<(ljI5%MCDFzR5b926I5%G#yVrQ=}9v1>bM@Rbx+Pc9Zc!^+ZJvE|u_xiybW1pqtK(riqA(PO$5_-IA5tEboMYH$P+Z%(9Bxm|G@g$)c~$b>61j?`%Lv#@pIcR{ZQ%aw<9i# z%#RfqO8aHk9g?U%(env83PSK*oJb6*mbA$1@sdzd@^Y~TzZZ^qu(PWmrnZ-Sw3F^r=C0zFqVhpu2^>Da#`NAx%676 zo06B3rX8-c#K(nS(Pi8at!GHyAdqfO3_q8f)>QVC;LbTDUED9TptE?dX2V!t5Un@o zDTpa=-c|b^H*`JCgIYP_Q9lFoWZs3`Z&=m_m4OUal_CQFO0n#-bbT0~ekaa3VCk}d zmU$&GpmlYz0UG}pkV>9X;IXHPSUxMrw=)I*nGsWUzkY1y(8ARP{|C7Ff0{$!ChJ6< z?YPxmS)@>t^_QPzF3Oig#?kQp%MV$pTbqfk^we2Lwf7zAiMmLMtB#yOrmQOy?S+_% z2<$Lqp{hqqqF!Pjk{f1(!Hz)$#bl<;$A#d_Dga5A2%@5bf33MPn4>K~?jq+mj288_ zQ%+Lm&jNKkWQ8|;sCj5;`4pENeFuRk6A-ZlLFaU!uBUg!+bgr|;IYJJ1Yzex`eAz? zwL{NX0E`ZDH^t3Y<(DP@b0eUJVc~n=6L%qY96^{mm{E09PTL_x#TA8nPspIPnH+7i z)2!*CK>2#}1IJl6Ydzz}KGudxmK7ZCF3Cvc;Bd!nsJT-MI%W$#9Fg9@qBu?UCjN$m zM`{OV4_cbX3B)h+tWrvGvhjk!kS)v2u$w=X(dv<+FduLJtV~k^+vhVSu?ci>ALX7L zMD}59Y(sT@nept7>HVtGu`_}Dk#Gjel>YUf-R`u{S86n8v7Pf>0*1t= zMb7ftiiXH?ic=n&`;AHmnU1LNqzAQv+QhnEA#Vo2yZ{6AwVdqp12b{)g@$;#z z%feoAbnGbYK7mxeR?wH438mX4XO>N-!j^uM^DU3U*GDvu8_rWTKT_DFTzZtTVesA@ zg3)dn8E!+J8ADoPQ=r$&ZT%((1z>BQNsc2!4)LdW1#Q@0X9bgPsQE$f!LicP|LH%~8?7GKE#Z%{ncc06#jO==8>V>rQPG!bDvi=XQOQ%S7vTKFZOzlmz=YewPfx^ z9=v^MuODiz=+_O&jF%qhrbroCKT+F%%-4}E#m16P$cqFDKf{!brj349@?G^UF$vKd zA~M;UJVAvXMYzJ714gb;8Eh`J*RSs%)2VsX5 zk1Co#r(yn>Qf2w8m09(&od4ha1lZ}aJ^{2MGkTw)w@qWt%VyV%Q2$-mNI2A_Bn zD>aD*vIX>RhUSAM?e?UC^=dy~9Qx_D&jf5^tY~+(5Wgimqq7}cX1v_IcBJK#PMmp9 zsDfvRn>4gGk0viP_XDCa^YdPKI(xQ7P5tToa=E2r7?eZEIWeZ0l zcS@4%CmZ-~PZ!MjIMu4!g=OEd8`d!D&MkT$RxQPR%G4l|*VmyAZzj}lHDe$x?mYuR zzdwM##J`QZrGLju8f>~Nco{ZYWNT%2Ah9Lr^!Jd{sI^_35K`VsW~l+ewlt@cmQ4x5 zOhM~bEOYFzQH8GC@(-uhn;tDTymNQ2{VO?P^r)1tgOfqxmR;3 z!PPB0y#9{heDTpuZq|q_O z=#uqBr=w>}u4KPYzZhiojx97>uwcHX&+2|En-E9h_zdPtJHzRl{-LhDznLQLaK3Om zXHQtzKn+{O)G{Se70VHXX(m||cztSjce#Q~_*5s61OGN1*3&jG*Y&@@^()!DH{V~Y zcDl-{&E8vm`rwm#j_C_6z$CWZcqh()?e-*?2$^9y7pTO}oY*zDB@k#y5_7V1qKoK_ zyp5zPRa~+nwouptJ1S8{}+xl?JISmsswBBQ03lr!k@ z@I}YXF^M0U|2V^>{c?+RPoC`q5}Q0bJO%oF?F-#;3t z4@ru7C+V1%85Hd*V{}W~6rMq}bklT_@}E(!Ltf!`<}^`R5cE*OWdQ$5lHxy_Yr+BW zl$$3&Y6h3Q7o^(+CvFrJt~DBKka`Rf&14V=3|xEtZPX5u-~$yyPd=r4wJri5c(A~k zCurYC{&+~s=;6=jTS2@0P_gzfiS zmRZzu#W1QBzMH&X!a%uJN5l9?Pw!dMxDVa&9g{2_(fOd>nZBx6^ zr0rCz!|Z3+&9?#KyHbIxhVgL)LR%KSJWeR^pm_m71u5Ww#dZ0op#Hl}$q~>olMgGn z$71{?S@Y;v-lq+t3FAr@2hUc+de>4bCE~Amoo$93YE5~!AJ3d?h-xUI`B8>HqVIve zi!mxNe>7Y)@E~dV*{9?8gRD-Wbr6H({ZFAP%XykW^!6uzfX_=kc+;O;6Zal*EJ#~X z;5_JZ2i0-2J8!ZXRd1J-3BCPN7eewY>vAQgTF4Rqg~a@?3eCm}^X--+D7(S;15yQv~E(^=g5q3)bQzWALBIZcY=_Yxf1eWzya`Kz<$6Ol`N?RxKepX zY{?0;bOR$BE+kF!4~Qu%0oq=pePZTX$A<_*iC3G@JL0;~dy<^!oe<+1r;u!8i6$(~>Uz75IW5C3Rp#6NE zgrk*@#&ZTpsZM~P*zAp~2OK=$ZZzJ()Wq_~dY5YriRq5?Mb9H!h`;MXlT{}(?%t1H zfA5{OXqwr9WQH^Cqaj}VNaxyqt#Oox9 z`qCMfN(B@)n2l-|Yvd9ewWVAY8U3bl|4nsm0aaQReqa0D!zdJQKIs;x)P(uBGeBCe zZWjsZQ-s~Z!rfi!F9C=03L@QV*LXzRSyq_p4DX7ElCku>vue%t&pYyKJ4uImw20s* zS7Qk%0x^8WQ2Kgmh5FE8i&4^4JzGG?V<-yoO-D?q0sCVaZU428-flI20=b}aUeI0X z<5BBNTVO2X(kWuhlKYtLM|H0D7jmVN!BSoLA{f;jX4z9)jeq2@|{cSt4tQe~np;4-69 z%=eHMc+{kLy0*>asay$a9ZZQ;nss0{C(yFMC6IIXe8VP({H ziAs3U-RCVGss;Z;vIz>3=z_D&VW;U5EiBfc)*1mp!RkS@^D4|<=Q1s`U<&H0mO}aj zj>(MR*|>#?!L>j2*!4~k>V)$_vjA(cgj1`yat7s~fgtFh#YSSn*VNrBKc)3LBsibX zT(!ucpX*_noR?(j=QXIbjocS+W&7_-5gY9TuPxM1$GH1zY;$$e=8?00IajI3->|U@ zOBc4qS_HcgERRzo|4#{#BLjLlO`1xo=044(RVznEEyh~hMBnHsPROq-B@wIB6}bjI zB^HApLhMmx@Q5pb{j{)tDcShYdF;-3#Tk4+5BNX@N19y~CuZdxj*8UjZcN4A z9n=CIEPs$XP~-VTpj&#XI!9F(hA+1aUK{=bObl; zYs@BaJQTa8x^$YzxrQZ*G01yn6^N7hQ@F|?pD0K$@JH|k;o4V16u0v4aTvbnd4wny_#>s~_ovq)e%yS+dR53y 
zl`K#j(~M(2(odcuuJnopQjo6@6v!MEOP4ojDw!ziCf1q>9LY$CQ18`%kas1)K>>lT zWrh<7y?G!dYTB~$>vhp}L+=QQm^b3lFJ{>ifFe5vVgxgc_W+tPzovkUWnk{X35s*$ z6M_@Phl|J!pepLT+GrM!mZbNhPsaUYwQN6+l{V$e?8DHrVH9CnuCx_)+M5>8Z2434 zj#W^KeJHdbcaH^21`H1;Z}Np=U5h>2+so^(5&FLWHf@< zv++xm`mfD5-hk<%YHI#V3lMi1U%?j?0bRwUjweQ#i|I_bz30;ph( zY!IFWMhs>*o*TBqF2qru8u*;#Z~c4giYoh+3{FiPB=RQ}treXbZobbTfg6~f_8>nt zkuXc<=&W5=m6X-uz5Y~jv&S}TaPyubSka929`F(RJw!h8i!~0Vi|gG>+^k!(&#Obd+{??QPLKVxL7@TD#C7QVhM^muna-C;#~xKcM|ebq=sR28*JkLwW6 zjTR>GX5)+E^>HbOQ-}UG=lFhQvESRSl@;JZQe)Br3)^xFgU08H1m@`uGTWvWg5%C_y|b%RfZYl1H>BO1~3BH z!1YFWyt)jQ6uYt>OJAQ_dOy}LWA!4SadT7&+1cu^wjvzW07unm9`XRCTty?xpxv{X zEWx(c4;qYRDwjrCyl!orXo@A{Au@|6X~c1pdrzCx67zxeSfmg^LEPl=`8lfQ??R^J zFqiFdt;?*2=|K6gSQ1wA0Ut2cA3MHC_H~9*Qhw{5_kj}QdhyvJc9k&QDrZ5BFy!)z z2X|)UypfCo{Gtxn(2Ns#sKL<=@}2pnWV=9l9Q5E%DYb-Oh76A45zR{} z;A}^?KZm&ld#*_>dxnTg%$btbJ|v84e+p^tK`g*Co{qo?@f7AAf4o=CXM#%F4YQHb zr}%9~XgLgha8n(@--QKPWHzIPq};;`*H?TgQaV`s0&_GI8FwchyHI4ge(q}1A6 z5Qp!OxqG&18)oL>Z(^J4cRAy4-NuQowD(o~N&=3aHi$l6OA$CiIzux*-No!{SB->f z5gfSL#W8s0A&9cTZ8d7K59T!Y20Pbay1msl%Cv5cOQ^kS`l?RXG2W2$tn=YJSDSGc z+1gvB8TVkpID_MWypc!U*3_9FMsyX6PjU3&Rd2XoD0r;D(z_=Zk3Wsyf3}tX*;=O4 zSMTkh2i6=@Y~oC=jm(W68J`XZJKhyrgB${fZWYE*YPQ;}0F^*)oxhqsf|bjL<9 zXK9ye<8*%2PlvHsHM$`VI^WSqEr|%;KqMeMK$W#T(Heqrd9ny#6vOnXerY4>R)Aup z8BCAP+Ji@2lHHO{Uwy96Ts*zm#D4@4g=-1^3u%g#2Yd^D1O>rO;jsYwE|A$gmrbXr zS7T3gaqGqJ)NxO~anF|&&=UK^ht`5b_zboqD=&m1EuoosltG|oWX-Wf<;~mZwFK4W%K^Uzi7_O=D5GCOnL2G3mz(miU9=6n##*Gff8AEA{Np8lla!o7V?G$7F zB6$`)|60uEQxn#}37vV!8*T~Jv*E@-debdYeU8ja>5)#)DaA{d<1#Y6_OIU(RNp6u zp{d0?!-{Qe9fwcZZ8iadPD8q^R%6w~@>&q-W+^YM#PIjPzXlteQG)%lQGZs>U4pjn zG-tThB~CVTLzPoK4_Lbnr8uYx@AN{{aZp^qVW@m3y<>*n^JFV5&Tyc;1=>&`VE6rmOX}}>(~3GT(?wfcpd1LTGt$R4(0EyBMh&in{8Gi#4|*&!wmDv&>>e|`2eTJ8<5ceJVY}B?dhY4; z^pY;W?~qNWN>a$6MUD;^mKP6uiJ+yA=T?%vG3+KorOSk6Lygx+cQk%aS!*kqfGkWLAl6+Z%TKs&`sW zY&ze{j!M!;`jQ4*v3-vywM_=f@ybauRE9k$+M~p1${k#Jz?k?-<0B_<);$z{P+@0t z0~?c00Z}7ve*RaEvl+{aHA~L!5mAx*YyC$1N6~qDwnt0i8P z!q}YW0Ui;Uk7Q!D^;zN*YAc756{Wog$%*xCw`XPJ13tQKKcRF9g9Xtgkq?^9Nfp1C zH78i6Sl$FbJ-imOiTZu6+r|p8mP7r+m;K(3YyU8U$LQqgv&RY&W}Qx{qC{N`((bU2 zMvlhR${pVCET%lz2*>8-)|Ww*ILORZg)x7dlha3K&8c1v2H8lNfW>X>@%jvS}PD3A+1T0Qg>OKB2zuW^?rHW>J!`l)8Qvy=Aj09IPgFCXxG>r;8#lHXKHgob7dF*BCEI$SWLrS1nFMQSFAcZN zM_ZA_Hru;A7TC?}LZ7Vk=Q-1k=l@zg*1eNMh*Nx+=exE&N7kYe9ADfrN*$!J79xWw zsaiaUsmn4BwV*L9tMk`(FBq5;LK8&iNS<8P@RzAyyrKAS&0LFEa9Yt5uDSmArsLVH zDKF?YY;VNTi#RUDH+Hu!diidt=I#6MA-tSsvv;F^bZA9YntOWd|5X!by_ZQ7#?c}= zrL@EfV2)tC(S#_T6hhQb-kmm|{5nm8NC92x+}HTlc~I|fbO27uu3alXYw_p_A~bL8 zOv3SR(|^q3_tNx_%bD~BLT-UblRJbpST$ebD1fM-XJFhJ<(*0`(mEbOmdQ}VOYzP7f-;5ZAV47)64WM}k1^n6qXh-OBYnO-Xt_3PjMN?o6$NtbGv_=M@&Q>Mz_ zW<#0|v5#e)#_pcb7-OS)=dzBa);IL}P2Gw2XY3C;sB$-R@tC2QWFL^oEbud0uyh#J zf#+d)aSWi9)z&jHm!|RXL`x=|snSLx!Dm0d0==)ZJDDs!OoHl-g-ouuIS^CXR-6>Hao_aCEwLfT;0a9kd#2aDiN-&?&o3`C$=jsA}UB1KeXf@Ua$OpPnE9F{RnW*NKq(pxT zWkY7#nr7;tV`M-eQLGb7+BsMASazC7T6`({qLG)uW%&tJPdxQfIi@quY_SAG$|=@U?qyxa>-i`85%l27xxlC?3W%XVR(HgzqMH zjnK#l_CcGKB0d0`^5%_jUBAL#cZ6McE6h9%x2s5b8_@*Z#PrC8>4c3yPm88l#=Ns7?RCAs?K}uRyt*_4cT4=FQ9({rdu{$ zJ-Ot!y_C7f=i8y8gh4KgL-3_sSD8yYkZpv(GsyzjturpeZdJ?PTkxs4fET1!j#i4v zgAs};gSm#i7G;l>g2zktff|`)eIhw1=|m$WY9jugmNncYye-_oB;9(@LixRZD2^=1 z4Ih=xS|~7bCwf&dZ7i(dV=>1mfE+;Qe`KA4z@z$}He7zQ)y=Y4CS79?X_q_m6nI~M zJ>VvHBkMR@X=-?xEsVtb2m2gsoI85L_sEaTk18tJJ(PHE zI`;*WD|;I!c!Oa{x45Wu`SZ!N0!NmpNnVhB;-$ZrYui!a_hxcZYY{j{1Vc6unB$-N z;c{)~XtCNaQ{~H>(o*f>H%g&ri@u8~Wx8iJPGdPDdvjHJ&1PlW82hD-hq|nZedzSW zKd8hPfUdG416s;%yuq1#-!v~-pKxnFL6yJz%{yc#kDaChi(9Xc_;eeC3|b#x3DJ?! 
zWdRLhx`$lXWt$S7L{xjl3`)?Qn;4?NR^MNxUV26HAl)~*Itjc-jX0B@qco(3`gyS_@%yJ6EAhiQ2#jJERC zoMs*V`zF83ouUS(xvW#I(Xv?y{M}l!YGkgQ>2)s2IiiY_UWKbEjvmj)bgu^*F=d%kmrQ{`GE!#l)fs8Hcw0H0= zylgZ|V);0)Iot?zWymM`>2I&`}Zs$QgQKMR^{REyaX+Q3c zihmB#xV{SwMPk(z-zs|STz&bL9*gl|(ms-r7&U&3@4)?o- z;`fG?mi4<&0y_7VK~Y*9_LosKI>NzUt_q_UhP07pX=FpfOR$n9c~ErSfRv#AHGMi+ zdJ+cHe|GW|a5}(O;3(~tOn^dp%&~l-MI{rp+f07Wo7nLi!kk`&*Gy%WIwucbLJ1O-b_n>$epBOiRaY{~)8}QE3#8M)_+nb&CiE<{Kj8u}YgCVU92w5d1 zAf@|8kM2cW2;`;Hqt}prpVH*<(-dqMcye!4KkL!H@tKJh&Nz&E|8eqRM#}Li9&@q& z%;@1m#nneYw-eXRw<8y^6R({FWC(H?Rjt6i}V-2eHBxqVk z&7%i^W?eZXMRlt(So9aL*hSEf8|^8Cv^TNxX#x@Hi0i&Q#4%Hcg3mvwI|iR+luP5m z+&^J9912nQFR-P#Me+354&U&Kz$Pn&m~G%Mw+OMozGKDjy$@ClaC-RFMdbFR84HlN z5o8fuqzPCD5+wUXeB!p7$J$WPK?uNqkC3i-rmd`^R@=P3iu(jK=I);-?4j<6Yi}ta zk+jlSRRq4B)IR_~ktkU5M;Lpo7w_Gv8sTK!RLKl;AbkQQ^Wgb64`>?@*#V+Y8?FS_ zIl}myXa{6`mTZWj8#Q|DvHP(yD-84lqL!~1J1a&jab{O ztQfz82|gKY=oF(!ReJVI^pNv#L8^d>$wUL*%}}h+)ri2R4a!?e8R~0msInge;^!%O z4U3m18n0x(X;*BsEvLz*w07Zfd9E4M&1Tt$)Pe<`YCbeB9)2m;hD8ETr|jH?7FbJG z)2%CioBZWzwKTxpG1YH%FF{X2WF-#s`Pky} z=>MPKR_RV?3Sm+80k}_~0Y5&jYH>T@ZV;6_fECE*ipUE9rV%jt#AHV0? zq2L;#=VR^yEU?IN7Y+xc(lG}GfH|BDlrmHPD$C?ngWj*@nNcYinf{4J@ZnCP z!{1WiS(~1V%+U>nv_-DAGQ31k8zO*-lpth>9hifg=78gXh-A2lVgXXO(jVXTA6^HG z|KWIdnnP?ZUiP9hNZwT(o?d;t)JZ4(;?`#-7W`y*`nZ_=;(HNtF0ZlbXI{~GX1w(7#?xAD59pITz|T)+bg z16qR2z^IpS-|<_%(!IH|RxfZhaW%7APC4>S_M3?nP4Z0ri{Y)~%ex&JQNN3lQcDsk zhgx0+D_m;e3>bZa<|!&8Vn^xUX5y>C^hoT5cL>2ADuDJDGJW=K2+-35w;A7}WnaE! zmebEHZ#<{uMTh*XCp_xuCKtWm-Jd5o&Tvoe@rFFWlp(5b_uSW`Y8e|zxB2YXTW(=E z@U9q2Gue%zC%kDi_Sbj1Skpzo<+4eS2A|uibw)}{!vBh&zI?uRbLP`OYXu^xKm^x0 z$~#qN0iO&j4!q;{FBI=`mZ`qQi7MMwQt$-mFdVg+IE~bM&VLiby_Oq^(Rmj8nZ4t z&*H5&mlOI6a8~~EGSx4w(CzQ=+bU)GlF2Xt`T4@{4*yoJE&bX3{Ne;y`Xj%zVX7y8 zWTK+3F+C9^(csx@l*XxV`mS^HS`zeuf{MyVgiyNS65g9BiQNb z52oL}XB?A~_KVvhOLll#KwjLZQp!2Te;M}(KrZ-xo{UdPA<9JT_g4nj3_Iot=>6vksI;QeABkyh*1 zQc`5k_Lc98CD#63vmxp!_)+E=t%nL$5F#orjId# z7DD?s*#QUn8B^+Y4GA*2xA_ZsLdZtQm4{13xj?JY@)Cjxr`9w-R75vL?!6D7K>~CL zpR%s=rkI}WH|3S_#*L5gl^`N$RL%%wgF2LKXymEHvwgk@r;h92!8sVmXyF!Mh#n;L z^l%4Xa)ZmRQnzZW74oqJz0Z5+8pF&*r_-*2yWC-T)-OLeM@2r8tQ#WceDn~Bb7*L!HQ`4KlK8z3wKZ*G?OXsJqMVjdO)9!Yq>k-d&(O2VLC zfC9}fMQ1VHG(8@?Uui;K?hT@j4C65@x=A80L52)_MaV?n)HnvJ_*}y88D`K;YG)O# z^Su6afsJE;u8+*L&@`s0&?1KR>!qa`%cDU5BMUfvQg*p;(gC#g~VaTH$+K9mHfLHVhscSe3g(7{L=DJ@FyhyqI$HG{oDvKG+nt)MQfYa;VzJCo@V`DOe`e2D-R=dsrYI?`IgFsY{CgQ z8fI~t{VU+^Vt1;+c}a9>;d}ne1+r2FY*OW?(7PuHmEwlwSK7kl$OLH=OX4tU-V}EI z0Y{I2mfx8+Mw4mYzBHzUWFHc6kt;)mBl#nV!YQK)8p&Q+$jn3L_DqSb zjoi(#RBDBS8T+J?HloDkm{ccUhL2Ap!d{B@`*0(pkUTf)_VT~axYU9sWu$@M`}6E8 z^5xcv2rS3N$*HA2y0qI0MQ)Admbs_FDAc+W!8*!na6Z7VAQ4YPK9+<@CBBLz93Gu^ zgnAm+U$bHT@@cNQn8G`J>L1YYMqR(6JWwqyRK{9PPcq`IHG70e=HUzai=WECuf1R5 z2}s(FPrya1{oD*y#}H(ub*;gFq7+Rj(T$Uu{Dr;sMr3#A8OyEr-gu`*qdkqSrTS(6 z>ys%2{`X->EAZ})JFwy3q88ipK@P zv?O-?VYm}lH)j3=*43w@B!#h-g?7(e#s15wA?fzl6`WuhyAEPE!@Mq}D#1lMKH25e-HVj$kPSb?v;9kyYdLz53Y|BL04k>Wb zarXI+`sDw4bOu9@FW0ZRmbe3;DgTrQSL!y0(=`_s7L02B$~10XO!#3Ih~NTH*&59n zHsLlf6+}wg*mJE&8#WK|3H3>A!jtJJ+Y6_Orin@lA8#fNVnKerF1!wmkkpYwT$dT| z80oS-*js9?D+chgNdMG?oX1T5O?H7uDC>M50`ibQQ3r1M+P}bSt=w3DS2b}LFtbCw9H##B1?E(W!OfK*f$e*4uzf*@pcBIFSQSQ@xla~17dD4Gr0XU2eEav4j z8|T>M?%#5APuyggP^`LCZEn{UYuMl2Xc=)IYkLHi{(6_{{{86D`kCB2DkJfw* zYD|Pr>x}$>5lQm6A-^+*JO3rr!bz(=@&9q5aggm1Cf>=^>3+<+ zCiHHKAAAp=4>C`aR}qgj98EY6!LS8TaflEP>jt6`$b1$ED+%RrgJ)Uk$;%%|yORZ5 zt*j1FCSSbA5;xu=(x56~Ct@-K1*ARrt(_^!P1 zG-|6jKy*ut_&nin4RB#6;@TAy?{x`F#=GM!BIAvcsS?^!X$^|HQ}Q)zXYce zO~5MNxp`3o3sqOjk+-^ikW@n<^T8X3c`|WyOYv%T>w!|Y<{JE zw|gW*>Kqk1Cw>S%Jw#%EoDH#85Pt>3t+N 
zX-n3?O*ZpT>%e~x{#|mb-ZHB~*u8)gz=a9Je)IuNmG6IJ-Ty$lFS@~I;0*)z%hl7m ze952{tlkt(qxIJWU(nrfWz@D9tXU$c|Gh1EZ7p0tw)v7Lg5mx;jFcCb;I%t$Pb$I= z=)w=N=)iyLXP57^u$UB+lnLwvLV>@4p}?mAXb4UUkt`7%dZ63{QZn|M5;KQ9-9OHM zLZ|`CR|@T7Pm9S3owmL@tgL!lf|1)CC9%ZL+Tnj(v?f1newGx@#K#>dB#@>akL z6%OHIY;VM60~OVEth>py6CfR_$#|H2;GOw^Ct>t(IR^^;8&k7+FW z84Hx=c=Hb-<~ANE>nXQCI+M;W$aAQQtn=h@UL%VQq_mp!Bal2aS7`fhDcxVMy=HvF z7*J}Go)+}ESn$)L_0xfvkAeU7f!<*4uTsB0W1Tt#73^bc=c`TPIt?oijRkisTPjWvr?I+adb*>n*`?CWdoUyVD!L!tl-Uec zr1wnsRDWWwG}D94lZNEcOo$gx#awL(BSx77MTNcS52txX6OW5Km!}Ny8v}G=c9GDr zS(4xswCkj}5o?!9l1>kwdzd|Kh4(L218j52aP=8r@ytFZUy}#Mg8=p532t zw+w1WIQ*`5S!RAHU~dAjrm`5FHo)_=DpuMd35hmNv-`RF82bYqxm)*?YYSE*{JK@T zb6^7-Yn$C(A(owlIl>F&xo*GJn5&lkmQl zxz%f)lDs{B`*PN6Nto&a(&Eiyde+2q7Vzpx@Z;r($2c2@QpQl~yF;;eN!RG8l7Z_U zU|_acwm*BDgyHu{r=9T?TzD(SHt^9TcU3{9~Z zwa9LZt9Cg;HdQNYCZ>y4<(m!M`M+Z42uwUa!c5b!w}@DHZ8;mv@cHKju^1a&F> zP$4_T?jQ=IK&82H16ZCkKsF&-zy+Wj$*=?4FabUR191FQ+&i9H@~fCp(tPB$_U@pL z%$06(pJs8MP9>thSd^rPXK$S?L4rqtb&SF#`ed zjU*4pOzVzV59t$P+oaw3XN$NE+6Vk!$Aw7kt>kSbXPeNrpH=b1S~E}o_yeyH_8bp8 z66d*RX=P43CHh23QnLW_t~9cESktP~fr3eBo`zJ+hI8WB^!T5~y>NV~**(wwG#}T_ zA-xjVpJQ-AG_`a7#Yqn{XJI1bvWqyBFi7;26j`cT+~YKsGgsrcrayief2DQII#s3;FVWH?I{sT^@&|FkF`Y5NF*Iv$e3;z;Zp`$I zcN-Dr@zG83BE_BaY(M0lMo%;0BJ+ULx3UbD4C@(?5h9MKfaEPsqaPAdp9Eq|;SCAY z_Ju|b`#b+OX$kw^H!Qhvm8BMpu$Nl!#Nm&Y3qW7ve)-L5qIbG_F6z2h>#n5mR+PPK zWE>y6*y-;MMH#`<;(pot$&@`YuUJPt&ftc1a^hb*LSf7Yc~8CGAyPbiwc(=kVBp&D4zTvK_y~ z+yXXC=oxEh0>hL}h`;d3svku7q&JMWMqS#$ATwpf@!(+f96{~;?ob$+@y^PHRIE3} z|Jta9J~JYB8j@XIE4}*)WzW~l(!AHa8g2dc|BgS$Azd_DWV1!;6jYJ{2G>%jIUNty zMMdL=-%%Vw6(OiGiQk89uo;+Tm5~O~M8K%}kQOX6F-25*d|r}Tny^fCEp+m91(MJ3 zhMz!MZIHe6Ke;!uKv`G0mNT^@{Ctd<;0gUvjP=y})Cx?Vem+`4I@-aV$b+pQh%_xP zC{FrB_V?jJhr+QYndN}hz+A?M%?7F4@|3H?2|^AUok#zyot;8%IoettBnvzTKq$V* zAhY5Slj@nQwCuOk8XUUuWD@JvD2#<;=_px7IpHu+V8}>>bR_LRmw}rKO3n22Iwt7b zx0~_M==2%l+fog>I23C!RHkgUc=C8&K0X2a3!gylz~52^zRg1d(|5aGtY&4&56EF) zy-zKHlJg7`7Lv68$Z3LWYY%Ku{)sio-L&jkP}0>h^erM&Fo5ebZZANMz>d{ETWsPn z%FOZY3u1!LKdXi)7uw7R9&hn1MFi=)=5(Q!V?KDYf=|*2(I4r@((|%oQ)XM4>PX5~ z-s~}hF#OsX%4-ZevekhC1W>sRDN=%y~aPxIG8f34Ffmhf#((^+!9`(dH zOXVBk76|MMYf0=a7?TzJXB2zG&I7kK$()f?MAHsK!^g7t1cj>y1cWw(W5dFXL$-VXd3E;bA`dTz4-?UzlS#6eCW8#(i_kn&Ou@bhwo6Yps$f5J$E#N3GUHa5=rcmOrXvJ5O^U5N}{^t0~7 z!8=ltw1L}iMAJ)N-jPj?brOOM`?J@}RYjcNbpe1br)>S6MivwEwukH+sGI&=C3U zNWdgv_+Oh}L7{3%kC_c6q;N=H#n}5`D!Zz^V3e)%cE*O_n^#c?UyiyM#;CvQlAWqH)%9x_lSopmstb_^%A8{BZdU*OY6Io!;MGxVO47w4A7qI z<*Oxe&tvB@ZS^b>{p*=BXF|(|@`MwY$_5`>r{m?oEwM8FifQO42ssahq*Tp&MZLzNPDvS>Hj(atD(Ch)6-c7K3RKyOO9$HutoYrB z9LeZwN49`W-l%euXydz7=2D#{3^g2ilXn5~5B8`r9Y0w!JtEZL7U^{y{W6rA0mFZj zGX!n=Qc;gjMttKTNc7E2lQbqai}{ z|Lb*np*_#;s85idYN3^YDIL$2vLKZ9^CLCS;K0x!1&ml6t+@^fOEf~ANjlYxEht6A z-|#M$Y~tTvu_PWSeu$evm=*?an1{th)$S&}O;F@l8;Mk_Wy)(pr5m{5)IgUl<%svH z!YRafHL7{wLQXcvwdNO+nl0T}p;AkpndD3k+N7wNQvW`JOa^LB)?Ngl?iej$rAZO1c-gAyius57K%W#MuK zQ(btqAOg@D_I&MITv$a?d=ax!sLlT~KSyV$b)d8E{<0#I6E4<6 zgX(KN7RJTM3g`UCWn~S$p`LrlaqN=-?tsH+zoKv*4d#%JIaEbY^v`x-5`6PrGeV$k z&Er^Ed7w=K$!aE1V**8cQ+@;I%pJC#V~P7@Zhi|1)(&{tsm(7NLhX{5bm%et@87EW z@u6=~5~SmzT(wqn)YdgCEkM))TX(eEc@8-WbA*dpf}54bB`0NxS#>Az?PS$#`Gu`s ze%ZM6*#|z}Ut-F9(0FjK%0YsI^Z$_Ts(Tb^TNl0{dsp@$fa{?s`>9X*qb{EoF$*&^ z1qXpeHH~U%m0&T&j#mz&*Lb-N`^g>jlp7-K?6X)A!rATtc@3jH`~ZIS&2kBU0Z(g} z$A5oD)*5fI5GA_|pDy0l6{!r4Im`vQew}rBG{jC1_~|*E&bm3DFmt4+k?jOy;1(JQ zCB*xE-6N?*n$h&rs#@tT`s0+;0fv9KS4k0KE0y7Fft}VrCqZs2G??KIMM@h)-)CDb zrbT%SJQ~lk%DD{4rZO2`v{>#Mpkf;w*Ku9-pHIIJ=x&kltJX7QDk@^Tgu9`kp+A*I zTJSZFYV{QTay~(x{dFrV^Yq30#%yywno{4pI=oNb-qJ8S9tq|aYSdh z=NNv?f`qX`mWM?{2?9I`y5Rs}c$CgDDw#2#)4CWGGQJ?bCx(vJxceJwx89a2QpcgBO4}A1 
z-|~i|V}jy3ec6%uQ;&}f8U?JGWv=ztJMYwKrp#7?9@Cot?K-1ejMN>biOsLT*~Bp> zv6}eC2%78CI_whsu8%5yKFcvV9Hj|~b|U)w%bs#QN7_MxAPOFgNLjwiW&gU%qk@{} zq=WUlEH?VYMK78XSVC6W`y*Ngfb4svzmz+6kGr~Nqf`5GEpe=WPp9n4{(aI_*X1V| z>iU+ECGxjr8zLEatR7f*k{Wy`{waCDjA<}N zGUCc8Ped42FKZ@o1la0LvE)9hrJ@x(ip={nm<9kyL?U~zlezI#~v8oe9MVonVx|d!jAs^%8FUt?SNw6pd{GwXYna zn(GNaX;yu0N#$m3KWK7g2oD)fGy;RNn&l~HB?ADbT*rWQw{@#0<_;D~)zRu0e`wh% zwMI#dbW%nx*;nDDhB!dlHlYx`|34=KNsEpKN9l)mLXIzoG1}RwVxs=wr(0< zW_?J6by&#*2Pg0W!pb5+MJ*)OBLC>u z+2LZq_!k!#gUOHNwFF^Q3%ml3@z9ZI%FQN8K{1%3aY~{(va$)UGU#11mi*snTma<6 zN+)|AfXMO6F&S?kpp0shH?fMa%GaaqLq`*)#n})>uCz%F#RVwq!qxubhovOoL1)ZS zka$Vx(?-z;)KT960&|oTB{?M6ZLC!quonEKE5ldF*MgAP7agj5;kj zGhoj|7lqqbki%_lS~I$P1}=%b5%6e)X(QBR^B!!#EtFJKWR%sJOC$Mn(i?%;deyHh z3c{6T6Lh4VbO8U8-jgjlQ$rXrEv9stH{3QIQh4>R#1k94Ut4*EjaUoP(uUU99E9vS z2Wz6%y_HoEtCl|Lt*dr|sn=w7jf4HTods(${0AJtxyQV7BYeW46b4q~BNSJWtBDB= z-~pZFbDLSd;-sm0bf;Kc?h|C>0}$_P_eF5i&B-5G1xemq4@1jL9DBj&d2_3rq_GjC z#)yifccVq929|SxCE?e}{tP~r+R?agXUXZs?mTwdh&@d;55l8Mb6giH8J6++ zAIuh{-#LvpCOqG%YJcdB!kuyJdc$;2%4^x-?h8hUBwP+hQJ&*HJ>=tqo6P&$P^w)J zQ8!7!=l(ER^G?c*&_Ju7ZFRr|3l{LJX2g9&LtmY5@u{gfN?bEK7s82k^Om*U?LYW& zT`5@-cpEd@ywdR4qoeLMNugO&2}*P-5s`9VHT|;^*(0dqA7<|ppzh!0tiV3 zqBs8l!tuf=oq~y%5~rOfb*G)D*Ee{WIPf=c+?(l}v7743-<)oYPo&p8mOtZWkJnu+ z$@D$5V%2ZL#c4VdgVwBCg)vq{xMWG!lXskEOhNfT&Go7c!gX%~O;?Ey;X%rq0M5`? zQptAf#bNk0ET>(2Oy~ST)fG9OW-pgACd4#u-J>&9?BN`Mu_msJ4A#yLtG#!#hD~14 z%y>sIxY-pv$ab{J- zdyOFQmn$?S_28I7$f=Nr3s-+Hjhm2xU>r0TaNCDG>Q|Wedn!QY{Qo2CEu-2Bylr17 z6ez{rwLmGw-GaLocMDPo7Tl${Ln&6YXp!RX5`t@s7ERFN?(lN%z5jFWd+$>+vd11H zANJmB&$Z_I&6iq55s7u(IocpI@@LD6uj`o3pCLiOp`Pl9-{t}gMI)&!Z2CV82pKo? z8LQunD=$P&tuGWS5qchv*XWPmHz{PqC+Id@2kQJdHYX>#n6j zv*i0S*7u*YgTQeyLKR+4NEe1rZ5eNmkq3kDl33wpm)ez2&CCaqoKl&NV4aZ< z-XGYwRLeLFBoLIg6Z_fL8D$j}?O*b0bH2Twx07PI1hPFiv?+JheqY{l3z)G|6(Jze zx@T@_SfvmzJ}!C9G+$Ozmu~2%AI%h0zqBUq9j+kS4kis>f*#Kly3UT#muvg=0%RJm z&x`Fpn6*{cv8L`dxWxH2x;WjtuLF;+Hr$_x$p4jH<*WT}&2W;ab{Pb!AXp`=?Np}o zs^g$yCCkCb&YG+L z+EkY>T(8O=W;U>SOTjJnMvj?vWU8892nnF@5xNZFFnKLLF9^b2iMHEE3A8zyaq)h~ z!u{cd$jThrI6nl(8yXO8L-Uqm;&J?PLxiwb`wCKB>b~({;DS_L#_jlU!Bp>&yd_O0 z9)=;`%BozGg|hio-E#0F`s8jQVXin9<5L{YJGlyjmu3^7p9Z1t)pB^$ke{DF=&le( zJ}tN3051M#(7*J*BuDlyxLNT3+ea;i`pk>*m=a-WQ!yu~pcK^Y8k%T_O7R05mEj%P zkb~r~l&W;5%!X898yt<^6n|cFW%bjvUP0k6QRf($Y8hFSzA13nK`Q$QZeNrTR$cl? 
zGxP(fi(@=1EI<-{#C_RUzvXA9m*?{^vtt*rQye(#7C`*W@V1HWlm)Z0oev zX8K^WsLqqfo)Cq2BjonZtex97_{&yl@)Vr;hw`qQCtme>7orvy7AIU?f}MH9;$PuA zJ5l>hSo|zH$I1Xvk}8Mpz=xGRL%(NmSCIPLdy53!m^;Y6`&((RCgUGT9_mVX$|uIT z%0F^=m(aOL|2FHlRVVc;mD4qcE4wak4338rEvS;9C}x=nG?edNb2dPyhqkq{ik&5f z@4AVj3&l+#S#wymqbO`-Z~McpyVeQv8>>+3IIuT$!AMyfh`2K3A}_A#ti6_D4B%%x zmI<}}`N^LhnYYa~87*vJXTE;x5=CIWO!hN?52C5N4l>C~KQ>yDO}=-0=r!K)TJcpO z!}qVoA3`+uIe-)?8w7AFHNtYhKB$jCwZSw;cp-1q1dQK1QW>tC{@h{_R-?_vpOlA_+b;RH#_63yDN2I_#$mLJw2|y8?i zTh1YY^n*ROd#XCPhxb*rC1&zXX=FQ2sA6sZOIbDU{!tanqi_O(fb z3)D6@7%afP26`|FJnvjXsN2heRb}N?{&F(O6QAFRKNVZ_4S3mb-3p8R=4}buRt8KK zrPYVdqVW|t-hC2ni^u6Q^v4*-xS;~d@xBT!I0a2gF9?q{yp1 zk%^UaOm5uX!k>f~hd058-V#>rIO;iEZ-YwitGiBKPrfj;+>*joZ#p84%=@I4?um1j{Qz-3})_o+89O2u0bSk$gA^vA&@ znkWP#Wl{o2Zje98@o*)H66v{!?aF()7hZ0A?8>7J*_7J~?jH*|Po6(pG^dz$Aj#K0 z=ac-8;|c#W&~TE$E>4zZ;AqP5gEtyD>UgNF@bepYe=EpP%(2l@8!(+jF?4~7{=u2{ z?z`XZcE44(#rZdg&s$r6qXPMVo&5AyTlN>%zQO{>VFxdKzRvD_t>rJw@*U#E-#seW zR}KG>FIW(qbYqN`vvm@L+pu9~@uRqaTVoq+R)8_4&Nzf)#{J#;drUUp3+J?tAmvXj zAA^%+gxE;`5`2`u`Ay&k{Tk``fAadDhJ*aaVOrQ}o^KIiC`{u?oI`hvtsWUk+lgzH zU>`IPp&c8LA@dIg8|*%MyD-VH5QjWC!s+Kv78g}Xz{sFwttSaPvtGIlf4kX9)iWO0!?e3 z_?d})wkAJuyvxKK9N%h~xyinKk>F&OcCR?tx0~-sxObEva{o4)w~w7H$Y;RN#)1(& ziyp?j3~#dIMfot;<8tf*j399`TdU%sSEuZFf&9_;f3Xf$)p=#q6Bm3)1EBcEin#ew_ zyPK6x#5GmJAqxXekPR>WGgEyU&F$ViV-Os99Ne|S7g#dL@RR*24|37&0zm7=vK@66 z*B1B7>p6el^{&iVstuO5igc;J7{`Vk2_u`y1x` z;;oqec=SJd{oi21cxC9lstV-)NjGz>wI|Lp@fVXE0a-|Vzq}U={#f2W+{ymfnf{ju z_+wN3-xGcRttL!$q4lu1Tda=JU=XQr`}42`@z)ZykD>A~S! zZyor8_&a2u0EK?mH9PV!{N1h}vd$Se%-@31vv_{~e?|-aVRg{&tiS&$fB(IR`l0^- DLO^x_ literal 0 HcmV?d00001 diff --git a/docs/source/index.rst b/docs/source/index.rst index 8ac09f6988893..fd741ea5e9766 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -114,6 +114,7 @@ Documentation usage/engine_args usage/env_vars usage/usage_stats + usage/disagg_prefill .. toctree:: :maxdepth: 1 diff --git a/docs/source/usage/disagg_prefill.rst b/docs/source/usage/disagg_prefill.rst new file mode 100644 index 0000000000000..9fe714b4fd856 --- /dev/null +++ b/docs/source/usage/disagg_prefill.rst @@ -0,0 +1,69 @@ +.. _disagg_prefill: + +Disaggregated prefilling (experimental) +======================================= + +This page introduces you the disaggregated prefilling feature in vLLM. This feature is experimental and subject to change. + +Why disaggregated prefilling? +----------------------------- + +Two main reasons: + +* **Tuning time-to-first-token (TTFT) and inter-token-latency (ITL) separately**. Disaggregated prefilling put prefill and decode phase of LLM inference inside different vLLM instances. This gives you the flexibility to assign different parallel strategies (e.g. ``tp`` and ``pp``) to tune TTFT without affecting ITL, or to tune ITL without affecting TTFT. +* **Controlling tail ITL**. Without disaggregated prefilling, vLLM may insert some prefill jobs during the decoding of one request. This results in higher tail latency. Disaggregated prefilling helps you solve this issue and control tail ITL. Chunked prefill with a proper chunk size also can achieve the same goal, but in practice it's hard to figure out the correct chunk size value. So disaggregated prefilling is a much more reliable way to control tail ITL. + +.. note:: + Disaggregated prefill DOES NOT improve throughput. + +Usage example +------------- + +Please refer to ``examples/disaggregated_prefill.sh`` for the example usage of disaggregated prefilling. 
+
+
+Benchmarks
+----------
+
+Please refer to ``benchmarks/disagg_benchmarks/`` for disaggregated prefilling benchmarks.
+
+
+Development
+-----------
+
+We implement disaggregated prefilling by running two vLLM instances: one for prefill (the prefill instance) and one for decode (the decode instance). A connector then transfers the prefill KV caches and results from the prefill instance to the decode instance.
+
+All of the disaggregated prefilling implementation lives under ``vllm/distributed/kv_transfer``.
+
+Key abstractions for disaggregated prefilling:
+
+* **Connector**: The connector allows the **kv consumer** to retrieve the KV caches of a batch of requests from the **kv producer**.
+* **LookupBuffer**: The lookup buffer provides two APIs: ``insert`` a KV cache and ``drop_select`` a KV cache. The semantics of ``insert`` and ``drop_select`` are similar to SQL: ``insert`` puts a KV cache into the buffer, while ``drop_select`` returns the KV cache that matches the given condition and drops it from the buffer.
+* **Pipe**: A single-direction FIFO pipe for tensor transmission. It supports ``send_tensor`` and ``recv_tensor``.
+
+.. note::
+    ``insert`` is a non-blocking operation, but ``drop_select`` is a blocking operation.
+
+Here is a figure illustrating how the above three abstractions are organized:
+
+.. image:: /assets/usage/disagg_prefill/abstraction.jpg
+    :alt: Disaggregated prefilling abstractions
+
+The workflow of disaggregated prefilling is as follows:
+
+.. image:: /assets/usage/disagg_prefill/overview.jpg
+    :alt: Disaggregated prefilling workflow
+
+The ``buffer`` step in the figure corresponds to the ``insert`` API of the LookupBuffer, and the ``drop_select`` step corresponds to the ``drop_select`` API of the LookupBuffer.
+
+
+Third-party contributions
+-------------------------
+
+Disaggregated prefilling is highly related to infrastructure, so vLLM relies on third-party connectors for production-level disaggregated prefilling (and the vLLM team will actively review and merge new PRs for third-party connectors).
+
+We recommend three ways of implementing a connector:
+
+* **Fully-customized connector**: Implement your own ``Connector`` and call third-party libraries to send and receive KV caches, and much more (for example, editing vLLM's model input to perform customized prefilling). This approach gives you the most control, but at the risk of being incompatible with future vLLM versions.
+* **Database-like connector**: Implement your own ``LookupBuffer`` and support the ``insert`` and ``drop_select`` APIs just like SQL.
+* **Distributed P2P connector**: Implement your own ``Pipe`` and support the ``send_tensor`` and ``recv_tensor`` APIs, just like ``torch.distributed``.
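
To make the producer/consumer workflow above more concrete, here is a minimal offline sketch written against vLLM's ``KVTransferConfig`` (visible in the next patch). This is an illustrative sketch, not the contents of ``examples/disaggregated_prefill.sh``: it assumes the ``LLM`` entrypoint forwards ``kv_transfer_config`` to the engine, that ``kv_parallel_size`` is the size of the KV-transfer group, and that both copies run on the same node with one GPU each; the model name, prompt, and the ``KV_ROLE`` environment variable are placeholders.

    # Hedged sketch: launch two copies of this script on one node, e.g.
    #   CUDA_VISIBLE_DEVICES=0 KV_ROLE=kv_producer python disagg_sketch.py
    #   CUDA_VISIBLE_DEVICES=1 KV_ROLE=kv_consumer python disagg_sketch.py
    # The producer (prefill instance) pushes KV caches through the connector;
    # the consumer (decode instance) drop_selects them and finishes decoding.
    import os

    from vllm import LLM, SamplingParams
    from vllm.config import KVTransferConfig

    role = os.environ.get("KV_ROLE", "kv_producer")
    rank = 0 if role == "kv_producer" else 1

    # Field names follow the KVTransferConfig shown in the patch below.
    ktc = KVTransferConfig.from_cli(
        '{"kv_connector": "PyNcclConnector", "kv_role": "%s", '
        '"kv_rank": %d, "kv_parallel_size": 2}' % (role, rank))

    llm = LLM(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct",  # placeholder model
        kv_transfer_config=ktc,
        gpu_memory_utilization=0.8,
    )

    # The prefill instance only needs to produce the KV caches, so one new
    # token is enough; the decode instance generates the full continuation.
    max_tokens = 1 if role == "kv_producer" else 64
    outputs = llm.generate(["San Francisco is a"],
                           SamplingParams(temperature=0, max_tokens=max_tokens))
    for output in outputs:
        print(output.outputs[0].text)

Running the two roles as separate processes is what allows each side to use its own parallel strategy, which is exactly the TTFT/ITL trade-off described above.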
From d263bd9df7b2f5586910e5d006a11ff11ba7c310 Mon Sep 17 00:00:00 2001 From: shangmingc Date: Mon, 16 Dec 2024 05:28:18 +0800 Subject: [PATCH 375/397] [Core] Support disaggregated prefill with Mooncake Transfer Engine (#10884) Signed-off-by: Shangming Cai --- vllm/config.py | 7 +- .../kv_transfer/kv_connector/factory.py | 3 +- .../kv_connector/simple_connector.py | 101 +++++-- .../kv_transfer/kv_pipe/mooncake_pipe.py | 272 ++++++++++++++++++ 4 files changed, 352 insertions(+), 31 deletions(-) create mode 100644 vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py diff --git a/vllm/config.py b/vllm/config.py index 37d062f7eb079..fce8011be4015 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2171,13 +2171,14 @@ def from_cli(cls, cli_value: str) -> "KVTransferConfig": return KVTransferConfig.model_validate_json(cli_value) def model_post_init(self, __context: Any) -> None: + supported_kv_connector = ["PyNcclConnector", "MooncakeConnector"] if all([ - self.kv_connector is not None, - self.kv_connector != "PyNcclConnector" + self.kv_connector is not None, self.kv_connector + not in supported_kv_connector ]): raise ValueError(f"Unsupported kv_connector: {self.kv_connector}. " f"Supported connectors are " - f"`PyNcclConnector`.") + f"{supported_kv_connector}.") if self.kv_role is not None and self.kv_role not in [ "kv_producer", "kv_consumer", "kv_both" diff --git a/vllm/distributed/kv_transfer/kv_connector/factory.py b/vllm/distributed/kv_transfer/kv_connector/factory.py index 015f892cec933..3e2bb436d24b5 100644 --- a/vllm/distributed/kv_transfer/kv_connector/factory.py +++ b/vllm/distributed/kv_transfer/kv_connector/factory.py @@ -11,7 +11,8 @@ class KVConnectorFactory: @staticmethod def create_connector(rank: int, local_rank: int, config: "VllmConfig") -> KVConnectorBase: - if config.kv_transfer_config.kv_connector == 'PyNcclConnector': + supported_kv_connector = ["PyNcclConnector", "MooncakeConnector"] + if config.kv_transfer_config.kv_connector in supported_kv_connector: from .simple_connector import SimpleConnector return SimpleConnector(rank, local_rank, config) else: diff --git a/vllm/distributed/kv_transfer/kv_connector/simple_connector.py b/vllm/distributed/kv_transfer/kv_connector/simple_connector.py index bf4f40ca94e29..4ace03ff1184e 100644 --- a/vllm/distributed/kv_transfer/kv_connector/simple_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/simple_connector.py @@ -2,7 +2,8 @@ Simple KV Cache Connector for Distributed Machine Learning Inference The SimpleConnector transfers KV caches between prefill vLLM worker (KV cache -producer) and decode vLLM worker (KV cache consumer) using PyNcclPipe. +producer) and decode vLLM worker (KV cache consumer) using PyNcclPipe or +MooncakePipe. But the logic can be extended to support other pipe and lookup buffer. 
""" @@ -15,7 +16,6 @@ from vllm.distributed.kv_transfer.kv_connector.base import KVConnectorBase from vllm.distributed.kv_transfer.kv_lookup_buffer.simple_buffer import ( SimpleBuffer) -from vllm.distributed.kv_transfer.kv_pipe.pynccl_pipe import PyNcclPipe from vllm.logger import init_logger from vllm.sequence import IntermediateTensors @@ -36,7 +36,27 @@ def __init__( self.config = config.kv_transfer_config - logger.info("Initializing PyNcclConfig under kv_transfer_config %s", + if self.config.kv_connector == "PyNcclConnector": + from vllm.distributed.kv_transfer.kv_pipe.pynccl_pipe import ( + PyNcclPipe) + logger.info( + "Initializing PyNcclConfig under kv_transfer_config %s", + self.config) + elif self.config.kv_connector == "MooncakeConnector": + # Check if MOONCAKE_CONFIG_PATH is set + import os + use_mooncake_distributed_pipe = os.getenv( + 'MOONCAKE_CONFIG_PATH') is not None + + if not use_mooncake_distributed_pipe: + raise ValueError( + "To use MooncakeConnector, you need to pass the ENV: " + "'MOONCAKE_CONFIG_PATH=/path/to/mooncake_config.json'.") + else: + from vllm.distributed.kv_transfer.kv_pipe.mooncake_pipe import ( # noqa: E501 + MooncakePipe) + logger.info( + "Initializing MooncakeConfig under kv_transfer_config %s", self.config) self.lookup_buffer_size = self.config.kv_buffer_size @@ -44,6 +64,11 @@ def __init__( self.producer_buffer: Optional[SimpleBuffer] = None self.consumer_buffer: Optional[SimpleBuffer] = None + self.producer_data_pipe: Union[PyNcclPipe, MooncakePipe] + self.consumer_data_pipe: Union[PyNcclPipe, MooncakePipe] + self.producer_signal_pipe: Union[PyNcclPipe, MooncakePipe] + self.consumer_signal_pipe: Union[PyNcclPipe, MooncakePipe] + # 2 pipes for every rank in the world port_offset_base = 2 * rank @@ -51,17 +76,26 @@ def __init__( # and the decode vLLM only uses recv pipe if self.config.is_kv_producer: - self.producer_data_pipe = PyNcclPipe( - local_rank=local_rank, - config=self.config, - port_offset=port_offset_base, - ) - self.producer_signal_pipe = PyNcclPipe( - local_rank=local_rank, - config=self.config, - port_offset=port_offset_base + 1, - device="cpu", - ) + if self.config.kv_connector == "PyNcclConnector": + self.producer_data_pipe = PyNcclPipe( + local_rank=local_rank, + config=self.config, + port_offset=port_offset_base, + ) + self.producer_signal_pipe = PyNcclPipe( + local_rank=local_rank, + config=self.config, + port_offset=port_offset_base + 1, + device="cpu", + ) + elif self.config.kv_connector == "MooncakeConnector": + self.producer_data_pipe = MooncakePipe( + local_rank=local_rank, + config=self.config, + ) + # We only need to initialize MooncakePipe once + self.producer_signal_pipe = self.producer_data_pipe + self.producer_buffer = SimpleBuffer(self.producer_signal_pipe, self.producer_data_pipe, self.config.kv_buffer_size) @@ -70,17 +104,25 @@ def __init__( # the current vLLM instance is KV consumer, so it needs to connect # its recv pipe to the send pipe of KV producder - self.consumer_data_pipe = PyNcclPipe( - local_rank=local_rank, - config=self.config, - port_offset=port_offset_base, - ) - self.consumer_signal_pipe = PyNcclPipe( - local_rank=local_rank, - config=self.config, - port_offset=port_offset_base + 1, - device="cpu", - ) + if self.config.kv_connector == "PyNcclConnector": + self.consumer_data_pipe = PyNcclPipe( + local_rank=local_rank, + config=self.config, + port_offset=port_offset_base, + ) + self.consumer_signal_pipe = PyNcclPipe( + local_rank=local_rank, + config=self.config, + port_offset=port_offset_base + 1, + 
device="cpu", + ) + elif self.config.kv_connector == "MooncakeConnector": + self.consumer_data_pipe = MooncakePipe( + local_rank=local_rank, + config=self.config, + ) + self.consumer_signal_pipe = self.consumer_data_pipe + self.consumer_buffer = SimpleBuffer( self.consumer_signal_pipe, self.consumer_data_pipe, @@ -260,6 +302,11 @@ def recv_kv_caches_and_hidden_states( def close(self): self.producer_data_pipe.close() - self.producer_signal_pipe.close() self.consumer_data_pipe.close() - self.consumer_signal_pipe.close() + if self.config.kv_connector == "PyNcclConnector": + self.producer_signal_pipe.close() + self.consumer_signal_pipe.close() + elif self.config.kv_connector == "MooncakeConnector": + # MooncakePipe reuses data_pipe for signal_pipe, so we only have to + # close the data_pipe. + pass diff --git a/vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py b/vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py new file mode 100644 index 0000000000000..8e4358672b74d --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py @@ -0,0 +1,272 @@ +import json +import os +import pickle +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from typing import Optional, Union + +import torch +import zmq + +from vllm.config import KVTransferConfig +from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase +from vllm.logger import init_logger + +logger = init_logger(__name__) +NONE_INT = -150886311 + + +@dataclass +class MooncakeTransferEngineConfig: + prefill_url: str + decode_url: str + metadata_backend: Union[str, None] + metadata_server: str + protocol: str + device_name: str + + @staticmethod + def from_file(file_path: str) -> 'MooncakeTransferEngineConfig': + """Load the config from a JSON file.""" + with open(file_path) as fin: + config = json.load(fin) + return MooncakeTransferEngineConfig( + prefill_url=config.get("prefill_url"), + decode_url=config.get("decode_url"), + metadata_backend=config.get("metadata_backend", None), + metadata_server=config.get("metadata_server"), + protocol=config.get("protocol", "tcp"), + device_name=config.get("device_name", ""), + ) + + @staticmethod + def load_from_env() -> 'MooncakeTransferEngineConfig': + """Load config from a file specified in the environment variable.""" + config_file_path = os.getenv('MOONCAKE_CONFIG_PATH') + if config_file_path is None: + raise ValueError( + "The environment variable 'MOONCAKE_CONFIG_PATH' is not set.") + return MooncakeTransferEngineConfig.from_file(config_file_path) + + +class MooncakeTransferEngine: + """Handles the transfer of data using mooncake_vllm_adaptor and ZeroMQ.""" + + def __init__(self, kv_rank: int, local_rank: int): + try: + import mooncake_vllm_adaptor as mva + except ImportError as e: + raise ImportError( + "Please install mooncake by following the instructions at " + "https://github.com/kvcache-ai/Mooncake/blob/main/doc/en/build.md " # noqa: E501 + "to run vLLM with MooncakeConnector.") from e + + self.engine = mva.mooncake_vllm_adaptor() + self.local_rank = local_rank + + try: + self.config = MooncakeTransferEngineConfig.load_from_env() + logger.info("Mooncake Configuration loaded successfully.") + except ValueError as e: + logger.error(e) + raise + except Exception as exc: + logger.error( + "An error occurred while loading the configuration: %s", exc) + raise + prefill_host, base_prefill_port = self.config.prefill_url.split(':') + decode_host, base_decode_port = self.config.decode_url.split(':') + + # Avoid ports conflict when running prefill 
and decode on the same node + if prefill_host == decode_host and \ + base_prefill_port == base_decode_port: + base_decode_port = str(int(base_decode_port) + 100) + + prefill_port = int(base_prefill_port) + self.local_rank + decode_port = int(base_decode_port) + self.local_rank + self.prefill_url = ':'.join([prefill_host, str(prefill_port)]) + self.decode_url = ':'.join([decode_host, str(decode_port)]) + + self.initialize(self.prefill_url if kv_rank == 0 else self.decode_url, + self.config.metadata_server, self.config.protocol, + self.config.device_name, self.config.metadata_backend) + + self.remote_url = (self.decode_url + if kv_rank == 0 else self.prefill_url) + + # Initialize ZeroMQ context and sockets + self.context = zmq.Context() # type: ignore[attr-defined] + self.sender_socket = self.context.socket(zmq.constants.PUSH) + self.receiver_socket = self.context.socket(zmq.constants.PULL) + self.sender_ack = self.context.socket(zmq.constants.PULL) + self.receiver_ack = self.context.socket(zmq.constants.PUSH) + + self.buffer_cleaner = ThreadPoolExecutor(max_workers=1) + self._setup_metadata_sockets(kv_rank, prefill_host, base_prefill_port, + decode_host, base_decode_port) + + def _setup_metadata_sockets(self, kv_rank: int, p_host: str, p_port: str, + d_host: str, d_port: str) -> None: + """Set up ZeroMQ sockets for sending and receiving data.""" + # Offsets < 8 are left for initialization in case tp and pp are enabled + p_rank_offset = int(p_port) + 8 + self.local_rank * 2 + d_rank_offset = int(d_port) + 8 + self.local_rank * 2 + if kv_rank == 0: + self.sender_socket.bind(f"tcp://*:{p_rank_offset + 1}") + self.receiver_socket.connect(f"tcp://{d_host}:{d_rank_offset + 1}") + self.sender_ack.connect(f"tcp://{d_host}:{d_rank_offset + 2}") + self.receiver_ack.bind(f"tcp://*:{p_rank_offset + 2}") + else: + self.receiver_socket.connect(f"tcp://{p_host}:{p_rank_offset + 1}") + self.sender_socket.bind(f"tcp://*:{d_rank_offset + 1}") + self.receiver_ack.bind(f"tcp://*:{d_rank_offset + 2}") + self.sender_ack.connect(f"tcp://{p_host}:{p_rank_offset + 2}") + + def initialize(self, local_hostname: str, metadata_server: str, + protocol: str, device_name: str, + metadata_backend: Union[str, None]) -> None: + """Initialize the mooncake instance.""" + if metadata_backend is None: + self.engine.initialize(local_hostname, metadata_server, protocol, + device_name) + else: + supported_backend = ["etcd", "redis"] + metadata_backend = metadata_backend.lower() + if metadata_backend not in supported_backend: + raise ValueError( + "Mooncake Configuration error. 
`metadata_backend`" + f"should be one of {supported_backend}.") + + self.engine.initializeExt(local_hostname, metadata_server, + protocol, device_name, metadata_backend) + + def allocate_managed_buffer(self, length: int) -> int: + """Allocate a managed buffer of the specified length.""" + ret = self.engine.allocateManagedBuffer(length) + if ret <= 0: + logger.error("Allocation Return Error") + raise Exception("Allocation Return Error") + return ret + + def free_managed_buffer(self, buffer: int, length: int) -> int: + """Free a previously allocated managed buffer.""" + return self.engine.freeManagedBuffer(buffer, length) + + def transfer_sync(self, buffer: int, peer_buffer_address: int, + length: int) -> int: + """Synchronously transfer data to the specified address.""" + ret = self.engine.transferSync(self.remote_url, buffer, + peer_buffer_address, length) + if ret < 0: + logger.error("Transfer Return Error") + raise Exception("Transfer Return Error") + return ret + + def write_bytes_to_buffer(self, buffer: int, user_data: bytes, + length: int) -> int: + """Write bytes to the allocated buffer.""" + return self.engine.writeBytesToBuffer(buffer, user_data, length) + + def read_bytes_from_buffer(self, buffer: int, length: int) -> bytes: + """Read bytes from the allocated buffer.""" + return self.engine.readBytesFromBuffer(buffer, length) + + def wait_for_ack(self, src_ptr: int, length: int) -> None: + """Asynchronously wait for ACK from the receiver.""" + ack = self.sender_ack.recv_pyobj() + if ack != b'ACK': + logger.error("Failed to receive ACK from the receiver") + + self.free_managed_buffer(src_ptr, length) + + def send_bytes(self, user_data: bytes) -> None: + """Send bytes to the remote process.""" + length = len(user_data) + src_ptr = self.allocate_managed_buffer(length) + self.write_bytes_to_buffer(src_ptr, user_data, length) + self.sender_socket.send_pyobj((src_ptr, length)) + self.buffer_cleaner.submit(self.wait_for_ack, src_ptr, length) + + def recv_bytes(self) -> bytes: + """Receive bytes from the remote process.""" + src_ptr, length = self.receiver_socket.recv_pyobj() + dst_ptr = self.allocate_managed_buffer(length) + self.transfer_sync(dst_ptr, src_ptr, length) + ret = self.read_bytes_from_buffer(dst_ptr, length) + + # Buffer cleanup + self.receiver_ack.send_pyobj(b'ACK') + self.free_managed_buffer(dst_ptr, length) + + return ret + + +class MooncakePipe(KVPipeBase): + """MooncakeTransferEngine based Pipe implementation.""" + + def __init__(self, + local_rank: int, + config: KVTransferConfig, + device: Optional[str] = None): + """Initialize the mooncake pipe and set related parameters.""" + self.config = config + self.local_rank = local_rank + self.kv_rank = self.config.kv_rank + if device is None: + self.device = self._select_device(self.config.kv_buffer_device) + else: + self.device = self._select_device(device) + + self.transfer_engine = MooncakeTransferEngine(self.kv_rank, + self.local_rank) + self.transport_thread: Optional[ThreadPoolExecutor] = None + self.none_tensor = torch.tensor([NONE_INT], device=self.device) + + def _select_device(self, device: str) -> torch.device: + """Select available device (CUDA or CPU).""" + logger.info("Selecting device: %s", device) + if device == "cuda": + return torch.device(f"cuda:{self.local_rank}") + else: + return torch.device("cpu") + + def tensor_hash(self, tensor: torch.Tensor) -> int: + """Calculate the hash value of the tensor.""" + return hash(tensor.data_ptr()) + + def _send_impl(self, tensor: torch.Tensor) -> None: + """Implement 
the tensor sending logic.""" + value_bytes = pickle.dumps(tensor) + self.transfer_engine.send_bytes(value_bytes) + + def _recv_impl(self) -> torch.Tensor: + """Implement the tensor receiving logic.""" + data = self.transfer_engine.recv_bytes() + return pickle.loads(data) + + def send_tensor(self, tensor: Optional[torch.Tensor]) -> None: + """Send tensor to the target process.""" + if self.transport_thread is None: + self.transport_thread = ThreadPoolExecutor(max_workers=1) + tensor = tensor if tensor is not None else self.none_tensor + assert (len(tensor.shape) > 0) + self.transport_thread.submit(self._send_impl, tensor) + + def recv_tensor(self) -> Optional[torch.Tensor]: + """Receive tensor from other processes.""" + if self.transport_thread is None: + self.transport_thread = ThreadPoolExecutor(max_workers=1) + tensor = self.transport_thread.submit(self._recv_impl).result() + if tensor.numel() == 1 and tensor.item() == NONE_INT: + return None + else: + return tensor + + def close(self) -> None: + """Cleanup logic when closing the pipe.""" + self.transfer_engine.sender_socket.close() + self.transfer_engine.receiver_socket.close() + self.transfer_engine.sender_ack.close() + self.transfer_engine.receiver_ack.close() + self.transfer_engine.context.term() # Terminate the ZMQ context + logger.info("Closed the transfer engine and cleaned up resources.") From 25ebed2f8ca6d747d63f2be9ede023c561851ac8 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Sun, 15 Dec 2024 13:33:00 -0800 Subject: [PATCH 376/397] [V1][Minor] Cache np arange to reduce input preparation overhead (#11214) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu_model_runner.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index abcd4b007a326..67166fb05085c 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -118,6 +118,12 @@ def __init__( dtype=self.dtype, device=self.device) + # OPTIMIZATION: Cache the tensors rather than creating them every step. + self.arange_np = np.arange(max(self.max_num_reqs, self.max_model_len), + dtype=np.int32) + # NOTE(woosuk): These tensors are "stateless", i.e., they are literally + # a faster version of creating a new tensor every time. Thus, we should + # not make any assumptions about the values in these tensors. self.input_ids_cpu = torch.zeros(self.max_num_tokens, dtype=torch.int32, device="cpu", @@ -269,11 +275,13 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): # Get request indices. # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2] - req_indices = np.repeat(np.arange(num_reqs), num_scheduled_tokens) + req_indices = np.repeat(self.arange_np[:num_reqs], + num_scheduled_tokens) # Get batched arange. # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] - arange = np.concatenate([np.arange(n) for n in num_scheduled_tokens]) + arange = np.concatenate( + [self.arange_np[:n] for n in num_scheduled_tokens]) # Get positions. 
positions_np = self.positions_np[:total_num_scheduled_tokens] From da6f40924609e084ced486cae5b4ddf97133acd9 Mon Sep 17 00:00:00 2001 From: AlexHe99 Date: Mon, 16 Dec 2024 08:33:58 +0800 Subject: [PATCH 377/397] Update deploying_with_k8s.rst (#10922) --- docs/source/serving/deploying_with_k8s.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/serving/deploying_with_k8s.rst b/docs/source/serving/deploying_with_k8s.rst index 7dc076dc709df..cc3606f0df851 100644 --- a/docs/source/serving/deploying_with_k8s.rst +++ b/docs/source/serving/deploying_with_k8s.rst @@ -162,7 +162,7 @@ To test the deployment, run the following ``curl`` command: curl http://mistral-7b.default.svc.cluster.local/v1/completions \ -H "Content-Type: application/json" \ -d '{ - "model": "facebook/opt-125m", + "model": "mistralai/Mistral-7B-Instruct-v0.3", "prompt": "San Francisco is a", "max_tokens": 7, "temperature": 0 @@ -172,4 +172,4 @@ If the service is correctly deployed, you should receive a response from the vLL Conclusion ---------- -Deploying vLLM with Kubernetes allows for efficient scaling and management of ML models leveraging GPU resources. By following the steps outlined above, you should be able to set up and test a vLLM deployment within your Kubernetes cluster. If you encounter any issues or have suggestions, please feel free to contribute to the documentation. \ No newline at end of file +Deploying vLLM with Kubernetes allows for efficient scaling and management of ML models leveraging GPU resources. By following the steps outlined above, you should be able to set up and test a vLLM deployment within your Kubernetes cluster. If you encounter any issues or have suggestions, please feel free to contribute to the documentation. From 69ba344de8683ec4d3d42d11ae4e147a2a302da8 Mon Sep 17 00:00:00 2001 From: chenqianfzh <51831990+chenqianfzh@users.noreply.github.com> Date: Sun, 15 Dec 2024 16:38:40 -0800 Subject: [PATCH 378/397] [Bugfix] Fix block size validation (#10938) --- vllm/engine/arg_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 5a73c6ee02e0c..0aa367a173b6c 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -425,7 +425,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: parser.add_argument('--block-size', type=int, default=EngineArgs.block_size, - choices=[8, 16, 32, 64, 128], + choices=[8, 16, 32], help='Token block size for contiguous chunks of ' 'tokens. 
This is ignored on neuron devices and ' 'set to max-model-len') From 17138af7c45eba3aba3e9b84a3852b4ba81e460f Mon Sep 17 00:00:00 2001 From: yansh97 Date: Mon, 16 Dec 2024 16:15:40 +0800 Subject: [PATCH 379/397] [Bugfix] Fix the default value for temperature in ChatCompletionRequest (#11219) --- vllm/entrypoints/openai/protocol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index dfb7c977dbd43..6ed7c2e9dcd6b 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -211,7 +211,7 @@ class ChatCompletionRequest(OpenAIBaseModel): stop: Optional[Union[str, List[str]]] = Field(default_factory=list) stream: Optional[bool] = False stream_options: Optional[StreamOptions] = None - temperature: Optional[float] = 0.7 + temperature: Optional[float] = 1.0 top_p: Optional[float] = 1.0 tools: Optional[List[ChatCompletionToolsParam]] = None tool_choice: Optional[Union[Literal["none"], Literal["auto"], From b3b1526f03906c935e6ef80a2cdc971a65fdf7e2 Mon Sep 17 00:00:00 2001 From: cennn <61925104+cennn@users.noreply.github.com> Date: Mon, 16 Dec 2024 17:20:49 +0800 Subject: [PATCH 380/397] WIP: [CI/Build] simplify Dockerfile build for ARM64 / GH200 (#11212) Signed-off-by: drikster80 Co-authored-by: drikster80 --- Dockerfile | 40 +++++++++++++++---- docs/source/serving/deploying_with_docker.rst | 26 ++++++++++++ requirements-build.txt | 2 +- requirements-cuda-arm64.txt | 3 ++ requirements-cuda.txt | 4 +- 5 files changed, 64 insertions(+), 11 deletions(-) create mode 100644 requirements-cuda-arm64.txt diff --git a/Dockerfile b/Dockerfile index c1b6e1bbfe354..123703848749c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,6 +11,7 @@ ARG CUDA_VERSION=12.4.1 FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 AS base ARG CUDA_VERSION=12.4.1 ARG PYTHON_VERSION=3.12 +ARG TARGETPLATFORM ENV DEBIAN_FRONTEND=noninteractive # Install Python and other dependencies @@ -46,9 +47,14 @@ WORKDIR /workspace # install build and runtime dependencies COPY requirements-common.txt requirements-common.txt COPY requirements-cuda.txt requirements-cuda.txt +COPY requirements-cuda-arm64.txt requirements-cuda-arm64.txt RUN --mount=type=cache,target=/root/.cache/pip \ python3 -m pip install -r requirements-cuda.txt +RUN --mount=type=cache,target=/root/.cache/pip \ + if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ + python3 -m pip install -r requirements-cuda-arm64.txt; \ + fi # cuda arch list used by torch # can be useful for both `dev` and `test` @@ -63,6 +69,7 @@ ENV VLLM_FA_CMAKE_GPU_ARCHES=${vllm_fa_cmake_gpu_arches} #################### WHEEL BUILD IMAGE #################### FROM base AS build +ARG TARGETPLATFORM # install build dependencies COPY requirements-build.txt requirements-build.txt @@ -70,6 +77,11 @@ COPY requirements-build.txt requirements-build.txt RUN --mount=type=cache,target=/root/.cache/pip \ python3 -m pip install -r requirements-build.txt +RUN --mount=type=cache,target=/root/.cache/pip \ + if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ + python3 -m pip install -r requirements-cuda-arm64.txt; \ + fi + COPY . . 
ARG GIT_REPO_CHECK=0 RUN --mount=type=bind,source=.git,target=.git \ @@ -134,8 +146,8 @@ COPY requirements-test.txt requirements-test.txt COPY requirements-dev.txt requirements-dev.txt RUN --mount=type=cache,target=/root/.cache/pip \ python3 -m pip install -r requirements-dev.txt - #################### DEV IMAGE #################### + #################### vLLM installation IMAGE #################### # image with vLLM installed FROM nvidia/cuda:${CUDA_VERSION}-base-ubuntu22.04 AS vllm-base @@ -143,6 +155,9 @@ ARG CUDA_VERSION=12.4.1 ARG PYTHON_VERSION=3.12 WORKDIR /vllm-workspace ENV DEBIAN_FRONTEND=noninteractive +ARG TARGETPLATFORM + +COPY requirements-cuda-arm64.txt requirements-cuda-arm64.txt RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \ echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment @@ -168,18 +183,25 @@ RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ # or future versions of triton. RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/ -# install vllm wheel first, so that torch etc will be installed +# Install vllm wheel first, so that torch etc will be installed. RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \ --mount=type=cache,target=/root/.cache/pip \ python3 -m pip install dist/*.whl --verbose RUN --mount=type=cache,target=/root/.cache/pip \ - . /etc/environment && \ - python3 -m pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.1.6/flashinfer-0.1.6+cu121torch2.4-cp${PYTHON_VERSION_STR}-cp${PYTHON_VERSION_STR}-linux_x86_64.whl + if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ + pip uninstall -y torch && \ + python3 -m pip install -r requirements-cuda-arm64.txt; \ + fi + +RUN --mount=type=cache,target=/root/.cache/pip \ +. 
/etc/environment && \ +if [ "$TARGETPLATFORM" != "linux/arm64" ]; then \ + python3 -m pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.1.6/flashinfer-0.1.6+cu121torch2.4-cp${PYTHON_VERSION_STR}-cp${PYTHON_VERSION_STR}-linux_x86_64.whl; \ +fi COPY examples examples #################### vLLM installation IMAGE #################### - #################### TEST IMAGE #################### # image to run unit testing suite # note that this uses vllm installed by `pip` @@ -209,7 +231,6 @@ COPY vllm/v1 /usr/local/lib/python3.12/dist-packages/vllm/v1 RUN mkdir test_docs RUN mv docs test_docs/ RUN mv vllm test_docs/ - #################### TEST IMAGE #################### #################### OPENAI API SERVER #################### @@ -218,8 +239,11 @@ FROM vllm-base AS vllm-openai # install additional dependencies for openai api server RUN --mount=type=cache,target=/root/.cache/pip \ - pip install accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.45.0' timm==0.9.10 - + if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ + pip install accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.42.0' 'timm==0.9.10'; \ + else \ + pip install accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.45.0' 'timm==0.9.10'; \ + fi ENV VLLM_USAGE_SOURCE production-docker-image ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"] diff --git a/docs/source/serving/deploying_with_docker.rst b/docs/source/serving/deploying_with_docker.rst index 14d94b09e9b9c..11a9f12fd17cd 100644 --- a/docs/source/serving/deploying_with_docker.rst +++ b/docs/source/serving/deploying_with_docker.rst @@ -37,6 +37,32 @@ You can build and run vLLM from source via the provided `Dockerfile =61 setuptools-scm>=8 -torch==2.5.1 +torch==2.5.1; platform_machine != 'aarch64' wheel jinja2 diff --git a/requirements-cuda-arm64.txt b/requirements-cuda-arm64.txt new file mode 100644 index 0000000000000..bbcb5cb7012ce --- /dev/null +++ b/requirements-cuda-arm64.txt @@ -0,0 +1,3 @@ +--index-url https://download.pytorch.org/whl/nightly/cu124 +torchvision==0.22.0.dev20241215; platform_machine == 'aarch64' +torch==2.6.0.dev20241210+cu124; platform_machine == 'aarch64' diff --git a/requirements-cuda.txt b/requirements-cuda.txt index 058ab7c1ee9df..5d4dee8c7129a 100644 --- a/requirements-cuda.txt +++ b/requirements-cuda.txt @@ -4,7 +4,7 @@ # Dependencies for NVIDIA GPUs ray >= 2.9 nvidia-ml-py >= 12.560.30 # for pynvml package -torch == 2.5.1 +torch == 2.5.1; platform_machine != 'aarch64' # These must be updated alongside torch -torchvision == 0.20.1 # Required for phi3v processor. See https://github.com/pytorch/vision?tab=readme-ov-file#installation for corresponding version +torchvision == 0.20.1; platform_machine != 'aarch64' # Required for phi3v processor. 
See https://github.com/pytorch/vision?tab=readme-ov-file#installation for corresponding version xformers == 0.0.28.post3; platform_system == 'Linux' and platform_machine == 'x86_64' # Requires PyTorch 2.5.1 From bddbbcb132429084ede62855bcd6a1023a3645c1 Mon Sep 17 00:00:00 2001 From: Jani Monoses Date: Mon, 16 Dec 2024 11:56:19 +0200 Subject: [PATCH 381/397] [Model] Support Cohere2ForCausalLM (Cohere R7B) (#11203) --- docs/source/models/supported_models.rst | 4 ++-- tests/models/registry.py | 2 ++ tests/models/test_initialization.py | 4 ++++ vllm/model_executor/models/commandr.py | 19 +++++++++++++++++-- vllm/model_executor/models/registry.py | 1 + 5 files changed, 26 insertions(+), 4 deletions(-) diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index cae4a88de1638..3bef3f3226062 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -118,9 +118,9 @@ Text Generation (``--task generate``) - :code:`THUDM/chatglm2-6b`, :code:`THUDM/chatglm3-6b`, etc. - ✅︎ - ✅︎ - * - :code:`CohereForCausalLM` + * - :code:`CohereForCausalLM`,:code:`Cohere2ForCausalLM` - Command-R - - :code:`CohereForAI/c4ai-command-r-v01`, etc. + - :code:`CohereForAI/c4ai-command-r-v01`, :code:`CohereForAI/c4ai-command-r7b-12-2024`, etc. - ✅︎ - ✅︎ * - :code:`DbrxForCausalLM` diff --git a/tests/models/registry.py b/tests/models/registry.py index 6a8b1742ceae3..fac8c4b2e9b19 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -53,6 +53,8 @@ class _HfExamplesInfo: # ChatGLMModel supports multimodal "CohereForCausalLM": _HfExamplesInfo("CohereForAI/c4ai-command-r-v01", trust_remote_code=True), + "Cohere2ForCausalLM": _HfExamplesInfo("CohereForAI/c4ai-command-r7b-12-2024", # noqa: E501 + trust_remote_code=True), "DbrxForCausalLM": _HfExamplesInfo("databricks/dbrx-instruct"), "DeciLMForCausalLM": _HfExamplesInfo("Deci/DeciLM-7B-instruct", trust_remote_code=True), diff --git a/tests/models/test_initialization.py b/tests/models/test_initialization.py index 3b728f2744fca..a4eea7f035c91 100644 --- a/tests/models/test_initialization.py +++ b/tests/models/test_initialization.py @@ -1,6 +1,7 @@ from unittest.mock import patch import pytest +import transformers from transformers import PretrainedConfig from vllm import LLM @@ -11,6 +12,9 @@ @pytest.mark.parametrize("model_arch", HF_EXAMPLE_MODELS.get_supported_archs()) def test_can_initialize(model_arch): model_info = HF_EXAMPLE_MODELS.get_hf_info(model_arch) + if (model_arch == "Cohere2ForCausalLM" + and transformers.__version__ < "4.48.0"): + pytest.skip(reason="Model introduced in HF >= 4.48.0") if not model_info.is_available_online: pytest.skip("Model is not available online") diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index 85e24ca660686..c846e42f1b0c3 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -48,7 +48,7 @@ from vllm.sequence import IntermediateTensors from .interfaces import SupportsLoRA, SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -171,12 +171,26 @@ def __init__( rope_scaling=self.rope_scaling, is_neox_style=False, ) + + sliding_window = getattr(config, "sliding_window", None) + # Model v2 has sliding windows, v1 does not + self.v1 = sliding_window is None + + layer_idx = extract_layer_index(prefix) + 
layer_has_sliding_window = ( + getattr(config, "sliding_window_pattern", False) + and (layer_idx + 1) % self.config.sliding_window_pattern != 0) + + self.sliding_window = (sliding_window + if layer_has_sliding_window else None) + self.attn = Attention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + per_layer_sliding_window=self.sliding_window, prefix=f"{prefix}.attn") if self.use_qk_norm: self.q_norm = LayerNorm(param_shape=(self.num_heads, @@ -206,7 +220,8 @@ def forward( q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) if self.use_qk_norm: q, k = self._apply_qk_norm(q, k) - q, k = self.rotary_emb(positions, q, k) + if self.v1 or self.sliding_window: + q, k = self.rotary_emb(positions, q, k) attn_output = self.attn(q, k, v, kv_cache, attn_metadata) output, _ = self.o_proj(attn_output) return output diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 4e77746f312e3..68a2467a813a1 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -41,6 +41,7 @@ "BloomForCausalLM": ("bloom", "BloomForCausalLM"), # ChatGLMModel supports multimodal "CohereForCausalLM": ("commandr", "CohereForCausalLM"), + "Cohere2ForCausalLM": ("commandr", "CohereForCausalLM"), "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"), "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"), From d927dbcd889fb2476cb61ea477ff51e5dd9e1ae3 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Mon, 16 Dec 2024 18:09:53 +0800 Subject: [PATCH 382/397] [Model] Refactor Ultravox to use merged input processor (#11198) Signed-off-by: Isotr0py <2037008807@qq.com> Co-authored-by: Cyrus Leung --- examples/offline_inference_audio_language.py | 10 +- tests/distributed/test_pipeline_parallel.py | 2 +- tests/entrypoints/openai/test_audio.py | 1 + .../audio_language/test_ultravox.py | 5 +- vllm/entrypoints/chat_utils.py | 2 +- vllm/model_executor/models/ultravox.py | 244 ++++++++---------- vllm/multimodal/processing.py | 19 +- 7 files changed, 129 insertions(+), 154 deletions(-) diff --git a/examples/offline_inference_audio_language.py b/examples/offline_inference_audio_language.py index 050b791b62adb..68b786961b14a 100644 --- a/examples/offline_inference_audio_language.py +++ b/examples/offline_inference_audio_language.py @@ -25,16 +25,16 @@ def run_ultravox(question: str, audio_count: int): tokenizer = AutoTokenizer.from_pretrained(model_name) messages = [{ - 'role': - 'user', - 'content': - "<|reserved_special_token_0|>\n" * audio_count + question + 'role': 'user', + 'content': "<|audio|>\n" * audio_count + question }] prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) - llm = LLM(model=model_name, limit_mm_per_prompt={"audio": audio_count}) + llm = LLM(model=model_name, + trust_remote_code=True, + limit_mm_per_prompt={"audio": audio_count}) stop_token_ids = None return llm, prompt, stop_token_ids diff --git a/tests/distributed/test_pipeline_parallel.py b/tests/distributed/test_pipeline_parallel.py index 85d408efafe96..ddbf40f089407 100644 --- a/tests/distributed/test_pipeline_parallel.py +++ b/tests/distributed/test_pipeline_parallel.py @@ -214,7 +214,7 @@ def iter_params(self, model_name: str): "Qwen/Qwen-VL-Chat": PPTestSettings.fast(trust_remote_code=True), "Qwen/Qwen2-Audio-7B-Instruct": PPTestSettings.fast(), "Qwen/Qwen2-VL-2B-Instruct": 
PPTestSettings.fast(), - "fixie-ai/ultravox-v0_3": PPTestSettings.fast(), + "fixie-ai/ultravox-v0_3": PPTestSettings.fast(trust_remote_code=True), # [Encoder-decoder] # TODO: Implement PP # "meta-llama/Llama-3.2-11B-Vision-Instruct": PPTestSettings.fast(), diff --git a/tests/entrypoints/openai/test_audio.py b/tests/entrypoints/openai/test_audio.py index a74109e2f5120..b579dcbb5c402 100644 --- a/tests/entrypoints/openai/test_audio.py +++ b/tests/entrypoints/openai/test_audio.py @@ -25,6 +25,7 @@ def server(): "--max-num-seqs", "5", "--enforce-eager", + "--trust-remote-code", ] with RemoteOpenAIServer(MODEL_NAME, args) as remote_server: diff --git a/tests/models/decoder_only/audio_language/test_ultravox.py b/tests/models/decoder_only/audio_language/test_ultravox.py index e100c6b9bb906..c548cfdf53414 100644 --- a/tests/models/decoder_only/audio_language/test_ultravox.py +++ b/tests/models/decoder_only/audio_language/test_ultravox.py @@ -16,7 +16,7 @@ AudioTuple = Tuple[np.ndarray, int] -VLLM_PLACEHOLDER = "<|reserved_special_token_0|>" +VLLM_PLACEHOLDER = "<|audio|>" HF_PLACEHOLDER = "<|audio|>" CHUNKED_PREFILL_KWARGS = { @@ -46,7 +46,8 @@ def audio(request): def server(request, audio_assets): args = [ "--dtype=bfloat16", "--max-model-len=4096", "--enforce-eager", - f"--limit-mm-per-prompt=audio={len(audio_assets)}" + f"--limit-mm-per-prompt=audio={len(audio_assets)}", + "--trust-remote-code" ] + [ f"--{key.replace('_','-')}={value}" for key, value in request.param.items() diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index c2054dcbfce0e..aaa5cd759366a 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -418,7 +418,7 @@ def _placeholder_str(self, modality: ModalityStr, raise TypeError(f"Unknown {modality} model type: {model_type}") elif modality == "audio": if model_type == "ultravox": - return "<|reserved_special_token_0|>" + return "<|audio|>" if model_type == "qwen2_audio": return (f"Audio {current_count}: " f"<|audio_bos|><|AUDIO|><|audio_eos|>") diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py index ea1e5401d42c0..ebaa8a4c4f38a 100644 --- a/vllm/model_executor/models/ultravox.py +++ b/vllm/model_executor/models/ultravox.py @@ -3,41 +3,39 @@ import math from functools import cached_property, lru_cache -from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, - TypedDict, Union, cast) +from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, + Tuple, TypedDict, Union) import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F +from transformers import BatchFeature from transformers.models.whisper import WhisperFeatureExtractor from transformers.models.whisper.modeling_whisper import WhisperEncoder from vllm.attention import AttentionMetadata from vllm.config import VllmConfig -from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, - InputContext, token_inputs) +from vllm.inputs import InputContext from vllm.model_executor.layers.activation import SiluAndMul, get_act_fn from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.model_loader.loader import DefaultModelLoader from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.multimodal import (MULTIMODAL_REGISTRY, MultiModalKwargs, - NestedTensors) -from vllm.multimodal.utils import 
(cached_get_tokenizer, - consecutive_placeholder_ranges, - repeat_and_pad_placeholder_tokens) -from vllm.sequence import IntermediateTensors, SequenceData +from vllm.multimodal import MULTIMODAL_REGISTRY, NestedTensors +from vllm.multimodal.processing import (BaseMultiModalProcessor, + MultiModalDataDict, + MultiModalDataItems, ProcessorInputs, + PromptReplacement) +from vllm.sequence import IntermediateTensors from vllm.transformers_utils.configs.ultravox import UltravoxConfig -from vllm.utils import is_list_of from .interfaces import SupportsMultiModal, SupportsPP from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, init_vllm_registered_model, maybe_prefix, merge_multimodal_embeddings_from_map) -_AUDIO_PLACEHOLDER_TOKEN = 128002 _AUDIO_TOKENS_PER_SECOND = 6.25 @@ -72,64 +70,18 @@ def get_ultravox_max_audio_tokens(ctx: InputContext): return math.ceil(feature_extractor.chunk_length * _AUDIO_TOKENS_PER_SECOND) -def dummy_seq_data_for_ultravox( - ctx: InputContext, - seq_len: int, - audio_count: int, -): - audio_length = min(get_ultravox_max_audio_tokens(ctx), - seq_len // audio_count) +class UltravoxMultiModalProcessor(BaseMultiModalProcessor): - return SequenceData.from_prompt_token_counts( - (_AUDIO_PLACEHOLDER_TOKEN, audio_length * audio_count), - (0, seq_len - audio_length * audio_count)), { - "audio": - consecutive_placeholder_ranges(num_items=audio_count, - item_size=audio_length) - } - - -def dummy_audio_for_ultravox( - ctx: InputContext, - audio_count: int, -): - feature_extractor = whisper_feature_extractor(ctx) - audio_and_sr = (np.array([0.0] * feature_extractor.chunk_length), 1) - return {"audio": [audio_and_sr] * audio_count} - - -def dummy_data_for_ultravox( - ctx: InputContext, - seq_len: int, - mm_counts: Mapping[str, int], -): - audio_count = mm_counts["audio"] - seq_data, ranges = dummy_seq_data_for_ultravox(ctx, seq_len, audio_count) - mm_dict = dummy_audio_for_ultravox(ctx, audio_count) - - return DummyData(seq_data, mm_dict, ranges) - - -def input_mapper_for_ultravox(ctx: InputContext, data: object): - if not isinstance(data, list): - data = [data] - - if len(data) == 0: - return MultiModalKwargs() - - # If the audio inputs are embeddings, no need for preprocessing - if is_list_of(data, torch.Tensor, check="all"): - return MultiModalKwargs({"audio_embeds": data}) - - audio_features = [] - for audio_input in data: - if not isinstance(audio_input, tuple): - raise NotImplementedError( - f"Unsupported data type: {type(audio_input)}") - - (audio, sr) = cast(Tuple[np.ndarray, Union[float, int]], audio_input) - feature_extractor = whisper_feature_extractor(ctx) + def _get_feature_extractor(self) -> WhisperFeatureExtractor: + return self._get_hf_processor().audio_processor.feature_extractor + def _resample_audio( + self, + audio: np.ndarray, + sr: int, + ) -> Dict[str, Union[np.ndarray, int]]: + # resample audio to the model's sampling rate + feature_extractor = self._get_feature_extractor() if sr != feature_extractor.sampling_rate: try: import librosa @@ -140,78 +92,92 @@ def input_mapper_for_ultravox(ctx: InputContext, data: object): orig_sr=sr, target_sr=feature_extractor.sampling_rate) sr = feature_extractor.sampling_rate + return {"audio": audio, "sampling_rate": sr} - minimum_audio_length = feature_extractor.n_fft // 2 + 1 - if len(audio) < minimum_audio_length: - # Not enough audio; pad it. 
- audio = np.pad(audio, (0, minimum_audio_length - len(audio))) - - single_audio_features = feature_extractor( - audio, sampling_rate=sr, padding="longest", - return_tensors="pt")["input_features"] - - # Remove the batch dimension because we're wrapping it in a list. - audio_features.append(single_audio_features.squeeze(0)) - - return MultiModalKwargs({"audio_features": audio_features}) - - -def input_processor_for_ultravox(ctx: InputContext, inputs: DecoderOnlyInputs): - multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is None or "audio" not in multi_modal_data: - return inputs + def _apply_hf_processor( + self, + prompt: str, + mm_data: MultiModalDataDict, + mm_processor_kwargs: Mapping[str, object], + ) -> BatchFeature: + if not mm_data or not mm_data.get("audio", None): + return super()._apply_hf_processor(prompt, mm_data, + mm_processor_kwargs) + + audio_data = mm_data["audio"] + if not isinstance(audio_data, list): + audio_data = [audio_data] + + # Ultravox processor doesn't support multiple inputs, + # therefore we need to input text and audio one by one + tokenizer = self._get_tokenizer() + audio_features, audio_token_len = [], [] + processed_inputs = {} + for audio, sr in audio_data: + data = self._resample_audio(audio, sr) + processed_inputs = super()._apply_hf_processor( + prompt, data, mm_processor_kwargs) + prompt = tokenizer.decode(processed_inputs["input_ids"][0], + skip_special_tokens=False) + audio_features.append( + processed_inputs.pop("audio_values").squeeze(0)) + audio_token_len.append( + processed_inputs.pop("audio_token_len").item()) + + return dict( + **processed_inputs, + audio_features=audio_features, + audio_token_len=audio_token_len, + ) - if "multi_modal_placeholders" in inputs and "audio" in inputs[ - "multi_modal_placeholders"]: - # The inputs already have placeholders. - return inputs + def _get_processor_data( + self, + mm_data: MultiModalDataDict, + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + # Ultravox uses "audio" instead of "audios" as calling keyword + processor_data, passthrough_data = super()._get_processor_data(mm_data) + if "audios" in processor_data: + processor_data["audio"] = processor_data.pop("audios") + return processor_data, passthrough_data + + def _get_prompt_replacements( + self, + mm_items: MultiModalDataItems, + hf_inputs: BatchFeature, + mm_processor_kwargs: Mapping[str, object], + ) -> list[PromptReplacement]: + hf_processor = self._get_hf_processor() + placeholder = hf_processor.audio_token_replacement + + def get_replacement_ultravox(item_idx: int): + audio_token_len = hf_inputs["audio_token_len"][item_idx] + return placeholder * audio_token_len + + return [ + PromptReplacement( + modality="audio", + target="<|audio|>", + replacement=get_replacement_ultravox, + ) + ] - feature_extractor = whisper_feature_extractor(ctx) - audios = multi_modal_data["audio"] - if not isinstance(audios, list): - audios = [audios] - - audio_token_counts = [] - for audio in audios: - if isinstance(audio, torch.Tensor): - audio_num_tokens = audio.shape[1] - audio_token_counts.append(audio_num_tokens) - else: - audio_data, sample_rate = audio - audio_length = audio_data.shape[0] - if sample_rate != feature_extractor.sampling_rate: - # Account for resampling. 
- adjustment = feature_extractor.sampling_rate / sample_rate - audio_length = math.ceil(adjustment * audio_length) - - feature_extractor_output_length = math.ceil( - (audio_length - (feature_extractor.hop_length - 1)) / - feature_extractor.hop_length) - - uv_config = ctx.get_hf_config(UltravoxConfig) - audio_num_tokens = min( - max( - 1, - math.ceil(feature_extractor_output_length / - (uv_config.stack_factor * 2))), - get_ultravox_max_audio_tokens(ctx)) - audio_token_counts.append(audio_num_tokens) - - tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer) - - new_prompt, new_token_ids, ranges = repeat_and_pad_placeholder_tokens( - tokenizer, - inputs.get("prompt"), - inputs["prompt_token_ids"], - placeholder_token_id=_AUDIO_PLACEHOLDER_TOKEN, - repeat_count=audio_token_counts, - ) - - # NOTE: Create a defensive copy of the original inputs - return token_inputs(prompt_token_ids=new_token_ids, - prompt=new_prompt, - multi_modal_data=multi_modal_data, - multi_modal_placeholders={"audio": ranges}) + def _get_dummy_mm_inputs( + self, + mm_counts: Mapping[str, int], + ) -> ProcessorInputs: + feature_extractor = self._get_feature_extractor() + sampling_rate = feature_extractor.sampling_rate + audio_len = feature_extractor.chunk_length * sampling_rate + + audio_count = mm_counts["audio"] + audio = np.zeros(audio_len) + data = {"audio": [(audio, sampling_rate)] * audio_count} + + return ProcessorInputs( + prompt_text="<|audio|>" * audio_count, + mm_data=data, + mm_processor_kwargs={}, + ) class StackAudioFrames(nn.Module): @@ -332,11 +298,9 @@ def forward( return hidden_states -@MULTIMODAL_REGISTRY.register_input_mapper("audio", input_mapper_for_ultravox) @MULTIMODAL_REGISTRY.register_max_multimodal_tokens( "audio", get_ultravox_max_audio_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_ultravox) -@INPUT_REGISTRY.register_input_processor(input_processor_for_ultravox) +@MULTIMODAL_REGISTRY.register_processor(UltravoxMultiModalProcessor) class UltravoxModel(nn.Module, SupportsMultiModal, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index ce6bec1d49aac..339e193eefe20 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -594,14 +594,10 @@ def _find_placeholders( return list( iter_placeholders(all_prompt_repls, new_token_ids, mm_item_counts)) - def _apply_hf_processor( + def _get_processor_data( self, - prompt: str, mm_data: MultiModalDataDict, - mm_processor_kwargs: Mapping[str, object], ) -> BatchFeature: - hf_processor = self._get_hf_processor(**mm_processor_kwargs) - processor_data = dict[str, Any]() passthrough_data = dict[str, Any]() for k, v in mm_data.items(): @@ -619,6 +615,19 @@ def _apply_hf_processor( processor_data[f"{k}s"] = v else: processor_data[k] = v + return processor_data, passthrough_data + + def _apply_hf_processor( + self, + prompt: str, + mm_data: MultiModalDataDict, + mm_processor_kwargs: Mapping[str, object], + ) -> BatchFeature: + # some mm_processor_kwargs may be used in processor initialization + # instead of processor call + hf_processor = self._get_hf_processor(**mm_processor_kwargs) + + processor_data, passthrough_data = self._get_processor_data(mm_data) assert callable(hf_processor) mm_processor_kwargs = self.ctx.resolve_hf_processor_call_kwargs( From 2ca830dbaa1a7c30b8ff4d7c860c63f87dc18be3 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Mon, 16 Dec 2024 19:23:33 +0800 Subject: [PATCH 383/397] [Doc] Reorder vision 
language examples in alphabet order (#11228) Signed-off-by: Isotr0py <2037008807@qq.com> --- examples/offline_inference_vision_language.py | 486 +++++++++--------- ...e_inference_vision_language_multi_image.py | 288 +++++------ 2 files changed, 387 insertions(+), 387 deletions(-) diff --git a/examples/offline_inference_vision_language.py b/examples/offline_inference_vision_language.py index 7bc43242b717e..6d0495fdd4054 100644 --- a/examples/offline_inference_vision_language.py +++ b/examples/offline_inference_vision_language.py @@ -19,6 +19,159 @@ # Unless specified, these settings have been tested to work on a single L4. +# Aria +def run_aria(question: str, modality: str): + assert modality == "image" + model_name = "rhymes-ai/Aria" + + llm = LLM(model=model_name, + tokenizer_mode="slow", + trust_remote_code=True, + dtype="bfloat16", + mm_cache_preprocessor=args.mm_cache_preprocessor) + + prompt = (f"<|im_start|>user\n<|img|>\n{question}" + "<|im_end|>\n<|im_start|>assistant\n") + + stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] + return llm, prompt, stop_token_ids + + +# BLIP-2 +def run_blip2(question: str, modality: str): + assert modality == "image" + + # BLIP-2 prompt format is inaccurate on HuggingFace model repository. + # See https://huggingface.co/Salesforce/blip2-opt-2.7b/discussions/15#64ff02f3f8cf9e4f5b038262 #noqa + prompt = f"Question: {question} Answer:" + llm = LLM(model="Salesforce/blip2-opt-2.7b", + mm_cache_preprocessor=args.mm_cache_preprocessor) + stop_token_ids = None + return llm, prompt, stop_token_ids + + +# Chameleon +def run_chameleon(question: str, modality: str): + assert modality == "image" + + prompt = f"{question}" + llm = LLM(model="facebook/chameleon-7b", + max_model_len=4096, + mm_cache_preprocessor=args.mm_cache_preprocessor) + stop_token_ids = None + return llm, prompt, stop_token_ids + + +# Fuyu +def run_fuyu(question: str, modality: str): + assert modality == "image" + + prompt = f"{question}\n" + llm = LLM(model="adept/fuyu-8b", + max_model_len=2048, + max_num_seqs=2, + mm_cache_preprocessor=args.mm_cache_preprocessor) + stop_token_ids = None + return llm, prompt, stop_token_ids + + +# GLM-4v +def run_glm4v(question: str, modality: str): + assert modality == "image" + model_name = "THUDM/glm-4v-9b" + + llm = LLM(model=model_name, + max_model_len=2048, + max_num_seqs=2, + trust_remote_code=True, + enforce_eager=True, + mm_cache_preprocessor=args.mm_cache_preprocessor) + prompt = question + stop_token_ids = [151329, 151336, 151338] + return llm, prompt, stop_token_ids + + +# H2OVL-Mississippi +def run_h2ovl(question: str, modality: str): + assert modality == "image" + + model_name = "h2oai/h2ovl-mississippi-2b" + + llm = LLM( + model=model_name, + trust_remote_code=True, + max_model_len=8192, + mm_cache_preprocessor=args.mm_cache_preprocessor, + ) + + tokenizer = AutoTokenizer.from_pretrained(model_name, + trust_remote_code=True) + messages = [{'role': 'user', 'content': f"\n{question}"}] + prompt = tokenizer.apply_chat_template(messages, + tokenize=False, + add_generation_prompt=True) + + # Stop tokens for H2OVL-Mississippi + # https://huggingface.co/h2oai/h2ovl-mississippi-2b + stop_token_ids = [tokenizer.eos_token_id] + return llm, prompt, stop_token_ids + + +# Idefics3-8B-Llama3 +def run_idefics3(question: str, modality: str): + assert modality == "image" + model_name = "HuggingFaceM4/Idefics3-8B-Llama3" + + llm = LLM( + model=model_name, + max_model_len=8192, + max_num_seqs=2, + enforce_eager=True, + # if you are running out of 
memory, you can reduce the "longest_edge". + # see: https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3#model-optimizations + mm_processor_kwargs={ + "size": { + "longest_edge": 3 * 364 + }, + }, + mm_cache_preprocessor=args.mm_cache_preprocessor, + ) + prompt = ( + f"<|begin_of_text|>User:{question}\nAssistant:" + ) + stop_token_ids = None + return llm, prompt, stop_token_ids + + +# InternVL +def run_internvl(question: str, modality: str): + assert modality == "image" + + model_name = "OpenGVLab/InternVL2-2B" + + llm = LLM( + model=model_name, + trust_remote_code=True, + max_model_len=4096, + mm_cache_preprocessor=args.mm_cache_preprocessor, + ) + + tokenizer = AutoTokenizer.from_pretrained(model_name, + trust_remote_code=True) + messages = [{'role': 'user', 'content': f"\n{question}"}] + prompt = tokenizer.apply_chat_template(messages, + tokenize=False, + add_generation_prompt=True) + + # Stop tokens for InternVL + # models variants may have different stop tokens + # please refer to the model card for the correct "stop words": + # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py + stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"] + stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] + return llm, prompt, stop_token_ids + + # LLaVA-1.5 def run_llava(question: str, modality: str): assert modality == "image" @@ -75,83 +228,20 @@ def run_llava_onevision(question: str, modality: str): return llm, prompt, stop_token_ids -# Fuyu -def run_fuyu(question: str, modality: str): - assert modality == "image" - - prompt = f"{question}\n" - llm = LLM(model="adept/fuyu-8b", - max_model_len=2048, - max_num_seqs=2, - mm_cache_preprocessor=args.mm_cache_preprocessor) - stop_token_ids = None - return llm, prompt, stop_token_ids - - -# Phi-3-Vision -def run_phi3v(question: str, modality: str): +# Mantis +def run_mantis(question: str, modality: str): assert modality == "image" - prompt = f"<|user|>\n<|image_1|>\n{question}<|end|>\n<|assistant|>\n" + llama3_template = '<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' # noqa: E501 + prompt = llama3_template.format(f"{question}\n") - # num_crops is an override kwarg to the multimodal image processor; - # For some models, e.g., Phi-3.5-vision-instruct, it is recommended - # to use 16 for single frame scenarios, and 4 for multi-frame. - # - # Generally speaking, a larger value for num_crops results in more - # tokens per image instance, because it may scale the image more in - # the image preprocessing. Some references in the model docs and the - # formula for image tokens after the preprocessing - # transform can be found below. 
- # - # https://huggingface.co/microsoft/Phi-3.5-vision-instruct#loading-the-model-locally - # https://huggingface.co/microsoft/Phi-3.5-vision-instruct/blob/main/processing_phi3_v.py#L194 llm = LLM( - model="microsoft/Phi-3.5-vision-instruct", - trust_remote_code=True, + model="TIGER-Lab/Mantis-8B-siglip-llama3", max_model_len=4096, - max_num_seqs=2, - # Note - mm_processor_kwargs can also be passed to generate/chat calls - mm_processor_kwargs={"num_crops": 16}, + hf_overrides={"architectures": ["MantisForConditionalGeneration"]}, mm_cache_preprocessor=args.mm_cache_preprocessor, ) - stop_token_ids = None - return llm, prompt, stop_token_ids - - -# PaliGemma -def run_paligemma(question: str, modality: str): - assert modality == "image" - - # PaliGemma has special prompt format for VQA - prompt = "caption en" - llm = LLM(model="google/paligemma-3b-mix-224", - mm_cache_preprocessor=args.mm_cache_preprocessor) - stop_token_ids = None - return llm, prompt, stop_token_ids - - -# PaliGemma 2 -def run_paligemma2(question: str, modality: str): - assert modality == "image" - - # PaliGemma 2 has special prompt format for VQA - prompt = "caption en" - llm = LLM(model="google/paligemma2-3b-ft-docci-448", - mm_cache_preprocessor=args.mm_cache_preprocessor) - stop_token_ids = None - return llm, prompt, stop_token_ids - - -# Chameleon -def run_chameleon(question: str, modality: str): - assert modality == "image" - - prompt = f"{question}" - llm = LLM(model="facebook/chameleon-7b", - max_model_len=4096, - mm_cache_preprocessor=args.mm_cache_preprocessor) - stop_token_ids = None + stop_token_ids = [128009] return llm, prompt, stop_token_ids @@ -199,58 +289,45 @@ def run_minicpmv(question: str, modality: str): return llm, prompt, stop_token_ids -# H2OVL-Mississippi -def run_h2ovl(question: str, modality: str): +# LLama 3.2 +def run_mllama(question: str, modality: str): assert modality == "image" - model_name = "h2oai/h2ovl-mississippi-2b" + model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct" + # Note: The default setting of max_num_seqs (256) and + # max_model_len (131072) for this model may cause OOM. + # You may lower either to run this example on lower-end GPUs. + + # The configuration below has been confirmed to launch on a single L40 GPU. 
llm = LLM( model=model_name, - trust_remote_code=True, - max_model_len=8192, + max_model_len=4096, + max_num_seqs=16, + enforce_eager=True, mm_cache_preprocessor=args.mm_cache_preprocessor, ) - tokenizer = AutoTokenizer.from_pretrained(model_name, - trust_remote_code=True) - messages = [{'role': 'user', 'content': f"\n{question}"}] - prompt = tokenizer.apply_chat_template(messages, - tokenize=False, - add_generation_prompt=True) - - # Stop tokens for H2OVL-Mississippi - # https://huggingface.co/h2oai/h2ovl-mississippi-2b - stop_token_ids = [tokenizer.eos_token_id] + prompt = f"<|image|><|begin_of_text|>{question}" + stop_token_ids = None return llm, prompt, stop_token_ids -# InternVL -def run_internvl(question: str, modality: str): +# Molmo +def run_molmo(question, modality): assert modality == "image" - model_name = "OpenGVLab/InternVL2-2B" + model_name = "allenai/Molmo-7B-D-0924" llm = LLM( model=model_name, trust_remote_code=True, - max_model_len=4096, + dtype="bfloat16", mm_cache_preprocessor=args.mm_cache_preprocessor, ) - tokenizer = AutoTokenizer.from_pretrained(model_name, - trust_remote_code=True) - messages = [{'role': 'user', 'content': f"\n{question}"}] - prompt = tokenizer.apply_chat_template(messages, - tokenize=False, - add_generation_prompt=True) - - # Stop tokens for InternVL - # models variants may have different stop tokens - # please refer to the model card for the correct "stop words": - # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py - stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"] - stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] + prompt = question + stop_token_ids = None return llm, prompt, stop_token_ids @@ -279,58 +356,57 @@ def run_nvlm_d(question: str, modality: str): return llm, prompt, stop_token_ids -# BLIP-2 -def run_blip2(question: str, modality: str): +# PaliGemma +def run_paligemma(question: str, modality: str): assert modality == "image" - # BLIP-2 prompt format is inaccurate on HuggingFace model repository. - # See https://huggingface.co/Salesforce/blip2-opt-2.7b/discussions/15#64ff02f3f8cf9e4f5b038262 #noqa - prompt = f"Question: {question} Answer:" - llm = LLM(model="Salesforce/blip2-opt-2.7b", + # PaliGemma has special prompt format for VQA + prompt = "caption en" + llm = LLM(model="google/paligemma-3b-mix-224", mm_cache_preprocessor=args.mm_cache_preprocessor) stop_token_ids = None return llm, prompt, stop_token_ids -# Qwen -def run_qwen_vl(question: str, modality: str): +# PaliGemma 2 +def run_paligemma2(question: str, modality: str): assert modality == "image" - llm = LLM( - model="Qwen/Qwen-VL", - trust_remote_code=True, - max_model_len=1024, - max_num_seqs=2, - mm_cache_preprocessor=args.mm_cache_preprocessor, - ) - - prompt = f"{question}Picture 1: \n" + # PaliGemma 2 has special prompt format for VQA + prompt = "caption en" + llm = LLM(model="google/paligemma2-3b-ft-docci-448", + mm_cache_preprocessor=args.mm_cache_preprocessor) stop_token_ids = None return llm, prompt, stop_token_ids -# Qwen2-VL -def run_qwen2_vl(question: str, modality: str): +# Phi-3-Vision +def run_phi3v(question: str, modality: str): assert modality == "image" - model_name = "Qwen/Qwen2-VL-7B-Instruct" + prompt = f"<|user|>\n<|image_1|>\n{question}<|end|>\n<|assistant|>\n" + # num_crops is an override kwarg to the multimodal image processor; + # For some models, e.g., Phi-3.5-vision-instruct, it is recommended + # to use 16 for single frame scenarios, and 4 for multi-frame. 
+ # + # Generally speaking, a larger value for num_crops results in more + # tokens per image instance, because it may scale the image more in + # the image preprocessing. Some references in the model docs and the + # formula for image tokens after the preprocessing + # transform can be found below. + # + # https://huggingface.co/microsoft/Phi-3.5-vision-instruct#loading-the-model-locally + # https://huggingface.co/microsoft/Phi-3.5-vision-instruct/blob/main/processing_phi3_v.py#L194 llm = LLM( - model=model_name, + model="microsoft/Phi-3.5-vision-instruct", + trust_remote_code=True, max_model_len=4096, - max_num_seqs=5, + max_num_seqs=2, # Note - mm_processor_kwargs can also be passed to generate/chat calls - mm_processor_kwargs={ - "min_pixels": 28 * 28, - "max_pixels": 1280 * 28 * 28, - }, + mm_processor_kwargs={"num_crops": 16}, mm_cache_preprocessor=args.mm_cache_preprocessor, ) - - prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" - "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>" - f"{question}<|im_end|>\n" - "<|im_start|>assistant\n") stop_token_ids = None return llm, prompt, stop_token_ids @@ -352,149 +428,73 @@ def run_pixtral_hf(question: str, modality: str): return llm, prompt, stop_token_ids -# LLama 3.2 -def run_mllama(question: str, modality: str): - assert modality == "image" - - model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct" - - # Note: The default setting of max_num_seqs (256) and - # max_model_len (131072) for this model may cause OOM. - # You may lower either to run this example on lower-end GPUs. - - # The configuration below has been confirmed to launch on a single L40 GPU. - llm = LLM( - model=model_name, - max_model_len=4096, - max_num_seqs=16, - enforce_eager=True, - mm_cache_preprocessor=args.mm_cache_preprocessor, - ) - - prompt = f"<|image|><|begin_of_text|>{question}" - stop_token_ids = None - return llm, prompt, stop_token_ids - - -# Molmo -def run_molmo(question, modality): +# Qwen +def run_qwen_vl(question: str, modality: str): assert modality == "image" - model_name = "allenai/Molmo-7B-D-0924" - llm = LLM( - model=model_name, + model="Qwen/Qwen-VL", trust_remote_code=True, - dtype="bfloat16", + max_model_len=1024, + max_num_seqs=2, mm_cache_preprocessor=args.mm_cache_preprocessor, ) - prompt = question + prompt = f"{question}Picture 1: \n" stop_token_ids = None return llm, prompt, stop_token_ids -# GLM-4v -def run_glm4v(question: str, modality: str): +# Qwen2-VL +def run_qwen2_vl(question: str, modality: str): assert modality == "image" - model_name = "THUDM/glm-4v-9b" - llm = LLM(model=model_name, - max_model_len=2048, - max_num_seqs=2, - trust_remote_code=True, - enforce_eager=True, - mm_cache_preprocessor=args.mm_cache_preprocessor) - prompt = question - stop_token_ids = [151329, 151336, 151338] - return llm, prompt, stop_token_ids - - -# Idefics3-8B-Llama3 -def run_idefics3(question: str, modality: str): - assert modality == "image" - model_name = "HuggingFaceM4/Idefics3-8B-Llama3" + model_name = "Qwen/Qwen2-VL-7B-Instruct" llm = LLM( model=model_name, - max_model_len=8192, - max_num_seqs=2, - enforce_eager=True, - # if you are running out of memory, you can reduce the "longest_edge". 
- # see: https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3#model-optimizations + max_model_len=4096, + max_num_seqs=5, + # Note - mm_processor_kwargs can also be passed to generate/chat calls mm_processor_kwargs={ - "size": { - "longest_edge": 3 * 364 - }, + "min_pixels": 28 * 28, + "max_pixels": 1280 * 28 * 28, }, mm_cache_preprocessor=args.mm_cache_preprocessor, ) - prompt = ( - f"<|begin_of_text|>User:{question}\nAssistant:" - ) - stop_token_ids = None - return llm, prompt, stop_token_ids - -# Aria -def run_aria(question: str, modality: str): - assert modality == "image" - model_name = "rhymes-ai/Aria" - - llm = LLM(model=model_name, - tokenizer_mode="slow", - trust_remote_code=True, - dtype="bfloat16", - mm_cache_preprocessor=args.mm_cache_preprocessor) - - prompt = (f"<|im_start|>user\n<|img|>\n{question}" - "<|im_end|>\n<|im_start|>assistant\n") - - stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] - return llm, prompt, stop_token_ids - - -# Mantis -def run_mantis(question: str, modality: str): - assert modality == "image" - - llama3_template = '<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n' # noqa: E501 - prompt = llama3_template.format(f"{question}\n") - - llm = LLM( - model="TIGER-Lab/Mantis-8B-siglip-llama3", - max_model_len=4096, - hf_overrides={"architectures": ["MantisForConditionalGeneration"]}, - mm_cache_preprocessor=args.mm_cache_preprocessor, - ) - stop_token_ids = [128009] + prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>" + f"{question}<|im_end|>\n" + "<|im_start|>assistant\n") + stop_token_ids = None return llm, prompt, stop_token_ids model_example_map = { + "aria": run_aria, + "blip-2": run_blip2, + "chameleon": run_chameleon, + "fuyu": run_fuyu, + "glm4v": run_glm4v, + "h2ovl_chat": run_h2ovl, + "idefics3": run_idefics3, + "internvl_chat": run_internvl, "llava": run_llava, "llava-next": run_llava_next, "llava-next-video": run_llava_next_video, "llava-onevision": run_llava_onevision, - "fuyu": run_fuyu, - "phi3_v": run_phi3v, - "paligemma": run_paligemma, - "paligemma2": run_paligemma2, - "chameleon": run_chameleon, + "mantis": run_mantis, "minicpmv": run_minicpmv, - "blip-2": run_blip2, - "h2ovl_chat": run_h2ovl, - "internvl_chat": run_internvl, + "mllama": run_mllama, + "molmo": run_molmo, "NVLM_D": run_nvlm_d, + "paligemma": run_paligemma, + "paligemma2": run_paligemma2, + "phi3_v": run_phi3v, + "pixtral_hf": run_pixtral_hf, "qwen_vl": run_qwen_vl, "qwen2_vl": run_qwen2_vl, - "pixtral_hf": run_pixtral_hf, - "mllama": run_mllama, - "molmo": run_molmo, - "glm4v": run_glm4v, - "idefics3": run_idefics3, - "aria": run_aria, - "mantis": run_mantis, } diff --git a/examples/offline_inference_vision_language_multi_image.py b/examples/offline_inference_vision_language_multi_image.py index 928bbef54eab7..6af8d7768e75d 100644 --- a/examples/offline_inference_vision_language_multi_image.py +++ b/examples/offline_inference_vision_language_multi_image.py @@ -33,78 +33,23 @@ class ModelRequestData(NamedTuple): # Unless specified, these settings have been tested to work on a single L4. 
-def load_qwenvl_chat(question: str, image_urls: List[str]) -> ModelRequestData: - model_name = "Qwen/Qwen-VL-Chat" - llm = LLM( - model=model_name, - trust_remote_code=True, - max_model_len=1024, - max_num_seqs=2, - limit_mm_per_prompt={"image": len(image_urls)}, - ) - placeholders = "".join(f"Picture {i}: \n" - for i, _ in enumerate(image_urls, start=1)) - - # This model does not have a chat_template attribute on its tokenizer, - # so we need to explicitly pass it. We use ChatML since it's used in the - # generation utils of the model: - # https://huggingface.co/Qwen/Qwen-VL-Chat/blob/main/qwen_generation_utils.py#L265 - tokenizer = AutoTokenizer.from_pretrained(model_name, - trust_remote_code=True) - - # Copied from: https://huggingface.co/docs/transformers/main/en/chat_templating - chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" # noqa: E501 - - messages = [{'role': 'user', 'content': f"{placeholders}\n{question}"}] - prompt = tokenizer.apply_chat_template(messages, - tokenize=False, - add_generation_prompt=True, - chat_template=chat_template) - - stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>"] - stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] - return ModelRequestData( - llm=llm, - prompt=prompt, - stop_token_ids=stop_token_ids, - image_data=[fetch_image(url) for url in image_urls], - chat_template=chat_template, - ) - - -def load_phi3v(question: str, image_urls: List[str]) -> ModelRequestData: - # num_crops is an override kwarg to the multimodal image processor; - # For some models, e.g., Phi-3.5-vision-instruct, it is recommended - # to use 16 for single frame scenarios, and 4 for multi-frame. - # - # Generally speaking, a larger value for num_crops results in more - # tokens per image instance, because it may scale the image more in - # the image preprocessing. Some references in the model docs and the - # formula for image tokens after the preprocessing - # transform can be found below. 
- # - # https://huggingface.co/microsoft/Phi-3.5-vision-instruct#loading-the-model-locally - # https://huggingface.co/microsoft/Phi-3.5-vision-instruct/blob/main/processing_phi3_v.py#L194 - llm = LLM( - model="microsoft/Phi-3.5-vision-instruct", - trust_remote_code=True, - max_model_len=4096, - max_num_seqs=2, - limit_mm_per_prompt={"image": len(image_urls)}, - mm_processor_kwargs={"num_crops": 4}, - ) - placeholders = "\n".join(f"<|image_{i}|>" - for i, _ in enumerate(image_urls, start=1)) - prompt = f"<|user|>\n{placeholders}\n{question}<|end|>\n<|assistant|>\n" - stop_token_ids = None - +def load_aria(question, image_urls: List[str]) -> ModelRequestData: + model_name = "rhymes-ai/Aria" + llm = LLM(model=model_name, + tokenizer_mode="slow", + trust_remote_code=True, + dtype="bfloat16", + limit_mm_per_prompt={"image": len(image_urls)}) + placeholders = "<|img|>\n" * len(image_urls) + prompt = (f"<|im_start|>user\n{placeholders}{question}<|im_end|>\n" + "<|im_start|>assistant\n") + stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] return ModelRequestData( llm=llm, prompt=prompt, stop_token_ids=stop_token_ids, image_data=[fetch_image(url) for url in image_urls], - chat_template=None, - ) + chat_template=None) def load_h2onvl(question: str, image_urls: List[str]) -> ModelRequestData: @@ -141,6 +86,37 @@ def load_h2onvl(question: str, image_urls: List[str]) -> ModelRequestData: ) +def load_idefics3(question, image_urls: List[str]) -> ModelRequestData: + model_name = "HuggingFaceM4/Idefics3-8B-Llama3" + + # The configuration below has been confirmed to launch on a single L40 GPU. + llm = LLM( + model=model_name, + max_model_len=8192, + max_num_seqs=16, + enforce_eager=True, + limit_mm_per_prompt={"image": len(image_urls)}, + # if you are running out of memory, you can reduce the "longest_edge". + # see: https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3#model-optimizations + mm_processor_kwargs={ + "size": { + "longest_edge": 2 * 364 + }, + }, + ) + + placeholders = "\n".join(f"Image-{i}: \n" + for i, _ in enumerate(image_urls, start=1)) + prompt = f"<|begin_of_text|>User:{placeholders}\n{question}\nAssistant:" # noqa: E501 + return ModelRequestData( + llm=llm, + prompt=prompt, + stop_token_ids=None, + image_data=[fetch_image(url) for url in image_urls], + chat_template=None, + ) + + def load_internvl(question: str, image_urls: List[str]) -> ModelRequestData: model_name = "OpenGVLab/InternVL2-2B" @@ -178,6 +154,28 @@ def load_internvl(question: str, image_urls: List[str]) -> ModelRequestData: ) +def load_mllama(question, image_urls: List[str]) -> ModelRequestData: + model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct" + + # The configuration below has been confirmed to launch on a single L40 GPU. 
+ llm = LLM( + model=model_name, + max_model_len=4096, + max_num_seqs=16, + enforce_eager=True, + limit_mm_per_prompt={"image": len(image_urls)}, + ) + + prompt = f"<|image|><|image|><|begin_of_text|>{question}" + return ModelRequestData( + llm=llm, + prompt=prompt, + stop_token_ids=None, + image_data=[fetch_image(url) for url in image_urls], + chat_template=None, + ) + + def load_nvlm_d(question: str, image_urls: List[str]): model_name = "nvidia/NVLM-D-72B" @@ -211,6 +209,80 @@ def load_nvlm_d(question: str, image_urls: List[str]): ) +def load_phi3v(question: str, image_urls: List[str]) -> ModelRequestData: + # num_crops is an override kwarg to the multimodal image processor; + # For some models, e.g., Phi-3.5-vision-instruct, it is recommended + # to use 16 for single frame scenarios, and 4 for multi-frame. + # + # Generally speaking, a larger value for num_crops results in more + # tokens per image instance, because it may scale the image more in + # the image preprocessing. Some references in the model docs and the + # formula for image tokens after the preprocessing + # transform can be found below. + # + # https://huggingface.co/microsoft/Phi-3.5-vision-instruct#loading-the-model-locally + # https://huggingface.co/microsoft/Phi-3.5-vision-instruct/blob/main/processing_phi3_v.py#L194 + llm = LLM( + model="microsoft/Phi-3.5-vision-instruct", + trust_remote_code=True, + max_model_len=4096, + max_num_seqs=2, + limit_mm_per_prompt={"image": len(image_urls)}, + mm_processor_kwargs={"num_crops": 4}, + ) + placeholders = "\n".join(f"<|image_{i}|>" + for i, _ in enumerate(image_urls, start=1)) + prompt = f"<|user|>\n{placeholders}\n{question}<|end|>\n<|assistant|>\n" + stop_token_ids = None + + return ModelRequestData( + llm=llm, + prompt=prompt, + stop_token_ids=stop_token_ids, + image_data=[fetch_image(url) for url in image_urls], + chat_template=None, + ) + + +def load_qwenvl_chat(question: str, image_urls: List[str]) -> ModelRequestData: + model_name = "Qwen/Qwen-VL-Chat" + llm = LLM( + model=model_name, + trust_remote_code=True, + max_model_len=1024, + max_num_seqs=2, + limit_mm_per_prompt={"image": len(image_urls)}, + ) + placeholders = "".join(f"Picture {i}: \n" + for i, _ in enumerate(image_urls, start=1)) + + # This model does not have a chat_template attribute on its tokenizer, + # so we need to explicitly pass it. 
We use ChatML since it's used in the + # generation utils of the model: + # https://huggingface.co/Qwen/Qwen-VL-Chat/blob/main/qwen_generation_utils.py#L265 + tokenizer = AutoTokenizer.from_pretrained(model_name, + trust_remote_code=True) + + # Copied from: https://huggingface.co/docs/transformers/main/en/chat_templating + chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" # noqa: E501 + + messages = [{'role': 'user', 'content': f"{placeholders}\n{question}"}] + prompt = tokenizer.apply_chat_template(messages, + tokenize=False, + add_generation_prompt=True, + chat_template=chat_template) + + stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>"] + stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens] + return ModelRequestData( + llm=llm, + prompt=prompt, + stop_token_ids=stop_token_ids, + image_data=[fetch_image(url) for url in image_urls], + chat_template=chat_template, + ) + + def load_qwen2_vl(question, image_urls: List[str]) -> ModelRequestData: try: from qwen_vl_utils import process_vision_info @@ -268,88 +340,16 @@ def load_qwen2_vl(question, image_urls: List[str]) -> ModelRequestData: ) -def load_mllama(question, image_urls: List[str]) -> ModelRequestData: - model_name = "meta-llama/Llama-3.2-11B-Vision-Instruct" - - # The configuration below has been confirmed to launch on a single L40 GPU. - llm = LLM( - model=model_name, - max_model_len=4096, - max_num_seqs=16, - enforce_eager=True, - limit_mm_per_prompt={"image": len(image_urls)}, - ) - - prompt = f"<|image|><|image|><|begin_of_text|>{question}" - return ModelRequestData( - llm=llm, - prompt=prompt, - stop_token_ids=None, - image_data=[fetch_image(url) for url in image_urls], - chat_template=None, - ) - - -def load_idefics3(question, image_urls: List[str]) -> ModelRequestData: - model_name = "HuggingFaceM4/Idefics3-8B-Llama3" - - # The configuration below has been confirmed to launch on a single L40 GPU. - llm = LLM( - model=model_name, - max_model_len=8192, - max_num_seqs=16, - enforce_eager=True, - limit_mm_per_prompt={"image": len(image_urls)}, - # if you are running out of memory, you can reduce the "longest_edge". 
- # see: https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3#model-optimizations - mm_processor_kwargs={ - "size": { - "longest_edge": 2 * 364 - }, - }, - ) - - placeholders = "\n".join(f"Image-{i}: \n" - for i, _ in enumerate(image_urls, start=1)) - prompt = f"<|begin_of_text|>User:{placeholders}\n{question}\nAssistant:" # noqa: E501 - return ModelRequestData( - llm=llm, - prompt=prompt, - stop_token_ids=None, - image_data=[fetch_image(url) for url in image_urls], - chat_template=None, - ) - - -def load_aria(question, image_urls: List[str]) -> ModelRequestData: - model_name = "rhymes-ai/Aria" - llm = LLM(model=model_name, - tokenizer_mode="slow", - trust_remote_code=True, - dtype="bfloat16", - limit_mm_per_prompt={"image": len(image_urls)}) - placeholders = "<|img|>\n" * len(image_urls) - prompt = (f"<|im_start|>user\n{placeholders}{question}<|im_end|>\n" - "<|im_start|>assistant\n") - stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] - return ModelRequestData( - llm=llm, - prompt=prompt, - stop_token_ids=stop_token_ids, - image_data=[fetch_image(url) for url in image_urls], - chat_template=None) - - model_example_map = { - "phi3_v": load_phi3v, + "aria": load_aria, "h2ovl_chat": load_h2onvl, + "idefics3": load_idefics3, "internvl_chat": load_internvl, + "mllama": load_mllama, "NVLM_D": load_nvlm_d, - "qwen2_vl": load_qwen2_vl, + "phi3_v": load_phi3v, "qwen_vl_chat": load_qwenvl_chat, - "mllama": load_mllama, - "idefics3": load_idefics3, - "aria": load_aria, + "qwen2_vl": load_qwen2_vl, } From efbce85f4d375d7851a491a0126a224e25d9f91d Mon Sep 17 00:00:00 2001 From: Varun Sundar Rabindranath Date: Mon, 16 Dec 2024 13:14:57 -0500 Subject: [PATCH 384/397] [misc] Layerwise profile updates (#10242) Signed-off-by: Varun Sundar Rabindranath Co-authored-by: Varun Sundar Rabindranath --- .buildkite/test-pipeline.yaml | 2 +- examples/offline_profile.py | 236 +++++++++++++++--- tools/profiler/print_layerwise_table.py | 9 +- tools/profiler/visualize_layerwise_profile.py | 92 ++++++- vllm/profiler/layerwise_profile.py | 22 +- 5 files changed, 314 insertions(+), 47 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 97aae233db105..44f47fac1c1b3 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -201,7 +201,7 @@ steps: - python3 offline_inference_classification.py - python3 offline_inference_embedding.py - python3 offline_inference_scoring.py - - python3 offline_profile.py --model facebook/opt-125m + - python3 offline_profile.py --model facebook/opt-125m run_num_steps --num-steps 2 - label: Prefix Caching Test # 9min mirror_hardwares: [amd] diff --git a/examples/offline_profile.py b/examples/offline_profile.py index 1d415b82cddb6..46afe8aa2604b 100644 --- a/examples/offline_profile.py +++ b/examples/offline_profile.py @@ -4,9 +4,10 @@ import sys from argparse import RawTextHelpFormatter from dataclasses import asdict, dataclass -from typing import Optional +from typing import Any, Dict, Generator, List, Optional, TypeAlias import torch +import tqdm from vllm import LLM, SamplingParams from vllm.engine.arg_utils import EngineArgs @@ -15,16 +16,21 @@ BATCH_SIZE_DEFAULT = 1 PROMPT_LEN_DEFAULT = 256 -OUTPUT_LEN_DEFAULT = 2 @dataclass class ProfileContext: engine_args: EngineArgs prompt_len: int - output_len: int batch_size: int - save_chrome_traces_folder: Optional[str] + + # The profiler can run in 2 modes, + # 1. Run profiler for user specified num_steps + num_steps: Optional[int] = None + # 2. 
Run profiler until all requests complete + complete_num_requests_per_step: Optional[int] = None + + save_chrome_traces_folder: Optional[str] = None def get_dtype(dtype: str): @@ -34,23 +40,155 @@ def get_dtype(dtype: str): return dtype +OutputLen_NumReqs_Map: TypeAlias = Dict[int, int] +def compute_request_output_lengths(batch_size: int, step_requests: List[int]) \ + -> OutputLen_NumReqs_Map: + """ + Given the number of requests, batch_size, and the number of requests + that each engine-step should process, step_requests, determine the + output lengths of the requests such that step_request is honoured. + + Example: + if batch size = 128 and step_request = [128, 128, 96, 64, 32, 1] + then return, + {2 : 32, 3 : 32, 4 : 32, 5 : 31, 6 : 1}, meaning, + 32 requests should have output length 2, + 32 requests should have output length 3, + 32 requests should have output length 4, + 31 requests should have output length 5, + 1 request should have output length 6. + + Args: + batch_size (int): Number of requests submitted for profile. This is + args.batch_size. + step_requests (List[int]): step_requests[i] is the number of requests + that the ith engine step should process. + + Returns: + OutputLen_NumReqs_Map : A dictionary with output-length as keys and the + number of requests required to have that output-length as values. + """ + ol_nr: OutputLen_NumReqs_Map = {} + + # Number of request that are assigned an output-length + num_reqs_assigned: int = 0 + num_steps: int = len(step_requests) + + # sanity check. The first step (prefill-step), must process all requests. + assert step_requests[0] == batch_size + + # Begin assignments from the last step. + output_length: int = num_steps + for num_requests_at_step in reversed(step_requests): + if num_reqs_assigned == batch_size: + break + + assert num_reqs_assigned < batch_size + + # Remove the number of requests that have been determined + # to participate in this step and beyond. + num_reqs_unassigned_at_step = num_requests_at_step - num_reqs_assigned + assert num_reqs_unassigned_at_step >= 0 + + if num_reqs_unassigned_at_step > 0: + ol_nr[output_length] = num_reqs_unassigned_at_step + num_reqs_assigned += num_reqs_unassigned_at_step + + output_length -= 1 + + # sanity checks. + assert sum(ol_nr.values()) == batch_size, \ + ("Number of requests in output-length assignment does not match " + f"batch-size.\n batch size {batch_size} - " + f"step requests {step_requests} - assignments {ol_nr}") + + # Check that the output-length is in [1, num-steps]. Output length must be + # at least 1 as all requests must participate in the prefill-step. + assert all(ol >= 1 and ol <= num_steps for ol in ol_nr), \ + ("Output lengths of requests should be in range " + f"[1, num-engine-steps].\n batch size {batch_size} - " + f"step requests {step_requests} - assignments {ol_nr}") + + return ol_nr + + +def determine_requests_per_step(context: ProfileContext) -> List[int]: + """ + Determine number of requests each engine step should process. + If context.num_steps is set, then all engine steps process the + same number of requests and the output list is of length + context.num_steps. + + If context.complete_num_requests_per_step is set, then each decode step + processes fewer and fewer requests until there are no requests to process. + In this case, the output list is as big as the number of steps + required to process all requests. + + Args: + context: ProfileContext object. + + Returns: + List[int]: Number of requests to process for all engine-steps. 
+ output[i], contains the number of requests that the ith step + should process. + """ + if context.num_steps: + # All requests must run until num_engine_steps. This implies + # that their output lengths must be equal to num_engine_steps. + return [context.batch_size] * context.num_steps + + assert context.complete_num_requests_per_step and \ + context.complete_num_requests_per_step > 0, \ + (f"Expected a positive complete_num_requests_per_step argument." + f"Instead got {context.complete_num_requests_per_step}") + + # We start dropping after the first decode step. + step_requests = [ + context.batch_size, # prefill + context.batch_size, # decode + ] + + num_running_requests = context.batch_size + num_running_requests -= context.complete_num_requests_per_step + while num_running_requests > 0: + step_requests.append(num_running_requests) + num_running_requests -= context.complete_num_requests_per_step + + if step_requests[-1] != 1: + # have 1 request running at the last step. This is often + # useful + step_requests.append(1) + + return step_requests + + def run_profile(context: ProfileContext, csv_output: Optional[str], json_output: Optional[str]): print("Run profile with:") for key, value in asdict(context).items(): print(f" {key} = {value}") + requests_per_step: List[int] = determine_requests_per_step(context) + + ol_nr: OutputLen_NumReqs_Map = compute_request_output_lengths( + context.batch_size, requests_per_step) + + num_steps_to_profile: int = len(requests_per_step) + max_output_len: int = max(ol_nr.keys()) + assert max_output_len >= 1 + # Create sampling params - sampling_params = SamplingParams(temperature=0.8, - top_p=0.95, - max_tokens=args.output_len, - ignore_eos=True) + sampling_params = SamplingParams( + temperature=0.8, + top_p=0.95, + # max_tokens is set on a per-request basis. 
+ max_tokens=None, + ignore_eos=True) # Create LLM llm = LLM(**asdict(context.engine_args)) batch_size = context.batch_size prompt_len = context.prompt_len - output_len = context.output_len scheduler_config = llm.llm_engine.scheduler_config max_model_len = llm.llm_engine.model_config.max_model_len @@ -65,7 +203,7 @@ def run_profile(context: ProfileContext, csv_output: Optional[str], f"choose a smaller batch size or prompt length, or increase " f"--max-num-batched-tokens") sys.exit(-1) - if batch_size >= max_num_seqs: + if batch_size > max_num_seqs: print( f"ERROR: chosen batch_size ({batch_size}) is larger than " f"max_num_seqs ({max_num_seqs}) and therefore cannot be run in a " @@ -73,16 +211,26 @@ def run_profile(context: ProfileContext, csv_output: Optional[str], sys.exit(-1) print("llm.llm_engine.model_config.max_model_len: ", llm.llm_engine.model_config.max_model_len) - if prompt_len + output_len > llm.llm_engine.model_config.max_model_len: - print( - f"ERROR: chosen prompt_len + output_len ({prompt_len} + " - f"{output_len} = {prompt_len + output_len}) is larger than the " - f"model's max_model_len ({max_model_len}), please choose a smaller " - f"prompt_len or output_len, or increase --max-model-len") + if prompt_len + max_output_len > llm.llm_engine.model_config.max_model_len: + print(f"ERROR: chosen prompt_len + max_output_len ({prompt_len} + " + f"{max_output_len} = {prompt_len + max_output_len}) is larger " + f"than the model's max_model_len ({max_model_len}), please " + f"choose a smaller prompt_len or max_output_len, or increase " + f"--max-model-len") sys.exit(-1) def add_requests(): + + def get_output_len_generator() -> Generator[int, Any, Any]: + for output_len, num_reqs in ol_nr.items(): + for _ in range(num_reqs): + yield output_len + + output_len_generator = get_output_len_generator() for i in range(batch_size): + sampling_params.max_tokens = next(output_len_generator) + assert isinstance(sampling_params.max_tokens, int) + prompt_token_ids = torch.randint( llm.llm_engine.model_config.get_vocab_size(), size=(prompt_len, )).tolist() @@ -110,8 +258,11 @@ def abort_requests(): llm.llm_engine.step() # First step is prefill decode_profs = [] - for x in range(args.output_len - 1): - with layerwise_profile() as decode_prof: + for _ in tqdm.tqdm(range(num_steps_to_profile - 1)): + num_running_seqs = llm.llm_engine.scheduler[ + 0].get_num_unfinished_seq_groups() + with layerwise_profile( + num_running_seqs=num_running_seqs) as decode_prof: llm.llm_engine.step() decode_profs.append(decode_prof) @@ -154,7 +305,8 @@ def abort_requests(): decode_results_list[0].print_summary_table() if csv_output: - csv_filename_base = csv_output.rstrip(".csv") + csv_filename_base = csv_output[:-4] \ + if csv_output.endswith('.csv') else csv_output prefill_results.export_model_stats_table_csv( csv_filename_base + "_prefill_model_table.csv") prefill_results.export_summary_stats_table_csv( @@ -187,10 +339,10 @@ def abort_requests(): for idx, dr in enumerate(decode_results_list): json_dict[f"decode_{idx + 1}"] = dr.convert_stats_to_dict() - for idx, dr in enumerate(decode_results_list[1:]): - json_dict[f"decode_{idx + 1}"] = dr.convert_stats_to_dict() - - with open(json_output.rstrip(".json") + ".json", "w+") as f: + # Add .json to json_output filename if it doesn't exist already. 
+ json_output_file = json_output if json_output.endswith( + '.json') else json_output + '.json' + with open(json_output_file, "w+") as f: json.dump(json_dict, f, indent=2) pass @@ -214,7 +366,7 @@ def abort_requests(): python examples/offline_profile.py \\ --model neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8 --batch-size 4 \\ --prompt-len 512 --max-num-batched-tokens 8196 --json Llama31-8b-FP8 \\ - --enforce-eager + --enforce-eager run_num_steps -n 2 ``` then you can use various tools to analyze the json output @@ -261,17 +413,41 @@ def abort_requests(): default=BATCH_SIZE_DEFAULT, help=f"Number of requests to run as a single batch, " f"default={BATCH_SIZE_DEFAULT}") - parser.add_argument( - "--output-len", + + subparsers = parser.add_subparsers(dest="cmd") + + run_num_steps_parser = subparsers.add_parser( + "run_num_steps", + help="This variation profiles n engine.step() invocations.") + run_num_steps_parser.add_argument( + '-n', + '--num-steps', type=int, - default=OUTPUT_LEN_DEFAULT, - help="Number of llm steps to run (includes prefill and decode) " - "- default={OUTPUT_LEN_DEFAULT}") + help="Number of engine steps to profile.\n" + "Setting it to 1, profiles only the prefill step.\n" + "Setting it to 2, profiles the prefill and first decode step\n" + "Setting it to 3, profiles the prefill, 1st and 2nd decode steps\n" + "and so on ...") + + run_to_completion_parser = subparsers.add_parser( + "run_to_completion", + help="This variation profiles all the engine.step() invocations" + "until the engine exhausts all submitted requests.") + run_to_completion_parser.add_argument( + '-n', + '--complete-num-requests-per-step', + type=int, + help= + "Complete complete_num_requests_per_step requests every decode step." + "For e.g., with batch_size 128 and complete_num_requests_per_step 32," + "the profiler is run for 6 engine steps, with the steps processing, " + "128, 128, 96, 64, 32, 1 requests respectively.\n" + "Note that we tack-on a one-request step at the end as it is often " + "useful.") EngineArgs.add_cli_args(parser) args = parser.parse_args() - context = ProfileContext( engine_args=EngineArgs.from_cli_args(args), **{ diff --git a/tools/profiler/print_layerwise_table.py b/tools/profiler/print_layerwise_table.py index 081076ad7dbdc..394ca8663e189 100644 --- a/tools/profiler/print_layerwise_table.py +++ b/tools/profiler/print_layerwise_table.py @@ -34,9 +34,10 @@ def get_entries(node, curr_depth=0): "examples/offline_profile.py") parser.add_argument("--phase", type=str, - choices=["prefill", "decode_1"], required=True, - help="The phase to print the table for.") + help="The phase to print the table for. This is either" + "prefill or decode_n, where n is the decode step " + "number") parser.add_argument("--table", type=str, choices=["summary", "model"], @@ -49,6 +50,10 @@ def get_entries(node, curr_depth=0): with open(args.json_trace) as f: profile_data = json.load(f) + assert args.phase in profile_data, \ + (f"Cannot find phase {args.phase} in profile data. 
Choose one among" + f'{[x for x in profile_data.keys() if "prefill" in x or "decode" in x]}') #noqa + if args.table == "summary": entries_and_depths = flatten_entries( SummaryStatsEntry, profile_data[args.phase]["summary_stats"]) diff --git a/tools/profiler/visualize_layerwise_profile.py b/tools/profiler/visualize_layerwise_profile.py index adc44474aa4c1..da7a28da15c19 100644 --- a/tools/profiler/visualize_layerwise_profile.py +++ b/tools/profiler/visualize_layerwise_profile.py @@ -151,16 +151,31 @@ def is_quant(op_name: str): "scaled_int8_quant" in op_name: return True + # LoRA ops + def is_sgmv_shrink(op_name: str): + return "sgmv_shrink" in op_name + + def is_sgmv_expand(op_name: str): + return "sgmv_expand" in op_name + + def is_bgmv_shrink(op_name: str): + return "bgmv_shrink" in op_name + + def is_bgmv_expand(op_name: str): + return "bgmv_expand" in op_name + + def is_cutlass_gemm_op(op_name: str): + return "void cutlass::Kernel" in op_name or \ + "void cutlass::device_kernel" in op_name + def is_gemm_op(op_name: str): if is_quant(op_name): return False - if "xmma_gemm" in op_name or \ + return is_cutlass_gemm_op(op_name) or \ + "xmma_gemm" in op_name or \ "gemv2T_kernel" in op_name or \ "splitKreduce" in op_name or \ - "void cutlass::Kernel" in op_name or \ - "void cutlass::device_kernel" in op_name or \ - "s16816gemm" in op_name: - return True + "s16816gemm" in op_name def is_elementwise_op(op_name: str): return "elementwise_kernel" in op_name @@ -211,6 +226,18 @@ def is_reduce_kernel(op_name: str): quant_ops = list(filter(lambda x: is_quant(x), ops)) ops = list(filter(lambda x: x not in quant_ops, ops)) + sgmv_shrink_ops = list(filter(lambda x: is_sgmv_shrink(x), ops)) + ops = list(filter(lambda x: x not in sgmv_shrink_ops, ops)) + sgmv_expand_ops = list(filter(lambda x: is_sgmv_expand(x), ops)) + ops = list(filter(lambda x: x not in sgmv_expand_ops, ops)) + bgmv_shrink_ops = list(filter(lambda x: is_bgmv_shrink(x), ops)) + ops = list(filter(lambda x: x not in bgmv_shrink_ops, ops)) + bgmv_expand_ops = list(filter(lambda x: is_bgmv_expand(x), ops)) + ops = list(filter(lambda x: x not in bgmv_expand_ops, ops)) + + cutlass_gemm_ops = list(filter(lambda x: is_cutlass_gemm_op(x), ops)) + ops = list(filter(lambda x: x not in cutlass_gemm_ops, ops)) + gemm_ops = list(filter(lambda x: is_gemm_op(x), ops)) ops = list(filter(lambda x: x not in gemm_ops, ops)) @@ -257,6 +284,24 @@ def is_reduce_kernel(op_name: str): trace_df['attention'] = trace_df[attention_ops].agg("sum", axis=1) if len(quant_ops): trace_df['quant_ops'] = trace_df[quant_ops].agg("sum", axis=1) + + if len(sgmv_shrink_ops): + trace_df['sgmv_shrink_ops'] = trace_df[sgmv_shrink_ops].agg("sum", + axis=1) + if len(sgmv_expand_ops): + trace_df['sgmv_expand_ops'] = trace_df[sgmv_expand_ops].agg("sum", + axis=1) + if len(bgmv_shrink_ops): + trace_df['bgmv_shrink_ops'] = trace_df[bgmv_shrink_ops].agg("sum", + axis=1) + if len(bgmv_expand_ops): + trace_df['bgmv_expand_ops'] = trace_df[bgmv_expand_ops].agg("sum", + axis=1) + + if len(cutlass_gemm_ops): + trace_df['cutlass_gemm_ops'] = trace_df[cutlass_gemm_ops].agg("sum", + axis=1) + if len(gemm_ops): trace_df['gemm_ops'] = trace_df[gemm_ops].agg("sum", axis=1) if len(rms_norm_ops): @@ -296,7 +341,9 @@ def is_reduce_kernel(op_name: str): trace_df['reduce_kernel_ops'] = trace_df[reduce_kernel_ops].agg("sum", axis=1) - trace_df.drop(attention_ops + quant_ops + gemm_ops + rms_norm_ops + + trace_df.drop(attention_ops + quant_ops + sgmv_shrink_ops + + sgmv_expand_ops + bgmv_shrink_ops + 
bgmv_expand_ops + + cutlass_gemm_ops + gemm_ops + rms_norm_ops + vocab_embed_ops + mem_ops + elementwise_ops + nccl_all_reduce_ops + nccl_gather_ops + nccl_broadcast_ops + nccl_other_ops + cross_device_reduce_1stage_ops + @@ -315,7 +362,14 @@ def plot_trace_df(traces_df: pd.DataFrame, plot_title: str, output: Optional[Path] = None): + def get_phase_description(traces_df: pd.DataFrame, phase: str) -> str: + phase_df = traces_df.query(f'phase == "{phase}"') + descs = phase_df['phase_desc'].to_list() + assert all([desc == descs[0] for desc in descs]) + return descs[0] + phases = traces_df['phase'].unique() + phase_descs = [get_phase_description(traces_df, p) for p in phases] traces_df = traces_df.pivot_table(index="phase", columns="name", values=plot_metric, @@ -324,7 +378,8 @@ def plot_trace_df(traces_df: pd.DataFrame, traces_df = group_trace_by_operations(traces_df) # Make the figure - fig, ax = plt.subplots(1, figsize=(5, 8), sharex=True) + fig_size_x = max(5, len(phases)) + fig, ax = plt.subplots(1, figsize=(fig_size_x, 8), sharex=True) # Draw the stacked bars ops = list(traces_df) @@ -332,7 +387,7 @@ def plot_trace_df(traces_df: pd.DataFrame, for op in ops: values = [traces_df[op][phase] for phase in phases] values = list(map(lambda x: 0.0 if math.isnan(x) else x, values)) - ax.bar(phases, values, label=op, bottom=bottom) + ax.bar(phase_descs, values, label=op, bottom=bottom) bottom = [bottom[j] + values[j] for j in range(len(phases))] # Write the values as text on the bars @@ -390,6 +445,14 @@ def keep_only_top_entries(df: pd.DataFrame, ["name"]] = "others" return df + def get_phase_description(key: str) -> str: + num_running_seqs = profile_json[key]['metadata'][ + 'num_running_seqs'] + if num_running_seqs is not None: + return f"{key}-seqs-{num_running_seqs}" + else: + return key + # Get data for each key traces = list(map(lambda x: get_entries_and_traces(x), step_keys)) @@ -413,6 +476,7 @@ def keep_only_top_entries(df: pd.DataFrame, # Fill in information about the step-keys for trace_df, step_key in zip(trace_dfs, step_keys): trace_df['phase'] = step_key + trace_df['phase_desc'] = get_phase_description(step_key) # Combine all data frames so they can be put in a single plot traces_df = pd.concat(trace_dfs) @@ -426,12 +490,16 @@ def keep_only_top_entries(df: pd.DataFrame, def make_plot_title_suffix(profile_json: dict) -> str: context = profile_json["context"] sparsity = context.get('sparsity', None) - return (f"{context['model']}\n" + run_type = \ + f'Run {context["num_steps"]} steps' if context['num_steps'] else \ + (f'Complete {context["complete_num_requests_per_step"]} per ' + f'step; Run till completion') + return (f"{context['engine_args']['model']}\n" f"Batch={context['batch_size']}, " f"PromptLen={context['prompt_len']}, " - f"OutputLen={context['output_len']}," - f"NumGpus={context['tensor_parallel_size']}" - f"{', Sparsity ' + sparsity if sparsity else ''}") + f"NumGpus={context['engine_args']['tensor_parallel_size']}" + f"{', Sparsity ' + sparsity if sparsity else ''}\n" + f"Run Type: {run_type}") profile_json = None with open(json_trace) as f: diff --git a/vllm/profiler/layerwise_profile.py b/vllm/profiler/layerwise_profile.py index 9d9f427e807f6..33babfebdca1e 100644 --- a/vllm/profiler/layerwise_profile.py +++ b/vllm/profiler/layerwise_profile.py @@ -72,6 +72,9 @@ class LayerwiseProfileResults(profile): _model_stats_tree: List[_StatsTreeNode] = field(init=False) _summary_stats_tree: List[_StatsTreeNode] = field(init=False) + # profile metadata + num_running_seqs: 
Optional[int] = None + def __post_init__(self): self._build_correlation_map() self._build_module_tree() @@ -127,6 +130,9 @@ def export_summary_stats_table_csv(self, filename: str): def convert_stats_to_dict(self) -> str: return { + "metadata": { + "num_running_seqs": self.num_running_seqs + }, "summary_stats": self._convert_stats_tree_to_dict(self._summary_stats_tree), "model_stats": @@ -338,7 +344,15 @@ def df_traversal(node: _StatsTreeNode, curr_json_list: List[Dict]): class layerwise_profile(profile): - def __init__(self): + def __init__(self, num_running_seqs: Optional[int] = None): + """ + layerwise profile constructor. + + Args: + num_running_seqs (Optional[int], optional): When given, + num_running_seqs will be passed to LayerProfileResults for metadata + update. Defaults to None. + """ super().__init__( activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True, @@ -346,9 +360,13 @@ def __init__(self): with_modules=True, experimental_config=_ExperimentalConfig(verbose=True)) + self.num_running_seqs = num_running_seqs + def __enter__(self): return super().__enter__() def __exit__(self, exc_type, exc_val, exc_tb): super().__exit__(exc_type, exc_val, exc_tb) - self.results = LayerwiseProfileResults(self.profiler.kineto_results) + self.results = LayerwiseProfileResults( + self.profiler.kineto_results, + num_running_seqs=self.num_running_seqs) From 551603feffd9b4ba98ccdd34e02e403e04db88c1 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 16 Dec 2024 13:32:25 -0800 Subject: [PATCH 385/397] [core] overhaul memory profiling and fix backward compatibility (#10511) Signed-off-by: youkaichao --- tests/entrypoints/llm/test_gpu_utilization.py | 25 ++++ tests/entrypoints/llm/test_lazy_outlines.py | 2 +- tests/test_utils.py | 44 +++++- tests/worker/test_profile.py | 18 +-- vllm/engine/arg_utils.py | 11 +- vllm/utils.py | 125 +++++++++++++++++- vllm/worker/multi_step_model_runner.py | 3 +- vllm/worker/worker.py | 68 ++++------ 8 files changed, 236 insertions(+), 60 deletions(-) create mode 100644 tests/entrypoints/llm/test_gpu_utilization.py diff --git a/tests/entrypoints/llm/test_gpu_utilization.py b/tests/entrypoints/llm/test_gpu_utilization.py new file mode 100644 index 0000000000000..c2dab300ecefb --- /dev/null +++ b/tests/entrypoints/llm/test_gpu_utilization.py @@ -0,0 +1,25 @@ +from vllm import LLM, SamplingParams + + +def test_gpu_memory_utilization(): + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + + # makes sure gpu_memory_utilization is per-instance limit, + # not a global limit + llms = [ + LLM(model="facebook/opt-125m", + gpu_memory_utilization=0.3, + enforce_eager=True) for i in range(3) + ] + for llm in llms: + outputs = llm.generate(prompts, sampling_params) + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") diff --git a/tests/entrypoints/llm/test_lazy_outlines.py b/tests/entrypoints/llm/test_lazy_outlines.py index 2c53676c5f5dd..bf609b38a94f5 100644 --- a/tests/entrypoints/llm/test_lazy_outlines.py +++ b/tests/entrypoints/llm/test_lazy_outlines.py @@ -36,7 +36,7 @@ def run_lmfe(sample_regex): llm = LLM(model="facebook/opt-125m", enforce_eager=True, guided_decoding_backend="lm-format-enforcer", - gpu_memory_utilization=0.6) + gpu_memory_utilization=0.3) sampling_params = SamplingParams(temperature=0.8, 
top_p=0.95) outputs = llm.generate( prompts=[ diff --git a/tests/test_utils.py b/tests/test_utils.py index a731b11eae81c..0bc9e5bc32a46 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -5,11 +5,13 @@ from typing import AsyncIterator, Tuple import pytest +import torch from vllm.utils import (FlexibleArgumentParser, StoreBoolean, deprecate_kwargs, - get_open_port, merge_async_iterators, supports_kw) + get_open_port, memory_profiling, merge_async_iterators, + supports_kw) -from .utils import error_on_warning +from .utils import error_on_warning, fork_new_process_for_each_test @pytest.mark.asyncio @@ -270,3 +272,41 @@ def test_supports_kw(callable,kw_name,requires_kw_only, requires_kw_only=requires_kw_only, allow_var_kwargs=allow_var_kwargs ) == is_supported + + +@fork_new_process_for_each_test +def test_memory_profiling(): + # Fake out some model loading + inference memory usage to test profiling + # Memory used by other processes will show up as cuda usage outside of torch + from vllm.distributed.device_communicators.cuda_wrapper import ( + CudaRTLibrary) + lib = CudaRTLibrary() + # 512 MiB allocation outside of this instance + handle1 = lib.cudaMalloc(512 * 1024 * 1024) + + baseline_memory_in_bytes = \ + torch.cuda.mem_get_info()[1] - torch.cuda.mem_get_info()[0] + + # load weights + + weights = torch.randn(128, 1024, 1024, device='cuda', dtype=torch.float32) + + weights_memory_in_bytes = 128 * 1024 * 1024 * 4 # 512 MiB + + with memory_profiling(baseline_memory_in_bytes=baseline_memory_in_bytes, + weights_memory_in_bytes=weights_memory_in_bytes) as result: + # make a memory spike, 1 GiB + spike = torch.randn(256, 1024, 1024, device='cuda', dtype=torch.float32) + del spike + + # Add some extra non-torch memory 256 MiB (simulate NCCL) + handle2 = lib.cudaMalloc(256 * 1024 * 1024) + + # Check that the memory usage is within 5% of the expected values + non_torch_ratio = result.non_torch_increase_in_bytes / (256 * 1024 * 1024) # noqa + torch_peak_ratio = result.torch_peak_increase_in_bytes / (1024 * 1024 * 1024) # noqa + assert abs(non_torch_ratio - 1) <= 0.05 + assert abs(torch_peak_ratio - 1) <= 0.05 + del weights + lib.cudaFree(handle1) + lib.cudaFree(handle2) diff --git a/tests/worker/test_profile.py b/tests/worker/test_profile.py index 194ea2aa506f4..79233c75714de 100644 --- a/tests/worker/test_profile.py +++ b/tests/worker/test_profile.py @@ -31,10 +31,6 @@ def test_gpu_memory_profiling(): is_driver_worker=True, ) - # Load the model so we can profile it - worker.init_device() - worker.load_model() - # Set 10GiB as the total gpu ram to be device-agnostic def mock_mem_info(): current_usage = torch.cuda.memory_stats( @@ -46,20 +42,24 @@ def mock_mem_info(): from unittest.mock import patch with patch("torch.cuda.mem_get_info", side_effect=mock_mem_info): + # Load the model so we can profile it + worker.init_device() + worker.load_model() gpu_blocks, _ = worker.determine_num_available_blocks() - # Peak vram usage by torch should be 0.7077 GiB + # Peak vram usage by torch should be 0.47 GiB + # Model weights take 0.25 GiB # No memory should be allocated outside of torch # 9.0 GiB should be the utilization target - # 8.2923 GiB should be available for the KV cache + # 8.28 GiB should be available for the KV cache block_size = CacheEngine.get_cache_block_size( engine_config.cache_config, engine_config.model_config, engine_config.parallel_config) - expected_blocks = (8.2923 * 1024**3) // block_size + expected_blocks = (8.28 * 1024**3) // block_size # Check within a small tolerance for 
portability # Hardware, kernel, or dependency changes could all affect memory # utilization. - # A 10 block tolerance here should be about 6MB of wiggle room. - assert abs(gpu_blocks - expected_blocks) < 10 + # A 100 block tolerance here should be about 60MB of wiggle room. + assert abs(gpu_blocks - expected_blocks) < 100 diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 0aa367a173b6c..06b8542779dc0 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -487,11 +487,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: help='The fraction of GPU memory to be used for the model ' 'executor, which can range from 0 to 1. For example, a value of ' '0.5 would imply 50%% GPU memory utilization. If unspecified, ' - 'will use the default value of 0.9. This is a global gpu memory ' - 'utilization limit, for example if 50%% of the gpu memory is ' - 'already used before vLLM starts and --gpu-memory-utilization is ' - 'set to 0.9, then only 40%% of the gpu memory will be allocated ' - 'to the model executor.') + 'will use the default value of 0.9. This is a per-instance ' + 'limit, and only applies to the current vLLM instance.' + 'It does not matter if you have another vLLM instance running ' + 'on the same GPU. For example, if you have two vLLM instances ' + 'running on the same GPU, you can set the GPU memory utilization ' + 'to 0.5 for each instance.') parser.add_argument( '--num-gpu-blocks-override', type=int, diff --git a/vllm/utils.py b/vllm/utils.py index 45e682ac15782..73d2ae25f15ca 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -23,10 +23,12 @@ from asyncio import FIRST_COMPLETED, AbstractEventLoop, Future, Task from collections import UserDict, defaultdict from collections.abc import Iterable, Mapping +from dataclasses import dataclass, field from functools import lru_cache, partial, wraps from typing import (TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Callable, - Dict, Generic, Hashable, List, Literal, Optional, - OrderedDict, Set, Tuple, Type, TypeVar, Union, overload) + Dict, Generator, Generic, Hashable, List, Literal, + Optional, OrderedDict, Set, Tuple, Type, TypeVar, Union, + overload) from uuid import uuid4 import numpy as np @@ -1664,3 +1666,122 @@ def kill_process_tree(pid: int): # Finally kill the parent with contextlib.suppress(ProcessLookupError): os.kill(pid, signal.SIGKILL) + + +@dataclass +class MemorySnapshot: + """Memory snapshot.""" + torch_peak_in_bytes: int = 0 + torch_memory_in_bytes: int = 0 + timestamp: float = 0.0 + + def measure(self): + self.torch_peak_in_bytes = torch.cuda.memory_stats( + )["allocated_bytes.all.peak"] + self.torch_memory_in_bytes = torch.cuda.memory_stats( + )["allocated_bytes.all.current"] + self.timestamp = time.time() + + def __sub__(self, other: "MemorySnapshot") -> "MemorySnapshot": + """support a - b""" + return MemorySnapshot( + torch_peak_in_bytes=self.torch_peak_in_bytes - + other.torch_peak_in_bytes, + torch_memory_in_bytes=self.torch_memory_in_bytes - + other.torch_memory_in_bytes, + timestamp=self.timestamp - other.timestamp) + + +@dataclass +class MemoryProfilingResult: + """Memory profiling result. 
+ """ # noqa + baseline_memory_in_bytes: int = 0 + non_kv_cache_memory_in_bytes: int = 0 + torch_peak_increase_in_bytes: int = 0 + non_torch_increase_in_bytes: int = 0 + weights_memory_in_bytes: float = 0 + before_profile: MemorySnapshot = field(default_factory=MemorySnapshot) + after_profile: MemorySnapshot = field(default_factory=MemorySnapshot) + profile_time: float = 0.0 + + +@contextlib.contextmanager +def memory_profiling( + baseline_memory_in_bytes: int, weights_memory_in_bytes: int +) -> Generator[MemoryProfilingResult, None, None]: + """Memory profiling context manager. + baseline_memory_in_bytes: memory used by all the components other than + the current vLLM instance. It contains: memory used by other processes, memory + used by another vLLM instance in the same process, etc. It is usually measured + before the current vLLM instance initialize the device. And we assume it is + constant during the profiling of the current vLLM instance. + weights_memory_in_bytes: memory used by PyTorch when loading the model weights. + Note that, before loading the model weights, we also initialize the device + and distributed environment, which may consume some memory. This part is not + included in the weights_memory_in_bytes because PyTorch does not control it. + + The memory in one GPU can be classified into 3 categories: + 1. memory used by anything other than the current vLLM instance. + 2. memory used by torch in the current vLLM instance. + 3. memory used in the current vLLM instance, but not by torch. + + A quantitive example: + + Before creating the current vLLM instance: + category 1: 1 GiB + category 2: 0 GiB + category 3: 0 GiB + + After creating the current vLLM instance and loading the model, + (i.e. before profiling): + category 1: 1 GiB + category 2: 2 GiB (model weights take 2 GiB) + category 3: 0.5 GiB (memory used by NCCL) + + During profiling (peak): + category 1: 1 GiB + category 2: 4 GiB (peak activation tensors take 2 GiB) + category 3: 1 GiB (memory used by NCCL + buffers for some attention backends) + + After profiling: + category 1: 1 GiB + category 2: 3 GiB (after garbage-collecting activation tensors) + category 3: 1 GiB (memory used by NCCL + buffers for some attention backends) + + In this case, non-kv cache takes 5 GiB in total, including: + a. 2 GiB used by the model weights (category 2) + b. 2 GiB reserved for the peak activation tensors (category 2) + c. 1 GiB used by non-torch components (category 3) + + The memory used for loading weights (a.) is directly given from the argument `weights_memory_in_bytes`. + + The increase of ``torch.cuda.memory_stats()["allocated_bytes.all.peak"]` after profiling gives (b.). + + (c.) is tricky. We measure the total memory used in this GPU (`torch.cuda.mem_get_info()[1] - torch.cuda.mem_get_info()[0]`), + subtract the baseline memory, the memory used by the model weights, and diff of `torch.cuda.memory_stats()["allocated_bytes.all.current"]`. 
+ """ # noqa + torch.cuda.reset_peak_memory_stats() + + result = MemoryProfilingResult() + + result.baseline_memory_in_bytes = baseline_memory_in_bytes + # the part of memory used for holding the model weights + result.weights_memory_in_bytes = weights_memory_in_bytes + + result.before_profile.measure() + + yield result + + gc.collect() + torch.cuda.empty_cache() + + result.after_profile.measure() + + diff = result.after_profile - result.before_profile + result.torch_peak_increase_in_bytes = diff.torch_peak_in_bytes + current_cuda_memory_bytes = torch.cuda.mem_get_info( + )[1] - torch.cuda.mem_get_info()[0] + result.non_torch_increase_in_bytes = current_cuda_memory_bytes - baseline_memory_in_bytes - weights_memory_in_bytes - diff.torch_memory_in_bytes # noqa + result.profile_time = diff.timestamp + result.non_kv_cache_memory_in_bytes = result.non_torch_increase_in_bytes + result.torch_peak_increase_in_bytes + result.weights_memory_in_bytes # noqa diff --git a/vllm/worker/multi_step_model_runner.py b/vllm/worker/multi_step_model_runner.py index e08a61e31fe42..18b03bf1bfb56 100644 --- a/vllm/worker/multi_step_model_runner.py +++ b/vllm/worker/multi_step_model_runner.py @@ -645,7 +645,8 @@ def _advance_step(self, model_input: StatefulModelInput, return model_input def load_model(self) -> None: - return self._base_model_runner.load_model() + self._base_model_runner.load_model() + self.model_memory_usage = self._base_model_runner.model_memory_usage def save_sharded_state( self, diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index a368bb9ee9a5b..f51b51d433d3d 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -1,7 +1,6 @@ """A GPU worker class.""" import gc import os -import time from typing import Dict, List, Optional, Set, Tuple, Type, Union import torch @@ -22,6 +21,7 @@ from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sequence import (ExecuteModelRequest, IntermediateTensors, SequenceGroupMetadata, SequenceGroupMetadataDelta) +from vllm.utils import GiB_bytes, memory_profiling from vllm.worker.cache_engine import CacheEngine from vllm.worker.enc_dec_model_runner import EncoderDecoderModelRunner from vllm.worker.model_runner import GPUModelRunnerBase, ModelRunner @@ -192,33 +192,22 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: torch.cuda.reset_peak_memory_stats() free_memory_pre_profile, total_gpu_memory = torch.cuda.mem_get_info() - start_time = time.time() # Execute a forward pass with dummy inputs to profile the memory usage # of the model. - self.model_runner.profile_run() - torch.cuda.synchronize() + with memory_profiling(baseline_memory_in_bytes=total_gpu_memory - + self.init_gpu_memory, + weights_memory_in_bytes=self.model_runner. + model_memory_usage) as result: + self.model_runner.profile_run() + torch.cuda.synchronize() self._assert_memory_footprint_increased_during_profiling() - # Get the peak memory allocation recorded by torch - peak_memory = torch.cuda.memory_stats()["allocated_bytes.all.peak"] - - # Check for any memory left around that may have been allocated on the - # gpu outside of `torch`. 
NCCL operations, for example, can use a few - # GB during a forward pass - torch.cuda.empty_cache() - torch_allocated_bytes = torch.cuda.memory_stats( - )["allocated_bytes.all.current"] - total_allocated_bytes = torch.cuda.mem_get_info( - )[1] - torch.cuda.mem_get_info()[0] - non_torch_allocations = total_allocated_bytes - torch_allocated_bytes - if non_torch_allocations > 0: - peak_memory += non_torch_allocations - - available_kv_cache_memory = ( - total_gpu_memory * self.cache_config.gpu_memory_utilization - - peak_memory) + memory_for_current_instance = total_gpu_memory * \ + self.cache_config.gpu_memory_utilization + available_kv_cache_memory = (memory_for_current_instance - + result.non_kv_cache_memory_in_bytes) # Calculate the number of blocks that can be allocated with the # profiled peak memory. @@ -233,24 +222,23 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: num_gpu_blocks = max(num_gpu_blocks, 0) num_cpu_blocks = max(num_cpu_blocks, 0) - end_time = time.time() - logger.info( - "Memory profiling results: " - "duration=%.2f seconds, " - "total_gpu_memory=%.2fGiB, " - "initial_memory_usage=%.2fGiB, " - "peak_torch_memory=%.2fGiB, " - "memory_usage_post_profile=%.2fGiB, " - "non_torch_memory=%.2fGiB, " - "kv_cache_size=%.2fGiB, " - "gpu_memory_utilization=%.2f.", end_time - start_time, - total_gpu_memory / (1024**3), - (total_gpu_memory - free_memory_pre_profile) / (1024**3), - (peak_memory - non_torch_allocations) / (1024**3), - total_allocated_bytes / (1024**3), - non_torch_allocations / (1024**3), - available_kv_cache_memory / (1024**3), - self.cache_config.gpu_memory_utilization) + msg = (f"Memory profiling takes {result.profile_time:.2f} seconds\n" + "the current vLLM instance can use " + "total_gpu_memory " + f"({(total_gpu_memory / GiB_bytes):.2f}GiB)" + " x gpu_memory_utilization " + f"({self.cache_config.gpu_memory_utilization:.2f})" + f" = {(memory_for_current_instance / GiB_bytes):.2f}GiB\n" + "model weights take " + f"{(result.weights_memory_in_bytes / GiB_bytes):.2f}GiB;" + " non_torch_memory takes " + f"{(result.non_torch_increase_in_bytes / GiB_bytes):.2f}GiB;" + " PyTorch activation peak memory takes " + f"{(result.torch_peak_increase_in_bytes / GiB_bytes):.2f}GiB;" + " the rest of the memory reserved for KV Cache is " + f"{(available_kv_cache_memory / GiB_bytes):.2f}GiB.") + + logger.info(msg) # Final cleanup if self.model_runner.lora_manager: From 35ffa682b1cd3f47eb6cda586a16dab5c0401477 Mon Sep 17 00:00:00 2001 From: bk-TurbaAI Date: Mon, 16 Dec 2024 23:20:39 +0100 Subject: [PATCH 386/397] [Docs] hint to enable use of GPU performance counters in profiling tools for multi-node distributed serving (#11235) Co-authored-by: Michael Goin --- docs/source/serving/distributed_serving.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/serving/distributed_serving.rst b/docs/source/serving/distributed_serving.rst index 4d57206e53a05..b24ba53e59694 100644 --- a/docs/source/serving/distributed_serving.rst +++ b/docs/source/serving/distributed_serving.rst @@ -54,7 +54,7 @@ Multi-Node Inference and Serving If a single node does not have enough GPUs to hold the model, you can run the model using multiple nodes. It is important to make sure the execution environment is the same on all nodes, including the model path, the Python environment. The recommended way is to use docker images to ensure the same environment, and hide the heterogeneity of the host machines via mapping them into the same docker configuration. 
-The first step, is to start containers and organize them into a cluster. We have provided a helper `script `_ to start the cluster. +The first step, is to start containers and organize them into a cluster. We have provided a helper `script `_ to start the cluster. Please note, this script launches docker without administrative privileges that would be required to access GPU performance counters when running profiling and tracing tools. For that purpose, the script can have ``CAP_SYS_ADMIN`` to the docker container by using the ``--cap-add`` option in the docker run command. Pick a node as the head node, and run the following command: From c301616ed23fef433db1a49df332b9d61d3178ad Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 16 Dec 2024 15:53:18 -0800 Subject: [PATCH 387/397] [ci][tests] add gh200 tests (#11244) Signed-off-by: youkaichao --- .buildkite/run-gh200-test.sh | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 .buildkite/run-gh200-test.sh diff --git a/.buildkite/run-gh200-test.sh b/.buildkite/run-gh200-test.sh new file mode 100644 index 0000000000000..d25510c47fe6b --- /dev/null +++ b/.buildkite/run-gh200-test.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# This script build the GH200 docker image and run the offline inference inside the container. +# It serves a sanity check for compilation and basic model usage. +set -ex + +# Try building the docker image +DOCKER_BUILDKIT=1 docker build . \ + --target test \ + -platform "linux/arm64" \ + -t gh200-test \ + --build-arg max_jobs=66 \ + --build-arg nvcc_threads=2 \ + --build-arg torch_cuda_arch_list="9.0+PTX" \ + --build-arg vllm_fa_cmake_gpu_arches="90-real" + +# Setup cleanup +remove_docker_container() { docker rm -f gh200-test || true; } +trap remove_docker_container EXIT +remove_docker_container + +# Run the image and test offline inference +docker run --name gh200-test --gpus=all --entrypoint="" gh200-test bash -c ' + python3 examples/offline_inference.py +' From 88a412ed3d964de3443c42a6a35108115ee0ad25 Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 16 Dec 2024 16:15:22 -0800 Subject: [PATCH 388/397] [torch.compile] fast inductor (#11108) Signed-off-by: youkaichao Co-authored-by: Tyler Michael Smith --- vllm/compilation/backends.py | 213 +++++++++++++++++- vllm/config.py | 415 ++++++++++++++++++++++++++++++++++- vllm/envs.py | 3 + 3 files changed, 624 insertions(+), 7 deletions(-) diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 4a5dc337d01b8..0c7bbfe599b02 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -1,6 +1,10 @@ +import ast import copy import dataclasses +import os +import pprint import time +from collections import defaultdict from contextlib import ExitStack from typing import Any, Callable, Dict, List, Optional, Sequence, Set, Tuple from unittest.mock import patch @@ -21,6 +25,122 @@ logger = init_logger(__name__) +class InductorHashCache: + """ + Disk format: a Python list of tuples, each tuple is + (runtime_shape, graph_index, hash_str) + We use list of tuple for readability. + + In-memory format: a defaultdict of dict, where the key is + runtime_shape, and the value is a dict of graph_index to hash_str. + + The data is essentially `Dict[Optional[int], Dict[int, str]]`, + we don't use json here because json doesn't support int as key. + + TODO: better off-the-shelf solution to serialize the data? 
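To make the two formats concrete, the small self-contained sketch below performs the same round trip as deserialize(): parse the on-disk literal with ast.literal_eval and rebuild the nested mapping. The runtime shapes and hash strings here are made-up placeholders; in practice the hash strings come from Inductor's compiled_fx_graph_hash.

    import ast
    from collections import defaultdict

    # what inductor_hash_cache.py could look like on disk (placeholder hashes)
    disk_text = ("[(None, 0, 'hash_general_0'), (None, 1, 'hash_general_1'), "
                 "(8, 0, 'hash_bs8_0')]")

    # parse the literal and rebuild the Dict[Optional[int], Dict[int, str]] view
    cache = defaultdict(dict)
    for runtime_shape, graph_index, hash_str in ast.literal_eval(disk_text):
        cache[runtime_shape][graph_index] = hash_str

    assert cache[None][1] == 'hash_general_1'  # graph 1, general (dynamic) shape
    assert cache[8][0] == 'hash_bs8_0'         # graph 0, compiled for runtime shape 8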
+ """ + + def __init__(self, cache_dir: str, disabled: bool = False): + self.cache: defaultdict = defaultdict(dict) + self.disabled = disabled + self.cache_dir = cache_dir + self.cache_file_path = os.path.join(cache_dir, + "inductor_hash_cache.py") + if disabled: + return + # set flags so that Inductor and Triton store their cache + # in the cache_dir, then users only need to copy the cache_dir + # to another machine to reuse the cache. + inductor_cache = os.path.join(cache_dir, "inductor_cache") + os.makedirs(inductor_cache, exist_ok=True) + os.environ["TORCHINDUCTOR_CACHE_DIR"] = inductor_cache + triton_cache = os.path.join(cache_dir, "triton_cache") + os.makedirs(triton_cache, exist_ok=True) + os.environ["TRITON_CACHE_DIR"] = triton_cache + if os.path.exists(self.cache_file_path): + with open(self.cache_file_path) as f: + self.deserialize(f.read()) + + def deserialize(self, data: str): + # we use ast.literal_eval to parse the data + # because it is a safe way to parse Python literals. + # do not use eval(), it is unsafe. + list_data = ast.literal_eval(data) + for runtime_shape, graph_index, hash_str in list_data: + self.cache[runtime_shape][graph_index] = hash_str + + def serialize(self) -> str: + data = [] + for runtime_shape, graph_index_to_hash_str in self.cache.items(): + for graph_index, hash_str in graph_index_to_hash_str.items(): + data.append((runtime_shape, graph_index, hash_str)) + printer = pprint.PrettyPrinter(indent=4) + return printer.pformat(data) + + def save_to_file(self): + if self.disabled: + return + with open(self.cache_file_path, "w") as f: + f.write(self.serialize()) + + def __contains__(self, key: Tuple[Optional[int], int]) -> bool: + if self.disabled: + return False + runtime_shape, graph_index = key + return runtime_shape in self.cache and graph_index in self.cache[ + runtime_shape] + + def __getitem__(self, key: Tuple[Optional[int], int]) -> str: + if self.disabled: + raise KeyError("cannot read from disabled cache") + runtime_shape, graph_index = key + return self.cache[runtime_shape][graph_index] + + def __setitem__(self, key: Tuple[Optional[int], int], value: str): + # setitem for disabled cache is fine, because we + # don't actually write to the disk + runtime_shape, graph_index = key + self.cache[runtime_shape][graph_index] = value + + +class AlwaysHitShapeEnv: + """ + Why do we need this class: + + For normal `torch.compile` usage, every compilation will have + one Dynamo bytecode compilation and one Inductor compilation. + The Inductor compilation happens under the context of the + Dynamo bytecode compilation, and that context is used to + determine the dynamic shape information, etc. + + For our use case, we only run Dynamo bytecode compilation once, + and run Inductor compilation multiple times with different shapes + plus a general shape. The compilation for specific shapes happens + outside of the context of the Dynamo bytecode compilation. At that + time, we don't have shape environment to provide to Inductor, and + it will fail the Inductor code cache lookup. + + By providing a dummy shape environment that always hits, we can + make the Inductor code cache lookup always hit, and we can + compile the graph for different shapes as needed. + + The following dummy methods are obtained by trial-and-error + until it works. 
+ """ + + def __init__(self) -> None: + self.guards: List[Any] = [] + + def evaluate_guards_expression(self, *args, **kwargs): + return True + + def get_pruned_guards(self, *args, **kwargs): + return [] + + def produce_guards_expression(self, *args, **kwargs): + return "" + + def wrap_inductor(graph, example_inputs, additional_inductor_config, @@ -55,9 +175,93 @@ def wrap_inductor(graph, # inductor can inplace modify the graph, so we need to copy it # see https://github.com/pytorch/pytorch/issues/138980 graph = copy.deepcopy(graph) - compiled_graph = compile_fx(graph, - example_inputs, - config_patches=current_config) + + cache_data = compilation_config.inductor_hash_cache + if (runtime_shape, graph_index) in cache_data: + # we compiled this graph before + # so we can directly lookup the compiled graph via hash + hash_str = cache_data[(runtime_shape, graph_index)] + if graph_index == 0: + # adds some info logging for the first graph + logger.info( + "Directly lookup the graph for shape %s from the cache", + str(runtime_shape)) # noqa + logger.debug( + "directly lookup the %s-th graph for shape %s via hash %s", + graph_index, str(runtime_shape), hash_str) + from torch._inductor.codecache import FxGraphCache + with patch("torch._inductor.codecache.FxGraphCache._get_shape_env", + lambda *args, **kwargs: AlwaysHitShapeEnv()): + inductor_compiled_graph = FxGraphCache._lookup_graph( + hash_str, example_inputs, True, False) + assert inductor_compiled_graph is not None, ( + "Inductor cache lookup failed. Please remove" + f"the cache file {compilation_config.inductor_hash_cache.cache_file_path} and try again." # noqa + ) + + # Inductor calling convention (function signature): + # f(list) -> tuple + # Dynamo calling convention (function signature): + # f(*args) -> Any + + # need to know if the graph returns a tuple + from torch._inductor.compile_fx import graph_returns_tuple + returns_tuple = graph_returns_tuple(graph) + + # this is the graph we return to Dynamo to run + def compiled_graph(*args): + # convert args to list + list_args = list(args) + graph_output = inductor_compiled_graph(list_args) + # unpack the tuple if needed + if returns_tuple: + return graph_output + else: + return graph_output[0] + else: + # it's the first time we compile this graph + # the assumption is that we don't have nested Inductor compilation. + # compiled_fx_graph_hash will only be called once, and we can hook + # it to get the hash of the compiled graph directly. + from torch._inductor.codecache import compiled_fx_graph_hash + + def hijack_compiled_fx_graph_hash(*args, **kwargs): + out = compiled_fx_graph_hash(*args, **kwargs) + # store the hash in the cache + nonlocal cache_data + cache_data[(runtime_shape, graph_index)] = out[0] + if graph_index == 0: + # adds some info logging for the first graph + logger.info("Cache the graph of shape %s for later use", + str(runtime_shape)) + logger.debug("store the %s-th graph for shape %s via hash %s", + graph_index, str(runtime_shape), out[0]) + return out + + def _check_can_cache(*args, **kwargs): + # no error means it can be cached. + # Inductor refuses to cache the graph outside of Dynamo + # tracing context, and also disables caching for graphs + # with high-order ops. + # For vLLM, in either case, we want to cache the graph. 
+ # see https://github.com/pytorch/pytorch/blob/9f5ebf3fc609105a74eab4ccc24932d6353ff566/torch/_inductor/codecache.py#L1221 # noqa + return + + def _get_shape_env(): + return AlwaysHitShapeEnv() + + with patch(# for hijacking the hash of the compiled graph + "torch._inductor.codecache.compiled_fx_graph_hash", + hijack_compiled_fx_graph_hash), \ + patch(# for providing a dummy shape environment + "torch._inductor.codecache.FxGraphCache._get_shape_env", + _get_shape_env), \ + patch(# for forcing the graph to be cached + "torch._inductor.codecache.FxGraphCache._check_can_cache", + _check_can_cache): + compiled_graph = compile_fx(graph, + example_inputs, + config_patches=current_config) # after compiling the last graph, record the end time if graph_index == num_graphs - 1: @@ -457,6 +661,9 @@ def __call__(self, *args) -> Any: # finished compilations for all required shapes if self.is_last_graph and not self.to_be_compiled_sizes: + + # save the hash of the inductor graph for the next run + self.compilation_config.inductor_hash_cache.save_to_file() end_monitoring_torch_compile(self.vllm_config) if not entry.use_cudagraph: diff --git a/vllm/config.py b/vllm/config.py index fce8011be4015..9cfd08024ea7b 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -3,6 +3,7 @@ import enum import hashlib import json +import os import warnings from contextlib import contextmanager from dataclasses import dataclass, field, replace @@ -162,6 +163,30 @@ class ModelConfig: which allows no processors. """ + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + factors: List[Any] = [] + factors.append(self.model) + factors.append(self.dtype) + factors.append(self.quantization) + factors.append(self.quantization_param_path) + factors.append(self.revision) + factors.append(self.code_revision) + factors.append(self.trust_remote_code) + factors.append(self.rope_scaling) + factors.append(self.rope_theta) + return hashlib.sha256(str(factors).encode()).hexdigest() + def __init__(self, model: str, task: Union[TaskOption, Literal["draft"]], @@ -203,6 +228,8 @@ def __init__(self, self.seed = seed self.revision = revision self.code_revision = code_revision + self.rope_scaling = rope_scaling + self.rope_theta = rope_theta if hf_overrides is None: hf_overrides = {} @@ -832,6 +859,24 @@ class CacheConfig: cpu_offload_gb: Size of the CPU offload buffer in GiB. """ + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + factors: List[Any] = [] + factors.append(self.cache_dtype) + # `cpu_offload_gb` does not use `torch.compile` yet. 
+ hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __init__( self, block_size: int, @@ -928,6 +973,24 @@ class TokenizerPoolConfig: pool_type: Union[str, Type["BaseTokenizerGroup"]] extra_config: dict + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __post_init__(self): if self.pool_type not in ("ray", ) and not isinstance( self.pool_type, type): @@ -1010,6 +1073,24 @@ class LoadConfig: default_factory=dict) ignore_patterns: Optional[Union[List[str], str]] = None + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __post_init__(self): model_loader_extra_config = self.model_loader_extra_config or {} if isinstance(model_loader_extra_config, str): @@ -1073,6 +1154,19 @@ class ParallelConfig: rank: int = 0 + def compute_hash(self): + """ + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + factors: List[Any] = [] + factors.append(self.pipeline_parallel_size) + factors.append(self.tensor_parallel_size) + return hashlib.sha256(str(factors).encode()).hexdigest() + def __post_init__(self) -> None: self.world_size = self.pipeline_parallel_size * \ self.tensor_parallel_size @@ -1209,6 +1303,24 @@ class SchedulerConfig: chunked_prefill_enabled: bool = field(init=False) + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. 
+ factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __post_init__(self) -> None: if self.max_num_batched_tokens is None: if self.enable_chunked_prefill: @@ -1286,6 +1398,25 @@ class DeviceConfig: device: Optional[torch.device] device_type: str + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # the device/platform information will be summarized + # by torch/vllm automatically. + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __init__(self, device: str = "auto") -> None: if device == "auto": # Automated device type detection @@ -1313,6 +1444,24 @@ class SpeculativeConfig: decoding with top-1 proposals. """ + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # spec decode does not use `torch.compile` yet. + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + @staticmethod def maybe_create_spec_config( target_model_config: ModelConfig, @@ -1753,6 +1902,24 @@ class LoRAConfig: long_lora_scaling_factors: Optional[Tuple[float]] = None bias_enabled: bool = False + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # LoRA is not compatible with `torch.compile` . + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __post_init__(self): # Setting the maximum rank to 256 should be able to satisfy the vast # majority of applications. @@ -1802,6 +1969,24 @@ class PromptAdapterConfig: max_cpu_prompt_adapters: Optional[int] = None prompt_adapter_dtype: Optional[torch.dtype] = None + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. 
+ factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __post_init__(self): if self.max_prompt_adapters < 1: @@ -1830,6 +2015,24 @@ class MultiModalConfig: for each :class:`~vllm.multimodal.MultiModalPlugin`. """ + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + # TODO: Add configs to init vision tower or not. @@ -1869,6 +2072,24 @@ class PoolerConfig: ``math-shepherd-mistral-7b-prm`` model. """ + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + @staticmethod def from_json(json_str: str) -> "PoolerConfig": return PoolerConfig(**json.loads(json_str)) @@ -2103,6 +2324,24 @@ class DecodingConfig: # 'outlines' / 'lm-format-enforcer' / 'xgrammar' guided_decoding_backend: str = 'xgrammar' + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __post_init__(self): valid_guided_backends = ['outlines', 'lm-format-enforcer', 'xgrammar'] backend = self.guided_decoding_backend @@ -2124,6 +2363,24 @@ class ObservabilityConfig: # If set, collects the model execute time for the request. collect_model_execute_time: bool = False + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. 
+ factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + def __post_init__(self): if not is_otel_available() and self.otlp_traces_endpoint is not None: raise ValueError( @@ -2165,6 +2422,24 @@ class KVTransferConfig(BaseModel): # The KV connector port, used to build distributed connection kv_port: int = 14579 + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. + """ + # no factors to consider. + # this config will not affect the computation graph. + factors: List[Any] = [] + hash_str = hashlib.md5(str(factors).encode()).hexdigest() + return hash_str + @classmethod def from_cli(cls, cli_value: str) -> "KVTransferConfig": """Parse the CLI value for the kv cache transfer config.""" @@ -2234,6 +2509,9 @@ class CompilationConfig(BaseModel): - 2: dynamo once. - 3: piecewise compilation. - debug_dump_path: the path to dump the debug information. + - cache_dir: the directory to store the compiled graph, to + accelerate Inductor compilation. By default, it will use + model-related information to generate a cache directory. - backend: the backend for compilation. It needs to be a string. - "" (empty string): use the default backend. - "eager"/"openxla"/...: use the specified backend registered in PyTorch. @@ -2302,12 +2580,10 @@ class CompilationConfig(BaseModel): """ # noqa level: int = 0 debug_dump_path: str = "" + cache_dir: str = "" backend: str = "" custom_ops: List[str] = Field(default_factory=list) - splitting_ops: List[str] = Field(default_factory=lambda: [ - "vllm.unified_attention", - "vllm.unified_attention_with_output", - ]) + splitting_ops: List[str] = Field(default=None) # type: ignore use_inductor: bool = True candidate_compile_sizes: Optional[List[int]] = Field(default=None) @@ -2371,12 +2647,37 @@ def model_post_init(self, __context: Any) -> None: enabled_custom_ops: Counter[str] = PrivateAttr disabled_custom_ops: Counter[str] = PrivateAttr compilation_time: float = PrivateAttr + # should be InductorHashCache, but Pydantic does not support it + inductor_hash_cache: Any = PrivateAttr # Per-model forward context # Mainly used to store attention cls # Map from layer name to the attention cls static_forward_context: Dict[str, Any] = PrivateAttr + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. 
+ """ + factors: List[Any] = [] + factors.append(self.level) + factors.append(self.backend) + factors.append(self.custom_ops) + factors.append(self.splitting_ops) + factors.append(self.use_inductor) + factors.append(self.inductor_compile_config) + factors.append(self.inductor_passes) + factors.append(self.pass_config.uuid()) + return hashlib.sha256(str(factors).encode()).hexdigest() + def __repr__(self) -> str: exclude = { "static_forward_context", @@ -2405,6 +2706,27 @@ def model_post_init(self, __context: Any) -> None: count_all = self.custom_ops.count("all") assert count_none + count_all <= 1, "Can only specify 'none' or 'all'" + if self.splitting_ops is None: + if envs.VLLM_USE_V1: + # v1 must split the graph on attention ops + # for piecewise cudagraph + self.splitting_ops = [ + "vllm.unified_attention", + "vllm.unified_attention_with_output", + ] + else: + # v0 can use full graph compilation without splitting, + # splitting is optional. + # right now we still need it. kv cache shape + # will be included in the graph if we don't split + # the graph. + # TODO: hide kv cache in static forward context + # so that inductor does not see it. + self.splitting_ops = [ + "vllm.unified_attention", + "vllm.unified_attention_with_output", + ] + for k, v in self.inductor_passes.items(): if not isinstance(v, str): assert callable(v), ( @@ -2444,6 +2766,30 @@ def init_backend(self, vllm_config: "VllmConfig") -> Union[str, Callable]: # TODO: pass user-specified backend to piecewise compilation # merge with the config use_inductor assert self.level == CompilationLevel.PIECEWISE + + if not self.cache_dir: + # no provided cache dir, generate one based on the known factors + # that affects the compilation. if none of the factors change, + # the cache dir will be the same so that we can reuse the compiled + # graph. + hash_key = vllm_config.compute_hash() + cache_dir = os.path.join( + envs.VLLM_CACHE_ROOT, "torch_compile_cache", hash_key, + f"rank_{vllm_config.parallel_config.rank}") + os.makedirs(cache_dir, exist_ok=True) + self.cache_dir = cache_dir + + disabled = envs.VLLM_DISABLE_COMPILE_CACHE + from vllm.compilation.backends import InductorHashCache + self.inductor_hash_cache: InductorHashCache = InductorHashCache( + self.cache_dir, disabled=disabled) + if disabled: + logger.info("vLLM's torch.compile cache is disabled.") + else: + logger.info( + "Using cache directory: %s for vLLM's torch.compile", + self.cache_dir) + from vllm.compilation.backends import VllmBackend return VllmBackend(vllm_config) @@ -2520,6 +2866,67 @@ class VllmConfig: init=True) # type: ignore instance_id: str = "" + def compute_hash(self) -> str: + """ + WARNING: Whenever a new field is added to this config, + ensure that it is included in the factors list if + it affects the computation graph. + + Provide a hash that uniquely identifies all the configs + that affect the structure of the computation + graph from input ids/embeddings to the final hidden states, + excluding anything before input ids/embeddings and after + the final hidden states. 
+ """ + factors: List[Any] = [] + # summarize system state + from torch._inductor.codecache import CacheBase + system_factors = CacheBase.get_system() + factors.append(system_factors) + + # summarize pytorch state + from torch._inductor.codecache import torch_key + torch_factors = torch_key() + factors.append(torch_factors) + + # summarize vllm config + vllm_factors: List[Any] = [] + from vllm import __version__ + vllm_factors.append(__version__) + if self.model_config: + vllm_factors.append(self.model_config.compute_hash()) + if self.cache_config: + vllm_factors.append(self.cache_config.compute_hash()) + if self.parallel_config: + vllm_factors.append(self.parallel_config.compute_hash()) + if self.scheduler_config: + vllm_factors.append(self.scheduler_config.compute_hash()) + if self.device_config: + vllm_factors.append(self.device_config.compute_hash()) + if self.load_config: + vllm_factors.append(self.load_config.compute_hash()) + if self.lora_config: + vllm_factors.append(self.lora_config.compute_hash()) + if self.speculative_config: + vllm_factors.append(self.speculative_config.compute_hash()) + if self.decoding_config: + vllm_factors.append(self.decoding_config.compute_hash()) + if self.observability_config: + vllm_factors.append(self.observability_config.compute_hash()) + if self.prompt_adapter_config: + vllm_factors.append(self.prompt_adapter_config.compute_hash()) + if self.quant_config: + pass # should be captured by model_config.quantization + if self.compilation_config: + vllm_factors.append(self.compilation_config.compute_hash()) + if self.kv_transfer_config: + vllm_factors.append(self.kv_transfer_config.compute_hash()) + + factors.append(vllm_factors) + + hash_str = hashlib.md5(str(factors).encode()).hexdigest()[:10] + return hash_str + def pad_for_cudagraph(self, batch_size: int) -> int: # if batch_size > self.compilation_config.max_capture_size, # it should raise an IndexError. diff --git a/vllm/envs.py b/vllm/envs.py index da17b747ea215..18870c1c6b51a 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -71,6 +71,7 @@ VLLM_USE_V1: bool = False VLLM_ENABLE_V1_MULTIPROCESSING: bool = True VLLM_LOG_BATCHSIZE_INTERVAL: float = -1 + VLLM_DISABLE_COMPILE_CACHE: bool = False def get_default_cache_root(): @@ -463,6 +464,8 @@ def get_default_config_root(): lambda: bool(int(os.getenv("VLLM_ENABLE_V1_MULTIPROCESSING", "1"))), "VLLM_LOG_BATCHSIZE_INTERVAL": lambda: float(os.getenv("VLLM_LOG_BATCHSIZE_INTERVAL", "-1")), + "VLLM_DISABLE_COMPILE_CACHE": + lambda: bool(int(os.getenv("VLLM_DISABLE_COMPILE_CACHE", "0"))), } # end-env-vars-definition From 35bae114a89e03e3dc6a6d2f758378e58938bffa Mon Sep 17 00:00:00 2001 From: youkaichao Date: Mon, 16 Dec 2024 17:22:38 -0800 Subject: [PATCH 389/397] fix gh200 tests on main (#11246) Signed-off-by: youkaichao --- .buildkite/run-gh200-test.sh | 4 ++-- docs/source/serving/deploying_with_docker.rst | 5 +---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/.buildkite/run-gh200-test.sh b/.buildkite/run-gh200-test.sh index d25510c47fe6b..d06604f96f2b8 100644 --- a/.buildkite/run-gh200-test.sh +++ b/.buildkite/run-gh200-test.sh @@ -6,8 +6,8 @@ set -ex # Try building the docker image DOCKER_BUILDKIT=1 docker build . 
\ - --target test \ - -platform "linux/arm64" \ + --target vllm-openai \ + --platform "linux/arm64" \ -t gh200-test \ --build-arg max_jobs=66 \ --build-arg nvcc_threads=2 \ diff --git a/docs/source/serving/deploying_with_docker.rst b/docs/source/serving/deploying_with_docker.rst index 11a9f12fd17cd..56f0020a1011a 100644 --- a/docs/source/serving/deploying_with_docker.rst +++ b/docs/source/serving/deploying_with_docker.rst @@ -54,16 +54,13 @@ of PyTorch Nightly and should be considered **experimental**. Using the flag `-- # Example of building on Nvidia GH200 server. (Memory usage: ~12GB, Build time: ~1475s / ~25 min, Image size: 7.26GB) $ DOCKER_BUILDKIT=1 sudo docker build . \ --target vllm-openai \ - -platform "linux/arm64" \ + --platform "linux/arm64" \ -t vllm/vllm-gh200-openai:latest \ --build-arg max_jobs=66 \ --build-arg nvcc_threads=2 \ --build-arg torch_cuda_arch_list="9.0+PTX" \ --build-arg vllm_fa_cmake_gpu_arches="90-real" - - - To run vLLM: .. code-block:: console From 0064f697d318a2ce38342f7c20754cf229311b8b Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Mon, 16 Dec 2024 22:39:58 -0500 Subject: [PATCH 390/397] [CI] Add test case with JSON schema using references + use xgrammar by default with OpenAI parse (#10935) Signed-off-by: mgoin --- tests/entrypoints/conftest.py | 39 +++++++++++++++++++ tests/entrypoints/llm/test_guided_generate.py | 28 +++++++++++++ vllm/entrypoints/openai/protocol.py | 2 +- 3 files changed, 68 insertions(+), 1 deletion(-) diff --git a/tests/entrypoints/conftest.py b/tests/entrypoints/conftest.py index 0f7d15e1d85aa..ef74062ce4b41 100644 --- a/tests/entrypoints/conftest.py +++ b/tests/entrypoints/conftest.py @@ -100,6 +100,45 @@ def sample_complex_json_schema(): } +@pytest.fixture +def sample_definition_json_schema(): + return { + '$defs': { + 'Step': { + 'properties': { + 'explanation': { + 'title': 'Explanation', + 'type': 'string' + }, + 'output': { + 'title': 'Output', + 'type': 'string' + } + }, + 'required': ['explanation', 'output'], + 'title': 'Step', + 'type': 'object' + } + }, + 'properties': { + 'steps': { + 'items': { + '$ref': '#/$defs/Step' + }, + 'title': 'Steps', + 'type': 'array' + }, + 'final_answer': { + 'title': 'Final Answer', + 'type': 'string' + } + }, + 'required': ['steps', 'final_answer'], + 'title': 'MathReasoning', + 'type': 'object' + } + + @pytest.fixture def sample_guided_choice(): return [ diff --git a/tests/entrypoints/llm/test_guided_generate.py b/tests/entrypoints/llm/test_guided_generate.py index de6257cfc551c..ed50ec6bbc9eb 100644 --- a/tests/entrypoints/llm/test_guided_generate.py +++ b/tests/entrypoints/llm/test_guided_generate.py @@ -104,6 +104,34 @@ def test_guided_complex_json_completion(sample_complex_json_schema, llm): schema=sample_complex_json_schema) +@pytest.mark.skip_global_cleanup +def test_guided_definition_json_completion(sample_definition_json_schema, llm): + sampling_params = SamplingParams(temperature=1.0, + max_tokens=1000, + guided_decoding=GuidedDecodingParams( + json=sample_definition_json_schema)) + outputs = llm.generate(prompts=[ + f"Give an example JSON for solving 8x + 7 = -23 " + f"that fits this schema: {sample_definition_json_schema}" + ] * 2, + sampling_params=sampling_params, + use_tqdm=True) + + assert outputs is not None + + for output in outputs: + assert output is not None + assert isinstance(output, RequestOutput) + prompt = output.prompt + + generated_text = output.outputs[0].text + assert generated_text is not None + print(f"Prompt: {prompt!r}, Generated text: 
{generated_text!r}") + output_json = json.loads(generated_text) + jsonschema.validate(instance=output_json, + schema=sample_definition_json_schema) + + @pytest.mark.skip_global_cleanup def test_guided_choice_completion(sample_guided_choice, llm): sampling_params = SamplingParams( diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index 6ed7c2e9dcd6b..5a70e0952666b 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -387,7 +387,7 @@ def to_sampling_params( assert json_schema is not None self.guided_json = json_schema.json_schema if self.guided_decoding_backend is None: - self.guided_decoding_backend = "lm-format-enforcer" + self.guided_decoding_backend = "xgrammar" guided_decoding = GuidedDecodingParams.from_optional( json=self._get_guided_json_from_tool() or self.guided_json, From 66d4b16724226e9f377551198cc7425c12ddafae Mon Sep 17 00:00:00 2001 From: kYLe Date: Tue, 17 Dec 2024 00:09:58 -0600 Subject: [PATCH 391/397] [Frontend] Add OpenAI API support for input_audio (#11027) Signed-off-by: DarkLight1337 Co-authored-by: DarkLight1337 --- .../serving/openai_compatible_server.md | 10 +- docs/source/usage/multimodal_inputs.rst | 90 ++++++++++++- ...i_chat_completion_client_for_multimodal.py | 34 ++++- tests/entrypoints/openai/test_audio.py | 125 +++++++++++++++++- vllm/entrypoints/chat_utils.py | 65 +++++++-- 5 files changed, 301 insertions(+), 23 deletions(-) diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md index 14a5b02d72aa5..1bc8d32d2d161 100644 --- a/docs/source/serving/openai_compatible_server.md +++ b/docs/source/serving/openai_compatible_server.md @@ -34,11 +34,6 @@ We currently support the following OpenAI APIs: - *Note: `suffix` parameter is not supported.* - [Chat Completions API](#chat-api) (`/v1/chat/completions`) - Only applicable to [text generation models](../models/generative_models.rst) (`--task generate`) with a [chat template](#chat-template). - - [Vision](https://platform.openai.com/docs/guides/vision)-related parameters are supported; see [Multimodal Inputs](../usage/multimodal_inputs.rst). - - *Note: `image_url.detail` parameter is not supported.* - - We also support `audio_url` content type for audio files. - - Refer to [vllm.entrypoints.chat_utils](https://github.com/vllm-project/vllm/tree/main/vllm/entrypoints/chat_utils.py) for the exact schema. - - *TODO: Support `input_audio` content type as defined [here](https://github.com/openai/openai-python/blob/v1.52.2/src/openai/types/chat/chat_completion_content_part_input_audio_param.py).* - *Note: `parallel_tool_calls` and `user` parameters are ignored.* - [Embeddings API](#embeddings-api) (`/v1/embeddings`) - Only applicable to [embedding models](../models/pooling_models.rst) (`--task embed`). @@ -209,6 +204,11 @@ The following extra parameters are supported: Refer to [OpenAI's API reference](https://platform.openai.com/docs/api-reference/chat) for more details. +We support both [Vision](https://platform.openai.com/docs/guides/vision)- and +[Audio](https://platform.openai.com/docs/guides/audio?audio-generation-quickstart-example=audio-in)-related parameters; +see our [Multimodal Inputs](../usage/multimodal_inputs.rst) guide for more information. +- *Note: `image_url.detail` parameter is not supported.* + #### Extra parameters The following [sampling parameters (click through to see documentation)](../dev/sampling_params.rst) are supported. 
diff --git a/docs/source/usage/multimodal_inputs.rst b/docs/source/usage/multimodal_inputs.rst index 1e00f26f9a3ba..680382e457cc5 100644 --- a/docs/source/usage/multimodal_inputs.rst +++ b/docs/source/usage/multimodal_inputs.rst @@ -315,7 +315,95 @@ You can use `these tests `_. +Here is a simple example using Ultravox-v0.3. + +First, launch the OpenAI-compatible server: + +.. code-block:: bash + + vllm serve fixie-ai/ultravox-v0_3 + +Then, you can use the OpenAI client as follows: + +.. code-block:: python + + import base64 + import requests + from openai import OpenAI + from vllm.assets.audio import AudioAsset + + def encode_base64_content_from_url(content_url: str) -> str: + """Encode a content retrieved from a remote url to base64 format.""" + + with requests.get(content_url) as response: + response.raise_for_status() + result = base64.b64encode(response.content).decode('utf-8') + + return result + + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" + + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + + # Any format supported by librosa is supported + audio_url = AudioAsset("winning_call").url + audio_base64 = encode_base64_content_from_url(audio_url) + + chat_completion_from_base64 = client.chat.completions.create( + messages=[{ + "role": "user", + "content": [ + { + "type": "text", + "text": "What's in this audio?" + }, + { + "type": "input_audio", + "input_audio": { + "data": audio_base64, + "format": "wav" + }, + }, + ], + }], + model=model, + max_completion_tokens=64, + ) + + result = chat_completion_from_base64.choices[0].message.content + print("Chat completion output from input audio:", result) + +Alternatively, you can pass :code:`audio_url`, which is the audio counterpart of :code:`image_url` for image input: + +.. code-block:: python + + chat_completion_from_url = client.chat.completions.create( + messages=[{ + "role": "user", + "content": [ + { + "type": "text", + "text": "What's in this audio?" + }, + { + "type": "audio_url", + "audio_url": { + "url": audio_url + }, + }, + ], + }], + model=model, + max_completion_tokens=64, + ) + + result = chat_completion_from_url.choices[0].message.content + print("Chat completion output from audio url:", result) A full code example can be found in `examples/openai_chat_completion_client_for_multimodal.py `_. diff --git a/examples/openai_chat_completion_client_for_multimodal.py b/examples/openai_chat_completion_client_for_multimodal.py index 0ec4f71dddf93..6a160fd70423f 100644 --- a/examples/openai_chat_completion_client_for_multimodal.py +++ b/examples/openai_chat_completion_client_for_multimodal.py @@ -153,10 +153,37 @@ def run_multi_image() -> None: # Audio input inference def run_audio() -> None: - # Any format supported by librosa is supported audio_url = AudioAsset("winning_call").url + audio_base64 = encode_base64_content_from_url(audio_url) + + # OpenAI-compatible schema (`input_audio`) + chat_completion_from_base64 = client.chat.completions.create( + messages=[{ + "role": + "user", + "content": [ + { + "type": "text", + "text": "What's in this audio?" 
+ }, + { + "type": "input_audio", + "input_audio": { + # Any format supported by librosa is supported + "data": audio_base64, + "format": "wav" + }, + }, + ], + }], + model=model, + max_completion_tokens=64, + ) + + result = chat_completion_from_base64.choices[0].message.content + print("Chat completion output from input audio:", result) - # Use audio url in the payload + # HTTP URL chat_completion_from_url = client.chat.completions.create( messages=[{ "role": @@ -169,6 +196,7 @@ def run_audio() -> None: { "type": "audio_url", "audio_url": { + # Any format supported by librosa is supported "url": audio_url }, }, @@ -181,7 +209,7 @@ def run_audio() -> None: result = chat_completion_from_url.choices[0].message.content print("Chat completion output from audio url:", result) - audio_base64 = encode_base64_content_from_url(audio_url) + # base64 URL chat_completion_from_base64 = client.chat.completions.create( messages=[{ "role": diff --git a/tests/entrypoints/openai/test_audio.py b/tests/entrypoints/openai/test_audio.py index b579dcbb5c402..0a29d77e73abc 100644 --- a/tests/entrypoints/openai/test_audio.py +++ b/tests/entrypoints/openai/test_audio.py @@ -155,6 +155,61 @@ async def test_single_chat_session_audio_base64encoded( assert message.content is not None and len(message.content) >= 0 +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +@pytest.mark.parametrize("audio_url", TEST_AUDIO_URLS) +async def test_single_chat_session_input_audio( + client: openai.AsyncOpenAI, model_name: str, audio_url: str, + base64_encoded_audio: Dict[str, str]): + messages = [{ + "role": + "user", + "content": [ + { + "type": "input_audio", + "input_audio": { + "data": base64_encoded_audio[audio_url], + "format": "wav" + } + }, + { + "type": "text", + "text": "What's happening in this audio?" 
+ }, + ], + }] + + # test single completion + chat_completion = await client.chat.completions.create( + model=model_name, + messages=messages, + max_completion_tokens=10, + logprobs=True, + top_logprobs=5) + assert len(chat_completion.choices) == 1 + + choice = chat_completion.choices[0] + assert choice.finish_reason == "length" + assert chat_completion.usage == openai.types.CompletionUsage( + completion_tokens=10, prompt_tokens=202, total_tokens=212) + + message = choice.message + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 10 + assert message.role == "assistant" + messages.append({"role": "assistant", "content": message.content}) + + # test multi-turn dialogue + messages.append({"role": "user", "content": "express your result in json"}) + chat_completion = await client.chat.completions.create( + model=model_name, + messages=messages, + max_completion_tokens=10, + ) + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 0 + + @pytest.mark.asyncio @pytest.mark.parametrize("model_name", [MODEL_NAME]) @pytest.mark.parametrize("audio_url", TEST_AUDIO_URLS) @@ -212,11 +267,72 @@ async def test_chat_streaming_audio(client: openai.AsyncOpenAI, assert "".join(chunks) == output +@pytest.mark.asyncio +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +@pytest.mark.parametrize("audio_url", TEST_AUDIO_URLS) +async def test_chat_streaming_input_audio(client: openai.AsyncOpenAI, + model_name: str, audio_url: str, + base64_encoded_audio: Dict[str, + str]): + messages = [{ + "role": + "user", + "content": [ + { + "type": "input_audio", + "input_audio": { + "data": base64_encoded_audio[audio_url], + "format": "wav" + } + }, + { + "type": "text", + "text": "What's happening in this audio?" 
+ }, + ], + }] + + # test single completion + chat_completion = await client.chat.completions.create( + model=model_name, + messages=messages, + max_completion_tokens=10, + temperature=0.0, + ) + output = chat_completion.choices[0].message.content + stop_reason = chat_completion.choices[0].finish_reason + + # test streaming + stream = await client.chat.completions.create( + model=model_name, + messages=messages, + max_completion_tokens=10, + temperature=0.0, + stream=True, + ) + chunks: List[str] = [] + finish_reason_count = 0 + async for chunk in stream: + delta = chunk.choices[0].delta + if delta.role: + assert delta.role == "assistant" + if delta.content: + chunks.append(delta.content) + if chunk.choices[0].finish_reason is not None: + finish_reason_count += 1 + # finish reason should only return in last block + assert finish_reason_count == 1 + assert chunk.choices[0].finish_reason == stop_reason + assert delta.content + assert "".join(chunks) == output + + @pytest.mark.asyncio @pytest.mark.parametrize("model_name", [MODEL_NAME]) @pytest.mark.parametrize("audio_url", TEST_AUDIO_URLS) async def test_multi_audio_input(client: openai.AsyncOpenAI, model_name: str, - audio_url: str): + audio_url: str, + base64_encoded_audio: Dict[str, str]): messages = [{ "role": @@ -229,9 +345,10 @@ async def test_multi_audio_input(client: openai.AsyncOpenAI, model_name: str, } }, { - "type": "audio_url", - "audio_url": { - "url": audio_url + "type": "input_audio", + "input_audio": { + "data": base64_encoded_audio[audio_url], + "format": "wav" } }, { diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index aaa5cd759366a..3df08c740d65b 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -13,7 +13,8 @@ # yapf conflicts with isort for this block # yapf: disable from openai.types.chat import (ChatCompletionAssistantMessageParam, - ChatCompletionContentPartImageParam) + ChatCompletionContentPartImageParam, + ChatCompletionContentPartInputAudioParam) from openai.types.chat import ( ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam) from openai.types.chat import (ChatCompletionContentPartRefusalParam, @@ -105,6 +106,7 @@ class CustomChatCompletionContentSimpleVideoParam(TypedDict, total=False): ChatCompletionContentPartParam: TypeAlias = Union[ OpenAIChatCompletionContentPartParam, ChatCompletionContentPartAudioParam, + ChatCompletionContentPartInputAudioParam, ChatCompletionContentPartVideoParam, ChatCompletionContentPartRefusalParam, CustomChatCompletionContentSimpleImageParam, CustomChatCompletionContentSimpleAudioParam, @@ -519,6 +521,10 @@ def parse_image(self, image_url: str) -> None: def parse_audio(self, audio_url: str) -> None: raise NotImplementedError + @abstractmethod + def parse_input_audio(self, input_audio: Dict[str, str]) -> None: + raise NotImplementedError + @abstractmethod def parse_video(self, video_url: str) -> None: raise NotImplementedError @@ -545,6 +551,15 @@ def parse_audio(self, audio_url: str) -> None: placeholder = self._tracker.add("audio", audio) self._add_placeholder(placeholder) + def parse_input_audio(self, input_audio: Dict[str, str]) -> None: + input_audio_data = input_audio.get("data","") + input_audio_format = input_audio.get("format","") + audio_url = f"data:audio/{input_audio_format};base64,{input_audio_data}" + audio = get_and_parse_audio(audio_url) + + placeholder = self._tracker.add("audio", audio) + self._add_placeholder(placeholder) + def parse_video(self, video_url: str) -> None: video = 
get_and_parse_video(video_url) @@ -574,6 +589,15 @@ def parse_audio(self, audio_url: str) -> None: placeholder = self._tracker.add("audio", audio_coro) self._add_placeholder(placeholder) + def parse_input_audio(self, input_audio: Dict[str, str]) -> None: + input_audio_data = input_audio.get("data","") + input_audio_format = input_audio.get("format","") + audio_url = f"data:audio/{input_audio_format};base64,{input_audio_data}" + audio_coro = async_get_and_parse_audio(audio_url) + + placeholder = self._tracker.add("audio", audio_coro) + self._add_placeholder(placeholder) + def parse_video(self, video_url: str) -> None: video = async_get_and_parse_video(video_url) @@ -667,17 +691,22 @@ def _get_full_multimodal_text_prompt(placeholder_counts: Dict[str, int], _TextParser = partial(cast, ChatCompletionContentPartTextParam) _ImageParser = partial(cast, ChatCompletionContentPartImageParam) _AudioParser = partial(cast, ChatCompletionContentPartAudioParam) +_InputAudioParser = partial(cast, ChatCompletionContentPartInputAudioParam) _RefusalParser = partial(cast, ChatCompletionContentPartRefusalParam) _VideoParser = partial(cast, ChatCompletionContentPartVideoParam) # Define a mapping from part types to their corresponding parsing functions. -MM_PARSER_MAP: Dict[str, Callable[[ChatCompletionContentPartParam], str]] = { +MM_PARSER_MAP: Dict[str, + Callable[[ChatCompletionContentPartParam], + Union[str, Dict[str,str]]]] = { "text": lambda part: _TextParser(part).get("text", ""), "image_url": lambda part: _ImageParser(part).get("image_url", {}).get("url", ""), "audio_url": lambda part: _AudioParser(part).get("audio_url", {}).get("url", ""), + "input_audio": + lambda part: _InputAudioParser(part).get("input_audio", {}), "refusal": lambda part: _RefusalParser(part).get("refusal", ""), "video_url": @@ -686,7 +715,8 @@ def _get_full_multimodal_text_prompt(placeholder_counts: Dict[str, int], def _parse_chat_message_content_mm_part( - part: ChatCompletionContentPartParam) -> Tuple[str, str]: + part: ChatCompletionContentPartParam) -> Tuple[str, + Union[str, Dict[str, str]]]: """ Parses a given multi-modal content part based on its type. @@ -717,6 +747,7 @@ def _parse_chat_message_content_mm_part( return part_type, content # Handle missing 'type' but provided direct URL fields. 
+ # 'type' is required field by pydantic if part_type is None: if part.get("image_url") is not None: image_params = cast(CustomChatCompletionContentSimpleImageParam, @@ -726,6 +757,9 @@ def _parse_chat_message_content_mm_part( audio_params = cast(CustomChatCompletionContentSimpleAudioParam, part) return "audio_url", audio_params.get("audio_url", "") + if part.get("input_audio") is not None: + input_audio_params = cast(Dict[str, str], part) + return "input_audio", input_audio_params if part.get("video_url") is not None: video_params = cast(CustomChatCompletionContentSimpleVideoParam, part) @@ -739,7 +773,7 @@ def _parse_chat_message_content_mm_part( VALID_MESSAGE_CONTENT_MM_PART_TYPES = ("text", "refusal", "image_url", - "audio_url", "video_url") + "audio_url", "input_audio", "video_url") def _parse_chat_message_content_parts( @@ -795,7 +829,7 @@ def _parse_chat_message_content_part( # Handle structured dictionary parts part_type, content = _parse_chat_message_content_mm_part(part) - # if part_type is text/refusal/image_url/audio_url/video_url but + # if part_type is text/refusal/image_url/audio_url/video_url/input_audio but # content is empty, log a warning and skip if part_type in VALID_MESSAGE_CONTENT_MM_PART_TYPES and not content: logger.warning( @@ -804,18 +838,30 @@ def _parse_chat_message_content_part( return None if part_type in ("text", "refusal"): - return {'type': 'text', 'text': content} if wrap_dicts else content + str_content = cast(str, content) + if wrap_dicts: + return {'type': 'text', 'text': str_content} + else: + return str_content if part_type == "image_url": - mm_parser.parse_image(content) + str_content = cast(str, content) + mm_parser.parse_image(str_content) return {'type': 'image'} if wrap_dicts else None if part_type == "audio_url": - mm_parser.parse_audio(content) + str_content = cast(str, content) + mm_parser.parse_audio(str_content) + return {'type': 'audio'} if wrap_dicts else None + + if part_type == "input_audio": + dict_content = cast(Dict[str, str], content) + mm_parser.parse_input_audio(dict_content) return {'type': 'audio'} if wrap_dicts else None if part_type == "video_url": - mm_parser.parse_video(content) + str_content = cast(str, content) + mm_parser.parse_video(str_content) return {'type': 'video'} if wrap_dicts else None raise NotImplementedError(f"Unknown part type: {part_type}") @@ -840,7 +886,6 @@ def _parse_chat_message_content( content = [ ChatCompletionContentPartTextParam(type="text", text=content) ] - result = _parse_chat_message_content_parts( role, content, # type: ignore From 59c9b6ebeba79b2d744eec86734a7e13b03dcab7 Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Mon, 16 Dec 2024 22:10:57 -0800 Subject: [PATCH 392/397] [V1][VLM] Proper memory profiling for image language models (#11210) Signed-off-by: Roger Wang Co-authored-by: ywang96 --- vllm/config.py | 8 ++++ vllm/model_executor/models/pixtral.py | 5 ++ vllm/multimodal/registry.py | 23 +++++++-- vllm/v1/core/scheduler.py | 7 ++- vllm/v1/engine/mm_input_mapper.py | 1 + vllm/v1/worker/gpu_model_runner.py | 67 ++++++++++++++++++++++++--- 6 files changed, 98 insertions(+), 13 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 9cfd08024ea7b..9ecd3e72afa9f 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1280,6 +1280,14 @@ class SchedulerConfig: is_multimodal_model: bool = False + # FIXME(woosuk & ywang96): Below are placeholder values. We need to + # calculate the actual values from the configurations. 
+ # Multimodal encoder run compute budget, only used in V1 + max_num_encoder_input_tokens = 16384 + + # Multimodal encoder cache size, only used in V1 + encoder_cache_size = 16384 + # Whether to perform preemption by swapping or # recomputation. If not specified, we determine the mode as follows: # We use recomputation by default since it incurs lower overhead than diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 161d6b41bfa5f..f05ea195e043d 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -245,6 +245,11 @@ def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: # Do not split, return as tensor of shape [1, fs, hs] return image_embeds.unsqueeze(0) + # If the last split index is the last index in image_tokens, we + # ignore it to avoid empty split tensor + if split_indices[-1] == len(image_tokens): + split_indices = split_indices[:-1] + image_embeds = image_embeds.tensor_split(split_indices.cpu()) return image_embeds diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py index 03f8814a95356..6cd79d414c978 100644 --- a/vllm/multimodal/registry.py +++ b/vllm/multimodal/registry.py @@ -200,6 +200,23 @@ def register_max_image_tokens( """ return self.register_max_multimodal_tokens("image", max_mm_tokens) + def get_max_tokens_per_item_by_modality( + self, + model_config: "ModelConfig", + ) -> Mapping[str, int]: + """ + Get the maximum number of tokens per data item from each modality + for profiling the memory usage of a model. + + Note: + This is currently directly used only in V1. + """ + + return { + key: plugin.get_max_multimodal_tokens(model_config) + for key, plugin in self._plugins.items() + } + def get_max_tokens_by_modality( self, model_config: "ModelConfig", @@ -216,9 +233,9 @@ def get_max_tokens_by_modality( limits_per_plugin = self._limits_by_model[model_config] return { - key: (limits_per_plugin[key] * - plugin.get_max_multimodal_tokens(model_config)) - for key, plugin in self._plugins.items() + key: limits_per_plugin[key] * max_tokens_per_mm_item + for key, max_tokens_per_mm_item in + self.get_max_tokens_per_item_by_modality(model_config).items() } def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: diff --git a/vllm/v1/core/scheduler.py b/vllm/v1/core/scheduler.py index f76364f64033d..178532e477dae 100644 --- a/vllm/v1/core/scheduler.py +++ b/vllm/v1/core/scheduler.py @@ -73,14 +73,13 @@ def __init__( # NOTE(woosuk): Here, "encoder" includes the vision encoder (and # projector if needed). Currently, we assume that the encoder also # has the Transformer architecture (e.g., ViT). - # FIXME(woosuk): Below are placeholder values. We need to calculate the - # actual values from the configurations. - self.max_num_encoder_input_tokens = 16384 + self.max_num_encoder_input_tokens = self.scheduler_config.max_num_encoder_input_tokens #noqa: E501 # NOTE(woosuk): For the models without encoder (e.g., text-only models), # the encoder cache will not be initialized and used, regardless of # the cache size. This is because the memory space for the encoder cache # is preallocated in the profiling run. 
- self.encoder_cache_manager = EncoderCacheManager(cache_size=16384) + self.encoder_cache_manager = EncoderCacheManager( + cache_size=self.scheduler_config.encoder_cache_size) def schedule(self) -> "SchedulerOutput": # NOTE(woosuk) on the scheduling algorithm: diff --git a/vllm/v1/engine/mm_input_mapper.py b/vllm/v1/engine/mm_input_mapper.py index cca27c2218af7..6cdeba6f3f71e 100644 --- a/vllm/v1/engine/mm_input_mapper.py +++ b/vllm/v1/engine/mm_input_mapper.py @@ -54,6 +54,7 @@ def cache_hit_ratio(self, steps): logger.debug("MMInputMapper: cache_hit_ratio = %.2f ", self.mm_cache_hits / self.mm_cache_total) + # TODO: Support modalities beyond image. def process_inputs( self, mm_data: MultiModalDataDict, diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 67166fb05085c..c6fab5f05fcb3 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -10,15 +10,16 @@ from vllm.config import CompilationLevel, VllmConfig from vllm.distributed.parallel_state import graph_capture from vllm.forward_context import set_forward_context -from vllm.inputs import INPUT_REGISTRY, InputRegistry +from vllm.inputs import INPUT_REGISTRY from vllm.logger import init_logger from vllm.model_executor.model_loader import get_model -from vllm.multimodal import MultiModalKwargs +from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.sampling_params import SamplingType from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, LayerBlockType, cdiv, is_pin_memory_available) from vllm.v1.attention.backends.flash_attn import (FlashAttentionBackend, FlashAttentionMetadata) +from vllm.v1.engine.mm_input_mapper import MMInputMapperClient from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.sample.metadata import SamplingMetadata from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch @@ -35,7 +36,6 @@ def __init__( self, vllm_config: VllmConfig, device: torch.device, - input_registry: InputRegistry = INPUT_REGISTRY, ): self.vllm_config = vllm_config self.model_config = vllm_config.model_config @@ -77,7 +77,12 @@ def __init__( self.hidden_size = model_config.get_hidden_size() # Multi-modal data support - self.input_registry = input_registry + self.input_registry = INPUT_REGISTRY + self.mm_registry = MULTIMODAL_REGISTRY + # NOTE: mm_input_mapper is only used for memory profiling. + self.mm_input_mapper = MMInputMapperClient(self.model_config) + self.max_num_encoder_input_tokens = self.scheduler_config.max_num_encoder_input_tokens # noqa: E501 + self.encoder_cache_size = self.scheduler_config.encoder_cache_size # Lazy initialization # self.model: nn.Module # Set after load_model @@ -599,8 +604,6 @@ def _dummy_run( return hidden_states def profile_run(self) -> None: - # TODO(woosuk): Profile the max memory usage of the encoder and - # the encoder cache. # use an empty tensor instead of `None`` to force Dynamo to pass # it by reference, rather by specializing on the value `None`. # the `dtype` argument does not matter, and we use `float32` as @@ -612,6 +615,57 @@ def profile_run(self) -> None: torch.tensor([], dtype=torch.float32, device=self.device) for _ in range(self.num_attn_layers) ] + + # Profile with multimodal encoder & encoder cache. + # TODO (ywang96): generalize this beyond image modality since + # mm_input_mapper only supports image inputs. + if self.is_multimodal_model: + + # Create dummy batch of multimodal inputs. 
+ dummy_request_data = self.input_registry.dummy_data_for_profiling( + model_config=self.model_config, + seq_len=self.max_num_tokens, + mm_registry=self.mm_registry, + ) + dummy_mm_data = dummy_request_data.multi_modal_data + dummy_mm_kwargs, _ = self.mm_input_mapper.process_inputs( + mm_data=dummy_mm_data, + mm_hashes=None, + mm_processor_kwargs=None, + precomputed_mm_inputs=None) + + # NOTE: Currently model is profiled with a single non-text + # modality even when it supports multiple. + max_tokens_per_mm_item = max( + self.mm_registry.get_max_tokens_per_item_by_modality( + self.model_config).values()) + + max_num_mm_items = min( + self.max_num_encoder_input_tokens, + self.encoder_cache_size) // max_tokens_per_mm_item + + # Dummy data definition in V0 may contain multiple multimodal items + # (e.g, multiple images) for a single request, therefore here we + # always replicate first item by max_num_mm_items times since in V1 + # they are scheduled to be processed separately. + batched_dummy_mm_inputs = MultiModalKwargs.batch( + [dummy_mm_kwargs[0]] * max_num_mm_items) + batched_dummy_mm_inputs = MultiModalKwargs.as_kwargs( + batched_dummy_mm_inputs, device=self.device) + + # Run multimodal encoder. + dummy_encoder_outputs = self.model.get_multimodal_embeddings( + **batched_dummy_mm_inputs) + assert len(dummy_encoder_outputs) == max_num_mm_items, ( + "Expected dimension 0 of encoder outputs to match the number " + f"of multimodal data items: {max_num_mm_items}, got " + f"{len(dummy_encoder_outputs)=} instead. This is most likely " + "due to the 'get_multimodal_embeddings' method of the model " + "not implemented correctly.") + + # Cache the dummy encoder outputs. + self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs)) + # Trigger compilation for general shape. hidden_states = self._dummy_run(self.model, self.max_num_tokens, dummy_kv_caches) @@ -620,6 +674,7 @@ def profile_run(self) -> None: # TODO(woosuk): Consider the memory usage of the sampler. 
torch.cuda.synchronize() del hidden_states, logits + self.encoder_cache.clear() gc.collect() def capture_model(self) -> None: From e88db68cf5712956f36e77c288699592327b15bd Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Tue, 17 Dec 2024 14:11:06 +0800 Subject: [PATCH 393/397] [Platform] platform agnostic for EngineArgs initialization (#11225) Signed-off-by: wangxiyuan --- vllm/engine/arg_utils.py | 8 ++------ vllm/platforms/cpu.py | 3 +++ vllm/platforms/cuda.py | 4 ++++ vllm/platforms/hpu.py | 6 ++++++ vllm/platforms/neuron.py | 6 ++++++ vllm/platforms/openvino.py | 3 +++ vllm/platforms/rocm.py | 4 ++++ vllm/platforms/tpu.py | 5 +++++ vllm/platforms/xpu.py | 4 ++++ 9 files changed, 37 insertions(+), 6 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 06b8542779dc0..f6d276fe7c0c8 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -112,9 +112,7 @@ class EngineArgs: pipeline_parallel_size: int = 1 tensor_parallel_size: int = 1 max_parallel_loading_workers: Optional[int] = None - # NOTE(kzawora): default block size for Gaudi should be 128 - # smaller sizes still work, but very inefficiently - block_size: int = 16 if not current_platform.is_hpu() else 128 + block_size: Optional[int] = None enable_prefix_caching: Optional[bool] = None disable_sliding_window: bool = False use_v2_block_manager: bool = True @@ -1036,9 +1034,7 @@ def create_engine_config(self, self.enable_prefix_caching = False cache_config = CacheConfig( - # neuron needs block_size = max_model_len - block_size=self.block_size if self.device != "neuron" else - (self.max_model_len if self.max_model_len is not None else 0), + block_size=self.block_size, gpu_memory_utilization=self.gpu_memory_utilization, swap_space=self.swap_space, cache_dtype=self.kv_cache_dtype, diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index aad8755d9fcd8..d95a2b4cd5565 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -60,6 +60,9 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE if kv_cache_space >= 0: diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index ae1fd6d5ce068..3c5350b778345 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -137,6 +137,10 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: else: parallel_config.worker_cls = "vllm.worker.worker.Worker" + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + # NVML utils # Note that NVML is not affected by `CUDA_VISIBLE_DEVICES`, diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 2b947d280f9f8..0a44f2b74163a 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -48,6 +48,12 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: if parallel_config.worker_cls == "auto": parallel_config.worker_cls = "vllm.worker.hpu_worker.HPUWorker" + # NOTE(kzawora): default block size for Gaudi should be 128 + # smaller sizes still work, but very inefficiently + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 128 + @classmethod def is_pin_memory_available(cls): logger.warning("Pin memory is not supported on HPU.") diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 86113523385f6..a4bbbd27c8a89 
100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -33,6 +33,12 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config.worker_cls = \ "vllm.worker.neuron_worker.NeuronWorker" + cache_config = vllm_config.cache_config + if cache_config: + # neuron needs block_size = max_model_len + vllm_config.cache_config.block_size = \ + vllm_config.model_config.max_model_len + @classmethod def is_pin_memory_available(cls) -> bool: logger.warning("Pin memory is not supported on Neuron.") diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index ccd94e8adb3b1..16eb8dc81efc2 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -87,6 +87,9 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: # check and update cache config ov_core = ov.Core() cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + if envs.VLLM_OPENVINO_CPU_KV_CACHE_PRECISION == "u8": if not OpenVinoPlatform.is_openvino_cpu(): logger.info("VLLM_OPENVINO_CPU_KV_CACHE_PRECISION is" diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 0133f26a0b1bc..7778b565372cb 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -84,6 +84,10 @@ def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + parallel_config = vllm_config.parallel_config scheduler_config = vllm_config.scheduler_config if parallel_config.worker_cls == "auto": diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 10d874349f36b..77f5c8401424b 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -46,6 +46,11 @@ def inference_mode(cls): @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: from vllm.config import CompilationLevel + + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + compilation_config = vllm_config.compilation_config if compilation_config.level == CompilationLevel.NO_COMPILATION: # TPU does not support NO_COMPILATION diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index c20190e789d7e..78e17c2afec65 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -51,6 +51,10 @@ def inference_mode(): @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + # check and update model config model_config = vllm_config.model_config if model_config.dtype == torch.bfloat16: From 2bfdbf2a36256bb08547cea3d4ef83b5d27c4b04 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Tue, 17 Dec 2024 01:11:33 -0500 Subject: [PATCH 394/397] [V1][Core] Use weakref.finalize instead of atexit (#11242) Signed-off-by: Tyler Michael Smith --- vllm/v1/engine/core_client.py | 13 ++----------- vllm/v1/executor/multiproc_executor.py | 10 +++------- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index ff25a9b2e9cac..d56fcbdb1e7c4 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -1,5 +1,5 @@ -import atexit import os +import weakref from typing import List, Optional import 
msgspec @@ -165,15 +165,9 @@ def __init__( ready_path=ready_path, # type: ignore[misc] **kwargs, ) - atexit.register(self.shutdown) + self._finalizer = weakref.finalize(self, self.shutdown) def shutdown(self): - # During final garbage collection in process shutdown, atexit may be - # None. - if atexit: - # in case shutdown gets called via __del__ first - atexit.unregister(self.shutdown) - # Shut down the zmq context. self.ctx.destroy(linger=0) @@ -197,9 +191,6 @@ def shutdown(self): os.remove(socket_file) self.proc_handle = None - def __del__(self): - self.shutdown() - class SyncMPClient(MPClient): """Synchronous client for multi-proc EngineCore.""" diff --git a/vllm/v1/executor/multiproc_executor.py b/vllm/v1/executor/multiproc_executor.py index 17441dacdc5cf..128101aa6956d 100644 --- a/vllm/v1/executor/multiproc_executor.py +++ b/vllm/v1/executor/multiproc_executor.py @@ -1,9 +1,9 @@ -import atexit import os import pickle import signal import sys import time +import weakref from dataclasses import dataclass from enum import Enum, auto from multiprocessing.process import BaseProcess @@ -37,7 +37,7 @@ class MultiprocExecutor(Executor): def __init__(self, vllm_config: VllmConfig) -> None: # Call self.shutdown at exit to clean up # and ensure workers will be terminated. - atexit.register(self.shutdown) + self._finalizer = weakref.finalize(self, self.shutdown) self.vllm_config = vllm_config self.parallel_config = vllm_config.parallel_config @@ -195,14 +195,10 @@ def _cleanup_sockets(self): os.remove(socket_path) def shutdown(self): - if atexit: - # in case shutdown was called explicitly, we don't need to call it - # again - atexit.unregister(self.shutdown) """Properly shut down the executor and its workers""" if getattr(self, 'shutting_down', False): self.shutting_down = True - for w in self.workers: #TODO: not sure if needed + for w in self.workers: w.worker_response_mq = None self._ensure_worker_termination() From 02222a0256f60319f5bcd56d1d036a943d6334f8 Mon Sep 17 00:00:00 2001 From: Roger Wang <136131678+ywang96@users.noreply.github.com> Date: Mon, 16 Dec 2024 22:57:02 -0800 Subject: [PATCH 395/397] [Misc] Kernel Benchmark for `RMSNorm` (#11241) Signed-off-by: Roger Wang Co-authored-by: Xiaoyu Zhang --- benchmarks/kernels/benchmark_rmsnorm.py | 262 ++++++++++++++++++++++++ 1 file changed, 262 insertions(+) create mode 100644 benchmarks/kernels/benchmark_rmsnorm.py diff --git a/benchmarks/kernels/benchmark_rmsnorm.py b/benchmarks/kernels/benchmark_rmsnorm.py new file mode 100644 index 0000000000000..baa5de0fff1bd --- /dev/null +++ b/benchmarks/kernels/benchmark_rmsnorm.py @@ -0,0 +1,262 @@ +import itertools +from typing import Optional, Tuple, Union + +import torch +import triton +from flashinfer.norm import fused_add_rmsnorm, rmsnorm +from torch import nn + +from vllm import _custom_ops as vllm_ops + + +class HuggingFaceRMSNorm(nn.Module): + + def __init__(self, hidden_size: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward( + self, + x: torch.Tensor, + residual: Optional[torch.Tensor] = None, + ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + orig_dtype = x.dtype + x = x.to(torch.float32) + if residual is not None: + x = x + residual.to(torch.float32) + residual = x.to(orig_dtype) + + variance = x.pow(2).mean(dim=-1, keepdim=True) + x = x * torch.rsqrt(variance + self.variance_epsilon) + x = x.to(orig_dtype) * self.weight + if residual is None: + return x + else: + return 
x, residual + + +def rmsnorm_naive( + x: torch.Tensor, + weight: torch.Tensor, + residual: Optional[torch.Tensor] = None, + eps: float = 1e-6, +): + naive_norm = HuggingFaceRMSNorm(x.shape[-1], eps=eps) + naive_norm.weight = nn.Parameter(weight) + naive_norm = naive_norm.to(x.device) + + orig_shape = x.shape + x = x.view(-1, x.shape[-1]) + if residual is not None: + residual = residual.view(-1, residual.shape[-1]) + + output = naive_norm(x, residual) + + if isinstance(output, tuple): + output = (output[0].view(orig_shape), output[1].view(orig_shape)) + else: + output = output.view(orig_shape) + return output + + +def rmsnorm_flashinfer( + x: torch.Tensor, + weight: torch.Tensor, + residual: Optional[torch.Tensor] = None, + eps: float = 1e-6, +): + orig_shape = x.shape + x = x.view(-1, x.shape[-1]) + if residual is not None: + residual = residual.view(-1, residual.shape[-1]) + + if residual is not None: + fused_add_rmsnorm(x, residual, weight, eps) + output = (x, residual) + else: + output = rmsnorm(x, weight, eps) + + if isinstance(output, tuple): + output = (output[0].view(orig_shape), output[1].view(orig_shape)) + else: + output = output.view(orig_shape) + return output + + +def rmsnorm_vllm( + x: torch.Tensor, + weight: torch.Tensor, + residual: Optional[torch.Tensor] = None, + eps: float = 1e-6, +): + orig_shape = x.shape + x = x.view(-1, x.shape[-1]) + if residual is not None: + residual = residual.view(-1, residual.shape[-1]) + + if residual is not None: + vllm_ops.fused_add_rms_norm(x, residual, weight, eps) + output = (x, residual) + else: + out = torch.empty_like(x) + vllm_ops.rms_norm(out, x, weight, eps) + output = out + + if isinstance(output, tuple): + output = (output[0].view(orig_shape), output[1].view(orig_shape)) + else: + output = output.view(orig_shape) + return output + + +def calculate_diff(batch_size, seq_len, hidden_size, use_residual=True): + dtype = torch.bfloat16 + x = torch.randn(batch_size, + seq_len, + hidden_size, + dtype=dtype, + device="cuda") + weight = torch.ones(hidden_size, dtype=dtype, device="cuda") + residual = torch.randn_like(x) if use_residual else None + + output_naive = rmsnorm_naive( + x.clone(), weight, + residual.clone() if residual is not None else None) + output_flashinfer = rmsnorm_flashinfer( + x.clone(), weight, + residual.clone() if residual is not None else None) + output_vllm = rmsnorm_vllm( + x.clone(), weight, + residual.clone() if residual is not None else None) + + if use_residual: + output_naive = output_naive[0] + output_flashinfer = output_flashinfer[0] + output_vllm = output_vllm[0] + + print(f"Naive output={output_naive}") + print(f"FlashInfer output={output_flashinfer}") + print(f"VLLM output={output_vllm}") + + if torch.allclose(output_naive, output_flashinfer, atol=1e-2, + rtol=1e-2) and torch.allclose( + output_naive, output_vllm, atol=1e-2, rtol=1e-2): + print("✅ All implementations match") + else: + print("❌ Implementations differ") + + +batch_size_range = [2**i for i in range(0, 7, 2)] +seq_length_range = [2**i for i in range(6, 11, 1)] +head_num_range = [32, 48] +configs = list( + itertools.product(head_num_range, batch_size_range, seq_length_range)) + + +def get_benchmark(use_residual): + + @triton.testing.perf_report( + triton.testing.Benchmark( + x_names=["head_num", "batch_size", "seq_len"], + x_vals=[list(_) for _ in configs], + line_arg="provider", + line_vals=["huggingface", "flashinfer", "vllm"], + line_names=["HuggingFace", "FlashInfer", "vLLM"], + styles=[("blue", "-"), ("green", "-"), ("red", "-")], + 
ylabel="us", + plot_name= + f"rmsnorm-perf-{'with' if use_residual else 'without'}-residual", + args={}, + )) + def benchmark(head_num, batch_size, seq_len, provider): + dtype = torch.bfloat16 + hidden_size = head_num * 128 # assuming head_dim = 128 + + x = torch.randn(batch_size, + seq_len, + hidden_size, + dtype=dtype, + device="cuda") + weight = torch.ones(hidden_size, dtype=dtype, device="cuda") + residual = torch.randn_like(x) if use_residual else None + + quantiles = [0.5, 0.2, 0.8] + + if provider == "huggingface": + ms, min_ms, max_ms = triton.testing.do_bench( + lambda: rmsnorm_naive( + x.clone(), + weight, + residual.clone() if residual is not None else None, + ), + quantiles=quantiles, + ) + elif provider == "flashinfer": + ms, min_ms, max_ms = triton.testing.do_bench( + lambda: rmsnorm_flashinfer( + x.clone(), + weight, + residual.clone() if residual is not None else None, + ), + quantiles=quantiles, + ) + else: + ms, min_ms, max_ms = triton.testing.do_bench( + lambda: rmsnorm_vllm( + x.clone(), + weight, + residual.clone() if residual is not None else None, + ), + quantiles=quantiles, + ) + + return 1000 * ms, 1000 * max_ms, 1000 * min_ms + + return benchmark + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--batch-size", + type=int, + default=4, + help="Batch size", + ) + parser.add_argument( + "--seq-len", + type=int, + default=128, + help="Sequence length", + ) + parser.add_argument( + "--hidden-size", + type=int, + default=4096, + help="Hidden size (2nd dimension) of the sequence", + ) + parser.add_argument("--use-residual", + action="store_true", + help="Whether to use residual connection") + parser.add_argument( + "--save-path", + type=str, + default="./configs/rmsnorm/", + help="Path to save rmsnorm benchmark results", + ) + + args = parser.parse_args() + + # Run correctness test + calculate_diff(batch_size=args.batch_size, + seq_len=args.seq_len, + hidden_size=args.hidden_size, + use_residual=args.use_residual) + + # Get the benchmark function with proper use_residual setting + benchmark = get_benchmark(args.use_residual) + # Run performance benchmark + benchmark.run(print_data=True, save_path=args.save_path) From f9ecbb18bf03338a4272c933a49a87021363b048 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Tue, 17 Dec 2024 16:37:04 +0800 Subject: [PATCH 396/397] [Misc] Allow passing logits_soft_cap for xformers backend (#11252) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/attention/backends/xformers.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/vllm/attention/backends/xformers.py b/vllm/attention/backends/xformers.py index e2e989efb020c..3e59b3603d2c6 100644 --- a/vllm/attention/backends/xformers.py +++ b/vllm/attention/backends/xformers.py @@ -17,9 +17,7 @@ is_all_cross_attn_metadata_set, is_all_encoder_attn_metadata_set) from vllm.attention.ops.paged_attn import (PagedAttention, PagedAttentionMetadata) -from vllm.logger import init_logger - -logger = init_logger(__name__) +from vllm.utils import print_warning_once class XFormersBackend(AttentionBackend): @@ -386,8 +384,8 @@ def __init__( raise ValueError( "XFormers does not support block-sparse attention.") if logits_soft_cap is not None: - raise ValueError( - "XFormers does not support attention logits soft capping.") + print_warning_once("XFormers does not support logits soft cap. 
" + "Outputs may be slightly off.") self.num_heads = num_heads self.head_size = head_size self.scale = float(scale) From 2d1b9baa8f57fc59912c7bcd07fd630fb9d72c9d Mon Sep 17 00:00:00 2001 From: Joe Runde Date: Tue, 17 Dec 2024 13:26:32 -0700 Subject: [PATCH 397/397] [Bugfix] Fix request cancellation without polling (#11190) --- tests/entrypoints/openai/test_basic.py | 51 ++++++++++++++++ tests/test_utils.py | 6 +- tests/utils.py | 11 ++-- vllm/engine/async_llm_engine.py | 46 +++++++++------ vllm/entrypoints/api_server.py | 11 ++-- vllm/entrypoints/openai/api_server.py | 8 +++ vllm/entrypoints/openai/serving_chat.py | 5 -- vllm/entrypoints/openai/serving_completion.py | 3 +- vllm/entrypoints/openai/serving_embedding.py | 5 +- vllm/entrypoints/openai/serving_score.py | 5 +- vllm/entrypoints/utils.py | 57 ++++++++++++++++++ vllm/utils.py | 59 ++----------------- 12 files changed, 164 insertions(+), 103 deletions(-) create mode 100644 vllm/entrypoints/utils.py diff --git a/tests/entrypoints/openai/test_basic.py b/tests/entrypoints/openai/test_basic.py index 4616f363cc04a..547c1fd020928 100644 --- a/tests/entrypoints/openai/test_basic.py +++ b/tests/entrypoints/openai/test_basic.py @@ -1,6 +1,8 @@ +import asyncio from http import HTTPStatus from typing import List +import openai import pytest import pytest_asyncio import requests @@ -103,3 +105,52 @@ async def test_check_health(server: RemoteOpenAIServer): response = requests.get(server.url_for("health")) assert response.status_code == HTTPStatus.OK + + +@pytest.mark.parametrize( + "server_args", + [ + pytest.param(["--max-model-len", "10100"], + id="default-frontend-multiprocessing"), + pytest.param( + ["--disable-frontend-multiprocessing", "--max-model-len", "10100"], + id="disable-frontend-multiprocessing") + ], + indirect=True, +) +@pytest.mark.asyncio +async def test_request_cancellation(server: RemoteOpenAIServer): + # clunky test: send an ungodly amount of load in with short timeouts + # then ensure that it still responds quickly afterwards + + chat_input = [{"role": "user", "content": "Write a long story"}] + client = server.get_async_client(timeout=0.5) + tasks = [] + # Request about 2 million tokens + for _ in range(200): + task = asyncio.create_task( + client.chat.completions.create(messages=chat_input, + model=MODEL_NAME, + max_tokens=10000, + extra_body={"min_tokens": 10000})) + tasks.append(task) + + done, pending = await asyncio.wait(tasks, + return_when=asyncio.ALL_COMPLETED) + + # Make sure all requests were sent to the server and timed out + # (We don't want to hide other errors like 400s that would invalidate this + # test) + assert len(pending) == 0 + for d in done: + with pytest.raises(openai.APITimeoutError): + d.result() + + # If the server had not cancelled all the other requests, then it would not + # be able to respond to this one within the timeout + client = server.get_async_client(timeout=5) + response = await client.chat.completions.create(messages=chat_input, + model=MODEL_NAME, + max_tokens=10) + + assert len(response.choices) == 1 diff --git a/tests/test_utils.py b/tests/test_utils.py index 0bc9e5bc32a46..32a6b0aed66aa 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,7 +1,6 @@ import asyncio import os import socket -from functools import partial from typing import AsyncIterator, Tuple import pytest @@ -26,10 +25,7 @@ async def mock_async_iterator(idx: int): print(f"iterator {idx} cancelled") iterators = [mock_async_iterator(i) for i in range(3)] - merged_iterator = 
merge_async_iterators(*iterators, - is_cancelled=partial(asyncio.sleep, - 0, - result=False)) + merged_iterator = merge_async_iterators(*iterators) async def stream_output(generator: AsyncIterator[Tuple[int, str]]): async for idx, output in generator: diff --git a/tests/utils.py b/tests/utils.py index afeb708f3bcdc..bf3d88194e4ca 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -163,12 +163,11 @@ def get_client(self): api_key=self.DUMMY_API_KEY, ) - def get_async_client(self): - return openai.AsyncOpenAI( - base_url=self.url_for("v1"), - api_key=self.DUMMY_API_KEY, - max_retries=0, - ) + def get_async_client(self, **kwargs): + return openai.AsyncOpenAI(base_url=self.url_for("v1"), + api_key=self.DUMMY_API_KEY, + max_retries=0, + **kwargs) def _test_completion( diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 32396fd10188d..f50e20cf70323 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -1065,16 +1065,20 @@ async def generate( >>> # Process and return the final output >>> ... """ - async for output in await self.add_request( - request_id, - prompt, - sampling_params, - lora_request=lora_request, - trace_headers=trace_headers, - prompt_adapter_request=prompt_adapter_request, - priority=priority, - ): - yield LLMEngine.validate_output(output, RequestOutput) + try: + async for output in await self.add_request( + request_id, + prompt, + sampling_params, + lora_request=lora_request, + trace_headers=trace_headers, + prompt_adapter_request=prompt_adapter_request, + priority=priority, + ): + yield LLMEngine.validate_output(output, RequestOutput) + except asyncio.CancelledError: + await self.abort(request_id) + raise async def encode( self, @@ -1147,15 +1151,19 @@ async def encode( >>> # Process and return the final output >>> ... """ - async for output in await self.add_request( - request_id, - prompt, - pooling_params, - lora_request=lora_request, - trace_headers=trace_headers, - priority=priority, - ): - yield LLMEngine.validate_output(output, PoolingRequestOutput) + try: + async for output in await self.add_request( + request_id, + prompt, + pooling_params, + lora_request=lora_request, + trace_headers=trace_headers, + priority=priority, + ): + yield LLMEngine.validate_output(output, PoolingRequestOutput) + except asyncio.CancelledError: + await self.abort(request_id) + raise async def abort(self, request_id: str) -> None: """Abort a request. diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py index ea3c93f733038..95da1c6e7b9bf 100644 --- a/vllm/entrypoints/api_server.py +++ b/vllm/entrypoints/api_server.py @@ -17,11 +17,11 @@ from vllm.engine.arg_utils import AsyncEngineArgs from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.entrypoints.launcher import serve_http +from vllm.entrypoints.utils import with_cancellation from vllm.logger import init_logger from vllm.sampling_params import SamplingParams from vllm.usage.usage_lib import UsageContext -from vllm.utils import (FlexibleArgumentParser, iterate_with_cancellation, - random_uuid) +from vllm.utils import FlexibleArgumentParser, random_uuid from vllm.version import __version__ as VLLM_VERSION logger = init_logger("vllm.entrypoints.api_server") @@ -47,6 +47,11 @@ async def generate(request: Request) -> Response: - other fields: the sampling parameters (See `SamplingParams` for details). 
""" request_dict = await request.json() + return await _generate(request_dict, raw_request=request) + + +@with_cancellation +async def _generate(request_dict: dict, raw_request: Request) -> Response: prompt = request_dict.pop("prompt") stream = request_dict.pop("stream", False) sampling_params = SamplingParams(**request_dict) @@ -54,8 +59,6 @@ async def generate(request: Request) -> Response: assert engine is not None results_generator = engine.generate(prompt, sampling_params, request_id) - results_generator = iterate_with_cancellation( - results_generator, is_cancelled=request.is_disconnected) # Streaming case async def stream_results() -> AsyncGenerator[bytes, None]: diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 14e3a34ce141c..00e2d1a56f160 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -59,6 +59,7 @@ from vllm.entrypoints.openai.serving_tokenization import ( OpenAIServingTokenization) from vllm.entrypoints.openai.tool_parsers import ToolParserManager +from vllm.entrypoints.utils import with_cancellation from vllm.logger import init_logger from vllm.usage.usage_lib import UsageContext from vllm.utils import (FlexibleArgumentParser, get_open_zmq_ipc_path, @@ -311,6 +312,7 @@ async def health(raw_request: Request) -> Response: @router.post("/tokenize") +@with_cancellation async def tokenize(request: TokenizeRequest, raw_request: Request): handler = tokenization(raw_request) @@ -325,6 +327,7 @@ async def tokenize(request: TokenizeRequest, raw_request: Request): @router.post("/detokenize") +@with_cancellation async def detokenize(request: DetokenizeRequest, raw_request: Request): handler = tokenization(raw_request) @@ -353,6 +356,7 @@ async def show_version(): @router.post("/v1/chat/completions") +@with_cancellation async def create_chat_completion(request: ChatCompletionRequest, raw_request: Request): handler = chat(raw_request) @@ -373,6 +377,7 @@ async def create_chat_completion(request: ChatCompletionRequest, @router.post("/v1/completions") +@with_cancellation async def create_completion(request: CompletionRequest, raw_request: Request): handler = completion(raw_request) if handler is None: @@ -390,6 +395,7 @@ async def create_completion(request: CompletionRequest, raw_request: Request): @router.post("/v1/embeddings") +@with_cancellation async def create_embedding(request: EmbeddingRequest, raw_request: Request): handler = embedding(raw_request) if handler is None: @@ -407,6 +413,7 @@ async def create_embedding(request: EmbeddingRequest, raw_request: Request): @router.post("/score") +@with_cancellation async def create_score(request: ScoreRequest, raw_request: Request): handler = score(raw_request) if handler is None: @@ -424,6 +431,7 @@ async def create_score(request: ScoreRequest, raw_request: Request): @router.post("/v1/score") +@with_cancellation async def create_score_v1(request: ScoreRequest, raw_request: Request): logger.warning( "To indicate that Score API is not part of standard OpenAI API, we " diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 527418c635093..81bce0dd370bb 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -32,7 +32,6 @@ from vllm.sequence import Logprob from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer from vllm.transformers_utils.tokenizers import maybe_serialize_tool_calls -from vllm.utils import iterate_with_cancellation logger = 
init_logger(__name__) @@ -234,10 +233,6 @@ async def create_chat_completion( assert len(generators) == 1 result_generator, = generators - if raw_request: - result_generator = iterate_with_cancellation( - result_generator, raw_request.is_disconnected) - # Streaming response if request.stream: return self.chat_completion_stream_generator( diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index bd39a4c42e938..5cf9df92e296e 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -159,8 +159,7 @@ async def create_completion( # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) - result_generator = merge_async_iterators( - *generators, is_cancelled=raw_request.is_disconnected) + result_generator = merge_async_iterators(*generators) model_name = self._get_model_name(lora_request) num_prompts = len(engine_prompts) diff --git a/vllm/entrypoints/openai/serving_embedding.py b/vllm/entrypoints/openai/serving_embedding.py index fd501ad4f833e..879276646d2ba 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -202,10 +202,7 @@ async def create_embedding( # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) - result_generator = merge_async_iterators( - *generators, - is_cancelled=raw_request.is_disconnected if raw_request else None, - ) + result_generator = merge_async_iterators(*generators) num_prompts = len(engine_prompts) diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py index 6f5cc14ac37cc..101d170bee4d6 100644 --- a/vllm/entrypoints/openai/serving_score.py +++ b/vllm/entrypoints/openai/serving_score.py @@ -186,10 +186,7 @@ async def create_score( # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) - result_generator = merge_async_iterators( - *generators, - is_cancelled=raw_request.is_disconnected if raw_request else None, - ) + result_generator = merge_async_iterators(*generators) num_prompts = len(engine_prompts) diff --git a/vllm/entrypoints/utils.py b/vllm/entrypoints/utils.py new file mode 100644 index 0000000000000..e8a78d216d0f0 --- /dev/null +++ b/vllm/entrypoints/utils.py @@ -0,0 +1,57 @@ +import asyncio +import functools + +from fastapi import Request + + +async def listen_for_disconnect(request: Request) -> None: + """Returns if a disconnect message is received""" + while True: + message = await request.receive() + if message["type"] == "http.disconnect": + break + + +def with_cancellation(handler_func): + """Decorator that allows a route handler to be cancelled by client + disconnections. + + This does _not_ use request.is_disconnected, which does not work with + middleware. Instead this follows the pattern from + starlette.StreamingResponse, which simultaneously awaits on two tasks- one + to wait for an http disconnect message, and the other to do the work that we + want done. When the first task finishes, the other is cancelled. + + A core assumption of this method is that the body of the request has already + been read. This is a safe assumption to make for fastapi handlers that have + already parsed the body of the request into a pydantic model for us. + This decorator is unsafe to use elsewhere, as it will consume and throw away + all incoming messages for the request while it looks for a disconnect + message. 
+ + In the case where a `StreamingResponse` is returned by the handler, this + wrapper will stop listening for disconnects and instead the response object + will start listening for disconnects. + """ + + # Functools.wraps is required for this wrapper to appear to fastapi as a + # normal route handler, with the correct request type hinting. + @functools.wraps(handler_func) + async def wrapper(*args, **kwargs): + + # The request is either the second positional arg or `raw_request` + request = args[1] if len(args) > 1 else kwargs["raw_request"] + + handler_task = asyncio.create_task(handler_func(*args, **kwargs)) + cancellation_task = asyncio.create_task(listen_for_disconnect(request)) + + done, pending = await asyncio.wait([handler_task, cancellation_task], + return_when=asyncio.FIRST_COMPLETED) + for task in pending: + task.cancel() + + if handler_task in done: + return handler_task.result() + return None + + return wrapper diff --git a/vllm/utils.py b/vllm/utils.py index 73d2ae25f15ca..38c7dea6d2d3d 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -20,7 +20,7 @@ import uuid import warnings import weakref -from asyncio import FIRST_COMPLETED, AbstractEventLoop, Future, Task +from asyncio import FIRST_COMPLETED, AbstractEventLoop, Task from collections import UserDict, defaultdict from collections.abc import Iterable, Mapping from dataclasses import dataclass, field @@ -370,72 +370,23 @@ def _next_task(iterator: AsyncGenerator[T, None], return loop.create_task(iterator.__anext__()) # type: ignore[arg-type] -async def iterate_with_cancellation( - iterator: AsyncGenerator[T, None], - is_cancelled: Callable[[], Awaitable[bool]], -) -> AsyncGenerator[T, None]: - """Convert async iterator into one that polls the provided function - at least once per second to check for client cancellation. - """ - - loop = asyncio.get_running_loop() - - awaits: List[Future[T]] = [_next_task(iterator, loop)] - next_cancel_check: float = 0 - while True: - done, pending = await asyncio.wait(awaits, timeout=1.5) - - # Check for cancellation at most once per second - time_now = time.time() - if time_now >= next_cancel_check: - if await is_cancelled(): - with contextlib.suppress(BaseException): - awaits[0].cancel() - await iterator.aclose() - raise asyncio.CancelledError("client cancelled") - next_cancel_check = time_now + 1 - - if done: - try: - item = await awaits[0] - awaits[0] = _next_task(iterator, loop) - yield item - except StopAsyncIteration: - # we are done - return - - async def merge_async_iterators( - *iterators: AsyncGenerator[T, None], - is_cancelled: Optional[Callable[[], Awaitable[bool]]] = None, -) -> AsyncGenerator[Tuple[int, T], None]: + *iterators: AsyncGenerator[T, + None], ) -> AsyncGenerator[Tuple[int, T], None]: """Merge multiple asynchronous iterators into a single iterator. This method handle the case where some iterators finish before others. When it yields, it yields a tuple (i, item) where i is the index of the iterator that yields the item. - - It also optionally polls a provided function at least once per second - to check for client cancellation. 
""" loop = asyncio.get_running_loop() awaits = {_next_task(pair[1], loop): pair for pair in enumerate(iterators)} - timeout = None if is_cancelled is None else 1.5 - next_cancel_check: float = 0 try: while awaits: - done, pending = await asyncio.wait(awaits.keys(), - return_when=FIRST_COMPLETED, - timeout=timeout) - if is_cancelled is not None: - # Check for cancellation at most once per second - time_now = time.time() - if time_now >= next_cancel_check: - if await is_cancelled(): - raise asyncio.CancelledError("client cancelled") - next_cancel_check = time_now + 1 + done, _ = await asyncio.wait(awaits.keys(), + return_when=FIRST_COMPLETED) for d in done: pair = awaits.pop(d) try: