From 67701d072184abefd1d07426bca1e831701b916c Mon Sep 17 00:00:00 2001 From: ATMxsp01 Date: Fri, 13 Dec 2024 17:41:10 +0800 Subject: [PATCH 1/6] Add Modelscope option for GPU model chatglm3 --- .../GPU/HuggingFace/LLM/chatglm3/README.md | 5 +++++ .../GPU/HuggingFace/LLM/chatglm3/generate.py | 19 ++++++++++++++---- .../HuggingFace/LLM/chatglm3/streamchat.py | 20 +++++++++++++++---- 3 files changed, 36 insertions(+), 8 deletions(-) diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md index 111c628c526..0b334887955 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md @@ -23,6 +23,9 @@ conda activate llm # below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ + +# [optional] if you use modelscope as model hub, please make sure you are using 1.11.0 version +pip install modelscope==1.11.0 ``` ## 2. Configures OneAPI environment variables for Linux @@ -101,6 +104,7 @@ Arguments info: - `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the ChatGLM3 model to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/chatglm3-6b'`. - `--prompt PROMPT`: argument defining the prompt to be infered (with integrated prompt format for chat). It is default to be `'AI是什么?'`. - `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It is default to be `32`. +- `--modelscope`: using **Modelscope** as model hub instead of **huggingface**. #### Sample Output #### [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) @@ -146,3 +150,4 @@ Arguments info: - `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the ChatGLM3 model to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/chatglm3-6b'`. - `--question QUESTION`: argument defining the question to ask. It is default to be `"晚上睡不着应该怎么办"`. - `--disable-stream`: argument defining whether to stream chat. If include `--disable-stream` when running the script, the stream chat is disabled and `chat()` API is used. +- `--modelscope`: using **Modelscope** as model hub instead of **huggingface**. 
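In condensed form, the change that the next two diffs apply to `generate.py` and `streamchat.py` is: import `AutoTokenizer` from whichever hub was selected, fall back to a hub-specific default repo id, and pass a `model_hub` keyword through to IPEX-LLM's `AutoModel.from_pretrained`. A minimal sketch of that pattern (argument parsing and generation omitted; the repo ids and the keyword are taken from the diffs below):

```python
# Minimal sketch of the hub-selection pattern this patch introduces.
use_modelscope = True  # corresponds to passing --modelscope on the command line

if use_modelscope:
    from modelscope import AutoTokenizer   # ModelScope ships a transformers-compatible AutoTokenizer
    model_hub = 'modelscope'
    model_path = "ZhipuAI/chatglm3-6b"     # ModelScope default repo id
else:
    from transformers import AutoTokenizer
    model_hub = 'huggingface'
    model_path = "THUDM/chatglm3-6b"       # Hugging Face default repo id

from ipex_llm.transformers import AutoModel

# load_in_4bit converts the relevant layers to INT4; model_hub tells IPEX-LLM which hub to download from
model = AutoModel.from_pretrained(model_path,
                                  load_in_4bit=True,
                                  optimize_model=True,
                                  trust_remote_code=True,
                                  use_cache=True,
                                  model_hub=model_hub)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
```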
diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py index da730d70cfc..25230055698 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py @@ -20,7 +20,6 @@ import numpy as np from ipex_llm.transformers import AutoModel -from transformers import AutoTokenizer # you could tune the prompt based on your own model, # here the prompt tuning refers to https://github.com/THUDM/ChatGLM3/blob/main/PROMPT.md @@ -28,16 +27,27 @@ if __name__ == '__main__': parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for ChatGLM3 model') - parser.add_argument('--repo-id-or-model-path', type=str, default="THUDM/chatglm3-6b", + parser.add_argument('--repo-id-or-model-path', type=str, help='The huggingface repo id for the ChatGLM3 model to be downloaded' ', or the path to the huggingface checkpoint folder') parser.add_argument('--prompt', type=str, default="AI是什么?", help='Prompt to infer') parser.add_argument('--n-predict', type=int, default=32, help='Max tokens to predict') + parser.add_argument('--modelscope', action="store_true", default=False, + help="Use models from modelscope") args = parser.parse_args() - model_path = args.repo_id_or_model_path + + if args.modelscope: + from modelscope import AutoTokenizer + model_hub = 'modelscope' + else: + from transformers import AutoTokenizer + model_hub = 'huggingface' + + model_path = args.repo_id_or_model_path if args.repo_id_or_model_path else \ + ("ZhipuAI/chatglm3-6b" if args.modelscope else "THUDM/chatglm3-6b") # Load model in 4 bit, # which convert the relevant layers in the model into INT4 format @@ -47,7 +57,8 @@ load_in_4bit=True, optimize_model=True, trust_remote_code=True, - use_cache=True) + use_cache=True, + model_hub=model_hub) model = model.half().to('xpu') # Load tokenizer diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py index 986a7365b65..eec019fb242 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py @@ -20,21 +20,32 @@ import numpy as np from ipex_llm.transformers import AutoModel -from transformers import AutoTokenizer if __name__ == '__main__': parser = argparse.ArgumentParser(description='Stream Chat for ChatGLM3 model') - parser.add_argument('--repo-id-or-model-path', type=str, default="THUDM/chatglm3-6b", + parser.add_argument('--repo-id-or-model-path', type=str, help='The huggingface repo id for the ChatGLM3 model to be downloaded' ', or the path to the huggingface checkpoint folder') parser.add_argument('--question', type=str, default="晚上睡不着应该怎么办", help='Qustion you want to ask') parser.add_argument('--disable-stream', action="store_true", help='Disable stream chat') + parser.add_argument('--modelscope', action="store_true", default=False, + help="Use models from modelscope") args = parser.parse_args() - model_path = args.repo_id_or_model_path + + if args.modelscope: + from modelscope import AutoTokenizer + model_hub = 'modelscope' + else: + from transformers import AutoTokenizer + model_hub = 'huggingface' + + model_path = args.repo_id_or_model_path if args.repo_id_or_model_path else \ + ("ZhipuAI/chatglm3-6b" if args.modelscope else "THUDM/chatglm3-6b") + disable_stream = args.disable_stream # Load model in 4 bit, @@ -44,7 +55,8 @@ model = 
AutoModel.from_pretrained(model_path, load_in_4bit=True, trust_remote_code=True, - optimize_model=True) + optimize_model=True, + model_hub=model_hub) model.to('xpu') # Load tokenizer From 837cab62fdf87ea481d6ef7a9197d4a3f9f919c2 Mon Sep 17 00:00:00 2001 From: ATMxsp01 Date: Mon, 16 Dec 2024 12:22:19 +0800 Subject: [PATCH 2/6] Update readme --- .../example/GPU/HuggingFace/LLM/chatglm3/README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md index 0b334887955..3dbcbc33adf 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md @@ -1,6 +1,6 @@ # ChatGLM3 -In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on ChatGLM3 models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) as a reference ChatGLM3 model. +In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on ChatGLM3 models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) as a reference ChatGLM3 model when you choose **Hugging Face** as your model hub. And if you choose **ModelScope** as your model hub, we use [ZhipuAI/chatglm3-6b](https://www.modelscope.cn/models/ZhipuAI/chatglm3-6b) as a reference ChatGLM3 model. ## 0. Requirements To run these examples with IPEX-LLM on Intel GPUs, we have some recommended requirements for your machine, please refer to [here](../../../README.md#requirements) for more information. @@ -13,6 +13,9 @@ conda create -n llm python=3.11 conda activate llm # below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ + +# [optional] if you use modelscope as model hub, please make sure you are using 1.11.0 version +pip install modelscope==1.11.0 ``` ### 1.2 Installation on Windows @@ -101,10 +104,10 @@ python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROM ``` Arguments info: -- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the ChatGLM3 model to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/chatglm3-6b'`. +- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the ChatGLM3 model to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/chatglm3-6b'` for **Hugging Face** or `ZhipuAI/chatglm3-6b` for **ModelScope**. - `--prompt PROMPT`: argument defining the prompt to be infered (with integrated prompt format for chat). It is default to be `'AI是什么?'`. - `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It is default to be `32`. -- `--modelscope`: using **Modelscope** as model hub instead of **huggingface**. +- `--modelscope`: using **ModelScope** as model hub instead of **Hugging Face**. 
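The hub choice only affects how the model and tokenizer are obtained; the prediction step itself is the standard `generate()` call described above. As a rough, illustrative sketch of that step (the exact chat template lives in `generate.py` and follows the ChatGLM3 PROMPT.md linked in the script; the `<|user|>`/`<|assistant|>` tags below are an assumption based on that document):

```python
import torch

# Illustrative generation step; `model`, `tokenizer` and `args` come from generate.py above.
prompt = "<|user|>\n{}\n<|assistant|>".format(args.prompt)  # assumed ChatGLM3 chat template
with torch.inference_mode():
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to('xpu')
    output = model.generate(input_ids, max_new_tokens=args.n_predict)
    print(tokenizer.decode(output[0], skip_special_tokens=True))
```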
#### Sample Output #### [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) @@ -150,4 +153,4 @@ Arguments info: - `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the ChatGLM3 model to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/chatglm3-6b'`. - `--question QUESTION`: argument defining the question to ask. It is default to be `"晚上睡不着应该怎么办"`. - `--disable-stream`: argument defining whether to stream chat. If include `--disable-stream` when running the script, the stream chat is disabled and `chat()` API is used. -- `--modelscope`: using **Modelscope** as model hub instead of **huggingface**. +- `--modelscope`: using **ModelScope** as model hub instead of **Hugging Face**. From 2d3b54b7f820290ca8d5350b1cf3ba6a984a4dec Mon Sep 17 00:00:00 2001 From: ATMxsp01 Date: Mon, 16 Dec 2024 14:13:51 +0800 Subject: [PATCH 3/6] Update readme --- python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md index 3dbcbc33adf..2fa83100208 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md @@ -1,6 +1,6 @@ # ChatGLM3 -In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on ChatGLM3 models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) as a reference ChatGLM3 model when you choose **Hugging Face** as your model hub. And if you choose **ModelScope** as your model hub, we use [ZhipuAI/chatglm3-6b](https://www.modelscope.cn/models/ZhipuAI/chatglm3-6b) as a reference ChatGLM3 model. +In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on ChatGLM3 models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize the [THUDM/chatglm3-6b](https://huggingface.co/THUDM/chatglm3-6b) (or [ZhipuAI/chatglm3-6b](https://www.modelscope.cn/models/ZhipuAI/chatglm3-6b) for ModelScope) as a reference ChatGLM3 model. ## 0. Requirements To run these examples with IPEX-LLM on Intel GPUs, we have some recommended requirements for your machine, please refer to [here](../../../README.md#requirements) for more information. 
@@ -14,7 +14,7 @@ conda activate llm # below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ -# [optional] if you use modelscope as model hub, please make sure you are using 1.11.0 version +# [optional] only needed if you would like to use ModelScope as model hub pip install modelscope==1.11.0 ``` @@ -27,7 +27,7 @@ conda activate llm # below command will install intel_extension_for_pytorch==2.1.10+xpu as default pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ -# [optional] if you use modelscope as model hub, please make sure you are using 1.11.0 version +# [optional] only needed if you would like to use ModelScope as model hub pip install modelscope==1.11.0 ``` From 4d629a31902706053befc43b453b47e2a883ecfb Mon Sep 17 00:00:00 2001 From: ATMxsp01 Date: Mon, 16 Dec 2024 14:15:47 +0800 Subject: [PATCH 4/6] Update readme --- python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py index eec019fb242..5974475d1cc 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py @@ -57,7 +57,7 @@ trust_remote_code=True, optimize_model=True, model_hub=model_hub) - model.to('xpu') + model.half().to('xpu') # Load tokenizer tokenizer = AutoTokenizer.from_pretrained(model_path, From 696283c729ae8fb6edc46a87fa70fd25b0732e65 Mon Sep 17 00:00:00 2001 From: ATMxsp01 Date: Mon, 16 Dec 2024 14:41:43 +0800 Subject: [PATCH 5/6] Update readme --- .../GPU/HuggingFace/LLM/chatglm3/README.md | 22 ++++++++++++++----- .../GPU/HuggingFace/LLM/chatglm3/generate.py | 4 ++-- .../HuggingFace/LLM/chatglm3/streamchat.py | 4 ++-- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md index 2fa83100208..7a018f44e65 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/README.md @@ -99,12 +99,16 @@ set SYCL_CACHE_PERSISTENT=1 ### Example 1: Predict Tokens using `generate()` API In the example [generate.py](./generate.py), we show a basic use case for a ChatGLM3 model to predict the next N tokens using `generate()` API, with IPEX-LLM INT4 optimizations on Intel GPUs. -``` +```bash +# for Hugging Face model hub python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT + +# for ModelScope model hub +python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt PROMPT --n-predict N_PREDICT --modelscope ``` Arguments info: -- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the ChatGLM3 model to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/chatglm3-6b'` for **Hugging Face** or `ZhipuAI/chatglm3-6b` for **ModelScope**. +- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the **Hugging Face** or **ModelScope** repo id for the ChatGLM3 model to be downloaded, or the path to the checkpoint folder. 
It is default to be `'THUDM/chatglm3-6b'` for **Hugging Face** or `ZhipuAI/chatglm3-6b` for **ModelScope**. - `--prompt PROMPT`: argument defining the prompt to be infered (with integrated prompt format for chat). It is default to be `'AI是什么?'`. - `--n-predict N_PREDICT`: argument defining the max number of tokens to predict. It is default to be `32`. - `--modelscope`: using **ModelScope** as model hub instead of **Hugging Face**. @@ -140,17 +144,25 @@ AI stands for Artificial Intelligence. It refers to the development of computer In the example [streamchat.py](./streamchat.py), we show a basic use case for a ChatGLM3 model to stream chat, with IPEX-LLM INT4 optimizations. **Stream Chat using `stream_chat()` API**: -``` +```bash +# for Hugging Face model hub python ./streamchat.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --question QUESTION + +# for ModelScope model hub +python ./streamchat.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --question QUESTION --modelscope ``` **Chat using `chat()` API**: -``` +```bash +# for Hugging Face model hub python ./streamchat.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --question QUESTION --disable-stream + +# for ModelScope model hub +python ./streamchat.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --question QUESTION --disable-stream --modelscope ``` Arguments info: -- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the ChatGLM3 model to be downloaded, or the path to the huggingface checkpoint folder. It is default to be `'THUDM/chatglm3-6b'`. +- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the **Hugging Face** or **ModelScope** repo id for the ChatGLM3 model to be downloaded, or the path to the checkpoint folder. It is default to be `'THUDM/chatglm3-6b'` for **Hugging Face** or `ZhipuAI/chatglm3-6b` for **ModelScope**. - `--question QUESTION`: argument defining the question to ask. It is default to be `"晚上睡不着应该怎么办"`. - `--disable-stream`: argument defining whether to stream chat. If include `--disable-stream` when running the script, the stream chat is disabled and `chat()` API is used. - `--modelscope`: using **ModelScope** as model hub instead of **Hugging Face**. 
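For context, `--disable-stream` simply switches between the two chat entry points exposed by ChatGLM3's remote modeling code. A hedged sketch of how the script's two modes map onto them (the signatures follow THUDM's published chatglm3-6b code; treat the exact argument names as assumptions):

```python
# Sketch of the two chat modes; `model`, `tokenizer`, `args` and `disable_stream`
# come from streamchat.py above.
if disable_stream:
    # one-shot answer via the chat() API
    response, history = model.chat(tokenizer, args.question, history=[])
    print(response)
else:
    # stream_chat() yields the cumulative response, so print only the newly generated suffix
    printed = ""
    for response, history in model.stream_chat(tokenizer, args.question, history=[]):
        print(response[len(printed):], end="", flush=True)
        printed = response
    print()
```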
diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py index 25230055698..ee5e77b10eb 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/generate.py @@ -28,8 +28,8 @@ if __name__ == '__main__': parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for ChatGLM3 model') parser.add_argument('--repo-id-or-model-path', type=str, - help='The huggingface repo id for the ChatGLM3 model to be downloaded' - ', or the path to the huggingface checkpoint folder') + help='The Hugging Face or ModelScope repo id for the ChatGLM3 model to be downloaded' + ', or the path to the checkpoint folder') parser.add_argument('--prompt', type=str, default="AI是什么?", help='Prompt to infer') parser.add_argument('--n-predict', type=int, default=32, diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py index 5974475d1cc..a1f0b61352b 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py @@ -25,8 +25,8 @@ if __name__ == '__main__': parser = argparse.ArgumentParser(description='Stream Chat for ChatGLM3 model') parser.add_argument('--repo-id-or-model-path', type=str, - help='The huggingface repo id for the ChatGLM3 model to be downloaded' - ', or the path to the huggingface checkpoint folder') + help='The Hugging Face or ModelScope repo id for the ChatGLM3 model to be downloaded' + ', or the path to the checkpoint folder') parser.add_argument('--question', type=str, default="晚上睡不着应该怎么办", help='Qustion you want to ask') parser.add_argument('--disable-stream', action="store_true", From 99c7cf475ceb0692b0259c98905ef6f5ea312173 Mon Sep 17 00:00:00 2001 From: ATMxsp01 Date: Mon, 16 Dec 2024 14:52:41 +0800 Subject: [PATCH 6/6] format update --- python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py index a1f0b61352b..305e3c051f7 100644 --- a/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py +++ b/python/llm/example/GPU/HuggingFace/LLM/chatglm3/streamchat.py @@ -57,7 +57,7 @@ trust_remote_code=True, optimize_model=True, model_hub=model_hub) - model.half().to('xpu') + model = model.half().to('xpu') # Load tokenizer tokenizer = AutoTokenizer.from_pretrained(model_path,
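After the full series, both scripts share the same load path: INT4 weights via `load_in_4bit=True`, followed by an fp16 cast and a move to the Intel GPU. Rebinding the result, as the last patch does for `streamchat.py`, keeps it textually aligned with `generate.py`; for a `torch.nn.Module`, `half()` and `to()` also modify the module in place, so the assignment is mainly for clarity. Condensed end state (from `streamchat.py`; `generate.py` additionally passes `use_cache=True`):

```python
from ipex_llm.transformers import AutoModel

# Final load path once the series is applied; model_hub is 'huggingface' or 'modelscope'.
model = AutoModel.from_pretrained(model_path,
                                  load_in_4bit=True,      # INT4 weight-only quantization
                                  trust_remote_code=True,
                                  optimize_model=True,
                                  model_hub=model_hub)
model = model.half().to('xpu')                            # fp16 activations on the Intel GPU
```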