From 3184504434c49a658346f9d7beb5f9206f31a1ef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=85=BC=E6=AC=A3?=
Date: Tue, 3 Sep 2024 16:04:18 +0800
Subject: [PATCH] add jsonschema as a dependency

---
 examples/llm_vl_mix_text.py     |  4 ++--
 examples/multi_agent_router.py  |  4 ++--
 examples/visual_storytelling.py |  4 ++--
 qwen_agent/llm/base.py          |  2 +-
 setup.py                        |  1 +
 tests/agents/test_assistant.py  | 11 ++++-------
 tests/agents/test_router.py     |  2 +-
 tests/examples/test_examples.py |  4 ++--
 tests/llm/test_dashscope.py     |  5 +++--
 9 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/examples/llm_vl_mix_text.py b/examples/llm_vl_mix_text.py
index 32c05d8..cc62456 100644
--- a/examples/llm_vl_mix_text.py
+++ b/examples/llm_vl_mix_text.py
@@ -29,9 +29,9 @@ def test():
         'role': 'user',
         'content': [{
-            'text': '框出太阳并描述'
+            'text': '框出小狗并描述',
         }, {
-            'image': 'https://img01.sc115.com/uploads/sc/jpgs/1505/apic11540_sc115.com.jpg'
+            'image': 'https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg',
         }]
     }]
     response = llm_vl.chat(messages, stream=True)
diff --git a/examples/multi_agent_router.py b/examples/multi_agent_router.py
index 706798d..742179a 100644
--- a/examples/multi_agent_router.py
+++ b/examples/multi_agent_router.py
@@ -36,7 +36,7 @@ def init_agent_service():
 def test(
         query: str = 'hello',
-        image: str = 'https://img01.sc115.com/uploads/sc/jpgs/1505/apic11540_sc115.com.jpg',
+        image: str = 'https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg',
         file: Optional[str] = os.path.join(ROOT_RESOURCE, 'poem.pdf'),
 ):
     # Define the agent
@@ -66,7 +66,7 @@ def app_tui():
     messages = []
     while True:
         query = input('user question: ')
-        # Image example: https://img01.sc115.com/uploads/sc/jpgs/1505/apic11540_sc115.com.jpg
+        # Image example: https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg
         image = input('image url (press enter if no image): ')
         # File example: resource/poem.pdf
         file = input('file url (press enter if no file): ').strip()
diff --git a/examples/visual_storytelling.py b/examples/visual_storytelling.py
index e660887..084aa90 100644
--- a/examples/visual_storytelling.py
+++ b/examples/visual_storytelling.py
@@ -50,7 +50,7 @@ def _run(self, messages: List[Message], lang: str = 'zh', **kwargs) -> Iterator[
 def test(query: Optional[str] = '看图说话',
-         image: str = 'https://img01.sc115.com/uploads3/sc/vector/201809/51413-20180914205509.jpg'):
+         image: str = 'https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg'):
     # define a writer agent
     bot = VisualStorytelling(llm={'model': 'qwen-max'})
@@ -71,7 +71,7 @@ def app_tui():
     messages = []
     while True:
         query = input('user question: ')
-        # image example: https://img01.sc115.com/uploads3/sc/vector/201809/51413-20180914205509.jpg
+        # image example: https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg
         image = input('image url: ').strip()
         if not image:
diff --git a/qwen_agent/llm/base.py b/qwen_agent/llm/base.py
index b3a4ae4..395f141 100644
--- a/qwen_agent/llm/base.py
+++ b/qwen_agent/llm/base.py
@@ -60,7 +60,7 @@ def __init__(self, cfg: Optional[Dict] = None):
         self.generate_cfg = generate_cfg
     def quick_chat(self, prompt: str) -> str:
-        responses = self.chat(messages=[Message(role=USER, content=prompt)], stream=False)
+        *_, responses = self.chat(messages=[Message(role=USER, content=prompt)])
         assert len(responses) == 1
         assert not responses[0].function_call
         assert isinstance(responses[0].content, str)
diff --git a/setup.py b/setup.py
index 7c2adb8..da79b2b 100644
--- a/setup.py
+++ b/setup.py
@@ -47,6 +47,7 @@ def read_description() -> str:
         'eval_type_backport',
         'json5',
         'jsonlines',
+        'jsonschema',
         'openai',
         'pydantic>=2.3.0',
         'requests',
diff --git a/tests/agents/test_assistant.py b/tests/agents/test_assistant.py
index 805c042..428fb02 100644
--- a/tests/agents/test_assistant.py
+++ b/tests/agents/test_assistant.py
@@ -57,13 +57,10 @@ def test_assistant_vl():
     agent = Assistant(llm=llm_cfg)
     messages = [
-        Message(
-            'user',
-            [
-                ContentItem(text='用一句话描述图片'),
-                ContentItem(image=  # NOQA
-                            'https://img01.sc115.com/uploads/sc/jpgs/1505/apic11540_sc115.com.jpg')
-            ])
+        Message('user', [
+            ContentItem(text='用一句话描述图片'),
+            ContentItem(image='https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg'),
+        ])
     ]
     *_, last = agent.run(messages)
diff --git a/tests/agents/test_router.py b/tests/agents/test_router.py
index ef6a274..e0f207e 100644
--- a/tests/agents/test_router.py
+++ b/tests/agents/test_router.py
@@ -23,7 +23,7 @@ def test_router():
     messages = [
         Message('user', [
             ContentItem(text='描述图片'),
-            ContentItem(image='https://img01.sc115.com/uploads/sc/jpgs/1505/apic11540_sc115.com.jpg'),
+            ContentItem(image='https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg'),
         ])
     ]
diff --git a/tests/examples/test_examples.py b/tests/examples/test_examples.py
index 739910e..94b811e 100644
--- a/tests/examples/test_examples.py
+++ b/tests/examples/test_examples.py
@@ -37,7 +37,7 @@ def test_llm_vl_mix_text():
 @pytest.mark.parametrize('query', [None, '看图说话'])
-@pytest.mark.parametrize('image', ['https://img01.sc115.com/uploads3/sc/vector/201809/51413-20180914205509.jpg'])
+@pytest.mark.parametrize('image', ['https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg'])
 def test_visual_storytelling(query, image):
     visual_storytelling(query=query, image=image)
@@ -69,7 +69,7 @@ def test_llm_riddles():
 @pytest.mark.parametrize('query', ['告诉我你现在知道什么了'])
-@pytest.mark.parametrize('image', [None, 'https://img01.sc115.com/uploads/sc/jpgs/1505/apic11540_sc115.com.jpg'])
+@pytest.mark.parametrize('image', [None, 'https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg'])
 @pytest.mark.parametrize('file', [None, os.path.join(ROOT_RESOURCE, 'poem.pdf')])
 def test_multi_agent_router(query, image, file):
     multi_agent_router(query=query, image=image, file=file)
diff --git a/tests/llm/test_dashscope.py b/tests/llm/test_dashscope.py
index 49c723e..2adb449 100644
--- a/tests/llm/test_dashscope.py
+++ b/tests/llm/test_dashscope.py
@@ -34,11 +34,12 @@ def test_vl_mix_text(functions, stream, delta_stream):
     # Chat with vl llm
     llm_vl = get_chat_model(llm_cfg_vl)
     messages = [{
-        'role': 'user',
+        'role':
+            'user',
         'content': [{
             'text': '框出太阳'
         }, {
-            'image': 'https://img01.sc115.com/uploads/sc/jpgs/1505/apic11540_sc115.com.jpg'
+            'image': 'https://dashscope.oss-cn-beijing.aliyuncs.com/images/dog_and_girl.jpeg'
         }]
     }]
     response = llm_vl.chat(messages=messages, functions=None, stream=stream, delta_stream=delta_stream)