Fix codegen #77

Merged · 5 commits · Apr 30, 2024
3 changes: 2 additions & 1 deletion .gitignore
@@ -168,4 +168,5 @@ scripts
# Polymind learned facts and tools
knowledge/facts/**
knowledge/tools/**
-use_cases
+use_cases
+polymind/example_tools/**
277 changes: 276 additions & 1 deletion poetry.lock

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions polymind/core/task.py
@@ -163,7 +163,7 @@ class AtomTask(BaseTask):
{input}
---

-Please put the result into the ```json blob```.
+Please put the result into the ```json blob```, and the key should be "output".
"""

def __init__(self, tool_manager: ToolManager, tool_retriever: RetrieveTool, **kwargs):
@@ -186,7 +186,7 @@ async def _use_tool(self, objective: str, tool_description: str) -> Message:
tool_description (str): The description of the tool.
"""
# Retrieve the tool using ToolRetriever.
-tool_retrieve_message = Message(content={self.tool_retrieve_query_key: tool_description, "top_k": 1})
+tool_retrieve_message = Message(content={self.tool_retrieve_query_key: tool_description, "top_k": 3})
tool_retrieve_result_message = await self._tool_retriever(tool_retrieve_message)
self._logger.debug(f"Tool retrieve result: {tool_retrieve_result_message.content}")
tool_name = tool_retrieve_result_message.content["results"][0]
@@ -263,7 +263,7 @@ async def _execute(self, input: Message) -> Message:
"input"
] = f"""
Context: {self.task_context}
-Input from the previous step:
+Input from the previous steps:
{input_field}
Objective: {self.task_name}
"""
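To make the `top_k` bump above concrete, here is a minimal sketch (not part of the diff) of the retrieval step after this change. The literal "query" key is an assumption; the real code keys the query by `self.tool_retrieve_query_key`.

```python
from polymind.core.message import Message

async def pick_tool(tool_retriever, tool_description: str) -> str:
    """Hypothetical sketch of AtomTask._use_tool's retrieval step after this PR."""
    # Ask for three candidate tools instead of one.
    request = Message(content={"query": tool_description, "top_k": 3})
    result = await tool_retriever(request)
    # "results" is ranked best-first, so index 0 is still the tool that runs;
    # any benefit from the larger top_k depends on the retriever reranking
    # its wider candidate pool.
    return result.content["results"][0]
```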
209 changes: 164 additions & 45 deletions polymind/core/tool.py

Large diffs are not rendered by default.
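polymind/core/tool.py carries the bulk of this PR but its diff is collapsed here. The shape of the new `CodeGenerationTool` can still be pieced together from the tests deleted at the bottom of this PR; the following is a hedged reconstruction, where the method names come from those tests and the control flow and attempt limit of 3 are assumptions.

```python
async def generate_with_retries(code_gen_tool, requirement: str):
    """Hypothetical reconstruction of CodeGenerationTool's retry loop."""
    previous_errors = []
    for _ in range(3):  # the deleted tests exercise a max-attempts failure path
        try:
            # _code_gen returns the body of the ```python``` blob from the LLM,
            # raising ValueError when no code block is found.
            code = await code_gen_tool._code_gen(
                requirement=requirement, previous_errors=previous_errors
            )
            # Collects top-level imports, e.g. {"numpy", "pandas"}; presumably
            # installed before the snippet runs (assumption).
            code_gen_tool._extract_required_packages(code)
            # Executes the snippet and returns its `output` variable.
            output = await code_gen_tool._code_run(code)
            # Asks the LLM to validate the output against the requirement;
            # raises ValueError on a "failure" verdict.
            return await code_gen_tool._output_parse(
                requirement=requirement, output=output
            )
        except Exception as e:
            previous_errors.append(str(e))
    raise RuntimeError(f"Code generation failed after 3 attempts: {previous_errors}")
```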

123 changes: 122 additions & 1 deletion polymind/core_tools/llm_tool.py
@@ -6,12 +6,14 @@
import os
from typing import List

+import anthropic
from openai import AsyncOpenAI
from pydantic import Field

from polymind.core.logger import Logger
from polymind.core.message import Message
-from polymind.core.tool import BaseTool, Embedder, LLMTool, Param
+from polymind.core.tool import (BaseTool, CodeGenerationTool, Embedder,
+                                LLMTool, Param)
from polymind.core_tools.rest_api_tool import RestAPITool


@@ -220,3 +222,122 @@ async def _embedding(self, input: List[str]) -> List[List[float]]:
embedding_list = [entry.get("embedding", []) for entry in response.get("data", [])]
embeddings: List[List[float]] = [embedding[: self.embed_dim] for embedding in embedding_list]
return embeddings


class OpenAICodeGenerationTool(CodeGenerationTool):
"""Use OpenAI to generate code snippets based on the input prompt."""

tool_name: str = "open-ai-code-generation"

def _set_llm_client(self):
model_name = os.environ.get("MODEL_NAME", "gpt-3.5-turbo")
self._llm_tool = OpenAIChatTool(model_name=model_name)
# self._llm_tool = AnthropicClaudeTool()


class AnthropicClaudeTool(LLMTool):
"""AnthropicClaudeTool is a bridge to Anthropic's Claude API.
The tool can be initialized with system_prompt, max_tokens, and temperature.
    The input message of this tool should contain an "input" field, and optionally a "system_prompt".
    The "system_prompt" in the input message will override the default system_prompt.
    The tool will return a message with key "output" containing the response from the Claude API.
"""

model_config = {
"arbitrary_types_allowed": True, # Allow arbitrary types
}
tool_name: str = "anthropic-claude"
model_name: str = Field(default="claude-3-opus-20240229", description="The name of the Claude model.")
descriptions: List[str] = [
"This tool is used to chat with Anthropic's Claude language model.",
"This tool can be used as the orchestrator to control the conversation and problem solving.",
"This tool can be used to breakdown the problem into smaller parts and solve them.",
"This tool can be used to generate the response from the chat.",
"This tool can be used to generate the code of new tools.",
"This tool can do simple calculation.",
"Simple calculator that does basic arithmetic calculation.",
]

client: anthropic.Client = Field(default=None)
system_prompt: str = Field(default="You are a helpful AI assistant.")
max_tokens: int = Field(default=4000)
temperature: float = Field(default=0.7)
stop: str = Field(default=None)
response_format: str = Field(default="text", description="The format of the response from the chat.")

def __init__(self, **kwargs):
super().__init__(**kwargs)
self._logger = Logger(__file__)
self._set_client()

def _set_client(self):
"""Set the client for the language model."""
self.client = anthropic.Anthropic()

def input_spec(self) -> List[Param]:
"""Return the input specification of the tool."""
return [
Param(
name="system_prompt",
type="str",
required=False,
example="You are a helpful AI assistant.",
description="The system prompt for the chat.",
),
Param(
name="input",
type="str",
required=True,
example="hello, how are you?",
description="The prompt for the chat.",
),
Param(
name="max_tokens",
type="int",
required=False,
example="1500",
description="The maximum number of tokens for the chat.",
),
Param(
name="temperature",
type="float",
required=False,
example="0.7",
description="The temperature for the chat.",
),
]

def output_spec(self) -> List[Param]:
"""Return the output specification of the tool."""
return [
Param(
name="output",
type="str",
required=True,
example="I'm good, how are you?",
description="The response from the chat.",
),
]

async def _invoke(self, input: Message) -> Message:
"""Execute the tool and return the result."""
prompt = input.get("input", "")
system_prompt = input.get("system_prompt", self.system_prompt)
prompt = f"{system_prompt}\n{prompt}"
temperature = input.get("temperature", self.temperature)
max_tokens = input.get("max_tokens", self.max_tokens)
messages = [
{"role": "user", "content": prompt},
]
response = self.client.messages.create(
model=self.model_name,
max_tokens=max_tokens,
temperature=temperature,
messages=messages,
)
content = response.content[0].text
self._logger.tool_log(f"[{self.tool_name}], System Prompt: [{system_prompt}]")
self._logger.tool_log(f"[{self.tool_name}], Prompt: [{prompt}]")
self._logger.tool_log(f"[{self.tool_name}], Response from Claude: [{content}]")
response_message = Message(content={"output": content})
return response_message
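A hypothetical usage sketch for the new tool (not part of the diff). It assumes `ANTHROPIC_API_KEY` is set in the environment, since `anthropic.Anthropic()` reads it from there, and that tool instances are awaited directly, matching the `await self._tool_retriever(...)` pattern elsewhere in this PR.

```python
import asyncio

from polymind.core.message import Message
from polymind.core_tools.llm_tool import AnthropicClaudeTool

async def main():
    claude = AnthropicClaudeTool()  # defaults to claude-3-opus-20240229
    request = Message(content={
        "input": "Give me a one-line summary of what a tool manager does.",
        "temperature": 0.2,  # optional, overrides the 0.7 default
        "max_tokens": 200,   # optional, overrides the 4000 default
    })
    response = await claude(request)
    print(response.content["output"])

asyncio.run(main())
```

One caveat worth noting: `_invoke` is declared `async` but calls the synchronous `client.messages.create`, so the event loop blocks for the duration of the API call; anthropic's `AsyncAnthropic` client would keep it non-blocking.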
1 change: 1 addition & 0 deletions polymind/core_tools/retrieve_tool.py
@@ -163,6 +163,7 @@ def _set_client(self):
self._client.create_collection(self.collection_name, dimension=self.embed_dim, auto_id=True)
self.embedder = OpenAIEmbeddingTool(embed_dim=self.embed_dim)
self._llm_tool: LLMTool = OpenAIChatTool()
+        self.top_k = self.top_k or 3
self._logger = Logger(__file__)

def _extra_input_spec(self) -> List[Param]:
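One note on the fallback style added above: `x or 3` treats any falsy value as unset, so an explicit `top_k = 0` would also be silently coerced to 3. A stricter variant (not what the PR does; shown only as a design alternative):

```python
# Replace only a missing value, so an explicit top_k=0 surfaces as an
# error downstream instead of silently becoming 3.
self.top_k = 3 if self.top_k is None else self.top_k
```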
16 changes: 7 additions & 9 deletions polymind/thought_process/chain_of_tasks.py
@@ -31,15 +31,13 @@ class ChainOfTasks(ThoughtProcess):

problem_decomposition_prompt: str = """
Please read the requirement carefully, and think step-by-step before answering the question.
-Please decompose the problem into 1-5 steps, depending on the complexity of the problem.
-
-Each of the following sub-task will use the output of the previous task as input.
-
-Please write down your decomposition into the json blob.
-For each step, please give it an "objective", "input" and "output".
-FOr the objective, please make it less ambiguous and more specific.
-And make it to explain how to use the input.
-For input and output, please use declarative name and please describe the type as well.
+Follow the below rules:
+1. Please decompose the problem into 1-5 steps, depending on the complexity of the problem.
+Each of the following sub-task will use the output of the previous task as input.
+2. For each step, please give it an "objective", "input" and "output".
+Objectives: Make it less ambiguous and more specific to the requirement, e.g. including date if provided.
+Input: Make it to explain how to use the input. Use declarative name and please describe the type as well.
+3. Please write down the decomposition into the json blob.

An example of the decomposition is as follows:

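The prompt's own example is cut off in this view ("An example of the decomposition is as follows:" with the rest of the diff collapsed). Purely as a hypothetical illustration of the rules above — the real example and its exact wrapper keys are not shown here — a decomposition blob might look like:

```json
{
  "steps": [
    {
      "objective": "Fetch the daily closing prices for AAPL for April 2024",
      "input": "date_range (str): the month to query, e.g. '2024-04'",
      "output": "closing_prices (list[float]): one price per trading day"
    },
    {
      "objective": "Compute the mean of the closing prices from the previous step",
      "input": "closing_prices (list[float]): output of step 1",
      "output": "average_price (float): the mean closing price"
    }
  ]
}
```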
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "polymind"
version = "0.0.42" # Update this version before publishing to PyPI
version = "0.0.43" # Update this version before publishing to PyPI
description = "PolyMind is a customizable collaborative multi-agent framework for collective intelligence and distributed problem solving."
authors = ["TechTao"]
license = "MIT License"
@@ -18,6 +18,7 @@ pymilvus = "2.3.7"
faiss-cpu = "1.8.0"
colorama = "^0.4.6"
tavily-python = "^0.3.3"
anthropic = "^0.25.6"

[tool.poetry.group.dev.dependencies]
black = "^24.2.0"
132 changes: 0 additions & 132 deletions tests/polymind/core/test_tool.py
@@ -481,135 +481,3 @@ async def test_invoke_tool(self, monkeypatch):
response = await manager.invoke_tool("test_tool", input=params)
assert response.get("result") == "tset", "The result should be the reverse of the input query"
assert response.get("result2") == "olleh", "The result should be the reverse of the input query2"


class TestCodeGenerationTool:
@pytest.fixture
def llm_tool_mock(self):
return AsyncMock()

@pytest.fixture
def code_gen_tool(self, llm_tool_mock):
return CodeGenerationTool(llm_tool=llm_tool_mock)

# @pytest.mark.asyncio
# async def test_execute_successful(self, code_gen_tool, llm_tool_mock):
# # Setup mock response
# llm_tool_mock.return_value = AsyncMock(
# content={
# "output": """
# ```python
# output = {"result": 42}
# ```
# """
# }
# )
# expected_code = 'output = {"result": 42}'
# expected_output = '{"result": 42}'
# input_message = Message(content={"input": "Sum two numbers"})

# # Execute the test
# result = await code_gen_tool(input_message)
# actual_output = result.content["output"]

# assert result.content["code"] == expected_code
# assert json.loads(actual_output) == json.loads(expected_output), actual_output

@pytest.mark.asyncio
async def test_execute_failure_max_attempts(self, code_gen_tool, llm_tool_mock):
# Simulate failures
llm_tool_mock.side_effect = [AsyncMock(side_effect=Exception("Error")) for _ in range(3)]
input_message = Message(content={"input": "Sum two numbers"})

with pytest.raises(Exception):
await code_gen_tool(input_message)

@pytest.mark.asyncio
async def test_code_gen_success(self, code_gen_tool, llm_tool_mock):
# Setup mock response
llm_tool_mock.return_value = AsyncMock(
content={
"output": """
```python
a = 10
b = 32
def total(a, b):
return a + b
output = {"result": total(a, b)}
```
"""
}
)
requirement = "Sum two numbers"
previous_errors = []

code = await code_gen_tool._code_gen(requirement=requirement, previous_errors=previous_errors)
expected_code = """
a = 10
b = 32
def total(a, b):
return a + b
output = {"result": total(a, b)}
"""
expected_code = re.sub("\s+", "", expected_code)
assert re.sub("\s+", "", code) == expected_code

@pytest.mark.asyncio
async def test_code_gen_failure_no_code(self, code_gen_tool, llm_tool_mock):
# Setup mock response
llm_tool_mock.return_value = AsyncMock(content={"output": ""})
requirement = "Sum two numbers"
previous_errors = []

with pytest.raises(ValueError):
await code_gen_tool._code_gen(requirement, previous_errors)

def test_extract_required_packages(self, code_gen_tool):
code = textwrap.dedent(
"""
import numpy
import pandas as pd
import matplotlib.pyplot as plt
import yfinance as yf
from sklearn.linear_model import LinearRegression
"""
)
expected_packages = set(["numpy", "pandas", "matplotlib", "yfinance", "sklearn"])
packages = code_gen_tool._extract_required_packages(code)
assert set(packages) == expected_packages

@pytest.mark.asyncio
async def test_code_run_valid_python(self, code_gen_tool):
code = 'output = {"result": 100, "price": 200, "name": "test", "ids": [1, 2, 3]}'
result = await code_gen_tool._code_run(code)
assert result == {"result": 100, "price": 200, "name": "test", "ids": [1, 2, 3]}

@pytest.mark.asyncio
async def test_code_run_invalid_python(self, code_gen_tool):
code = "for i in range(10 print(i)"
with pytest.raises(Exception):
await code_gen_tool._code_run(code)

@pytest.mark.asyncio
async def test_output_parse_success(self, code_gen_tool, llm_tool_mock):
# Setup mock response
llm_tool_mock.return_value = AsyncMock(
content={"output": json.dumps({"status": "success", "output": '{"result": 42}'})}
)
requirement = "Sum two numbers"
output = '{"result": 42}'

parsed_output = await code_gen_tool._output_parse(requirement=requirement, output=output)
assert json.loads(parsed_output) == output, parsed_output

@pytest.mark.asyncio
async def test_output_parse_failure(self, code_gen_tool, llm_tool_mock):
# Setup mock response
llm_tool_mock.return_value = AsyncMock(
content={"output": json.dumps({"status": "failure", "reason": "Error: Invalid input"})}
)
requirement = "Sum two numbers"
output = {"result": 42}

with pytest.raises(ValueError):
await code_gen_tool._output_parse(requirement=requirement, output=output)