diff --git a/.github/workflows/run-sdk-tests.yml b/.github/workflows/run-sdk-tests.yml new file mode 100644 index 0000000000..8bab4bd5ad --- /dev/null +++ b/.github/workflows/run-sdk-tests.yml @@ -0,0 +1,33 @@ +name: Run SDK tests + +on: + pull_request: + paths: + - 'agenta-cli/**' + - 'agenta-cli/pyproject.toml' + - 'agenta-cli/poetry.lock' + workflow_dispatch: + +jobs: + pytest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + + - name: Install Poetry + run: pip install poetry + + - name: Install dependencies and agenta SDK + run: | + cd agenta-cli + poetry install + + - name: Run pytest + run: | + cd agenta-cli + poetry run pytest tests diff --git a/.gitignore b/.gitignore index 4a739feaf8..0fd64da41b 100644 --- a/.gitignore +++ b/.gitignore @@ -8,8 +8,6 @@ agenta-cli/agenta/templates/main.py agenta-cli/agenta/templates/agenta.py .DS_Store agenta-cli/tmp/ -agenta-cli/tests/ - examples/pitch_genius/config.toml examples/pitch_genius/agenta.py examples/pitch_genius/main.py diff --git a/agenta-cli/agenta/__init__.py b/agenta-cli/agenta/__init__.py index b5187fe521..78711111ef 100644 --- a/agenta-cli/agenta/__init__.py +++ b/agenta-cli/agenta/__init__.py @@ -17,6 +17,6 @@ from .sdk.tracing.decorators import span from .sdk.agenta_init import Config, init, llm_tracing from .sdk.utils.helper.openai_cost import calculate_token_usage - +from .sdk.client import Agenta config = PreInitObject("agenta.config", Config) diff --git a/agenta-cli/agenta/sdk/client.py b/agenta-cli/agenta/sdk/client.py new file mode 100644 index 0000000000..ee94ced567 --- /dev/null +++ b/agenta-cli/agenta/sdk/client.py @@ -0,0 +1,56 @@ +import os + +from cachetools import TTLCache, cached + +from agenta.client.backend.client import AgentaApi + + +class Agenta: + """Client class for interacting with the Agenta API.""" + + def __init__(self, api_key: str = None, host: str = None): + """ + 
Initializes the Agenta client with API key and host. + + Raises: + EnvironmentError: If AGENTA_API_KEY is not set. + """ + if not api_key and not os.environ.get("AGENTA_API_KEY"): + raise EnvironmentError( + "Required environment variable AGENTA_API_KEY is not set." + ) + self.api_key = api_key if api_key else os.environ.get("AGENTA_API_KEY") + self.host = ( + host if host else os.environ.get("AGENTA_HOST", "https://cloud.agenta.ai") + ) + self.cache = TTLCache(maxsize=1024, ttl=300) + backend_url = f"{self.host}/api" + self.client = AgentaApi(base_url=backend_url, api_key=self.api_key) + + def get_config(self, base_id: str, environment: str, cache_timeout: int = 300): + """ + Fetches and caches the configuration for a specified base ID and environment. + + Args: + base_id (str): The unique identifier for the base. + environment (str): The environment name (e.g., 'production', 'development'). + cache_timeout (int): The TTL for the cache in seconds. Defaults to 300 seconds. + + Returns: + dict: The configuration data retrieved from the Agenta API. + + Raises: + EnvironmentError: If the required AGENTA_API_KEY is not set in the environment variables. 
+ """ + if cache_timeout != self.cache.ttl: + self.cache = TTLCache( + maxsize=1024, ttl=cache_timeout + ) # TODO: We need to modify this to use a dynamic TTLCache implementation in the future + + @cached(cache=self.cache) + def fetch_config(base_id: str, environment: str = "production"): + return self.client.configs.get_config( + base_id=base_id, environment_name=environment + ) + + return fetch_config(base_id, environment) diff --git a/agenta-cli/poetry.lock b/agenta-cli/poetry.lock index 0210e4c006..a495fa93e9 100644 --- a/agenta-cli/poetry.lock +++ b/agenta-cli/poetry.lock @@ -80,6 +80,17 @@ files = [ {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, ] +[[package]] +name = "cachetools" +version = "5.3.3" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.3-py3-none-any.whl", hash = "sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945"}, + {file = "cachetools-5.3.3.tar.gz", hash = "sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105"}, +] + [[package]] name = "certifi" version = "2023.11.17" @@ -1101,4 +1112,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "6aa133eaf493ae3c5878cd99042e0ea6aa075ff278192ed75136d84011afc46e" +content-hash = "b9102b19411c6b7c1cf295665cde63146e0a15018820d88097032d7e097e6a50" diff --git a/agenta-cli/pyproject.toml b/agenta-cli/pyproject.toml index f705d58be2..d82a572361 100644 --- a/agenta-cli/pyproject.toml +++ b/agenta-cli/pyproject.toml @@ -30,6 +30,7 @@ posthog = "^3.1.0" pydantic = "1.10.13" httpx = "^0.27.0" pymongo = "^4.6.3" +cachetools = "^5.3.3" [tool.poetry.dev-dependencies] pytest = "^6.2" diff --git a/agenta-cli/tests/__init__.py b/agenta-cli/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff 
--git a/agenta-cli/tests/example_projects/simple_langchain/app.py b/agenta-cli/tests/example_projects/simple_langchain/app.py deleted file mode 100644 index fc33ae0264..0000000000 --- a/agenta-cli/tests/example_projects/simple_langchain/app.py +++ /dev/null @@ -1,22 +0,0 @@ -from agenta import post -from dotenv import load_dotenv -from langchain.chains import LLMChain -from langchain.llms import OpenAI -from langchain.prompts import PromptTemplate - - -@post -def completion(product: str) -> str: - llm = OpenAI(temperature=0.9) - prompt = PromptTemplate( - input_variables=["product"], - template="What is a good name for a company that makes {product}?", - ) - chain = LLMChain(llm=llm, prompt=prompt) - output = chain.run(product=product) - return output - - -if __name__ == "__main__": - load_dotenv() - print(completion("socks")) diff --git a/agenta-cli/tests/example_projects/simple_langchain/requirements.txt b/agenta-cli/tests/example_projects/simple_langchain/requirements.txt deleted file mode 100644 index c9e113d18f..0000000000 --- a/agenta-cli/tests/example_projects/simple_langchain/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -langchain -openai -python-dotenv diff --git a/agenta-cli/tests/example_projects/test2/README.md b/agenta-cli/tests/example_projects/test2/README.md deleted file mode 100644 index e939ea7674..0000000000 --- a/agenta-cli/tests/example_projects/test2/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Using this template - -Please make sure to create a `.env` file with your OpenAI API key before running the project. -OPENAI_API_KEY=sk-xxxxxxx - -## Running -You can first test your app locally by using by running `python app.py`, after experimenting, send it to evaluation by running `agenta up folder`. -You will find then the app in the dashboard, where you can evaluate it and compare it to previous versions. 
- diff --git a/agenta-cli/tests/example_projects/test2/agenta.py b/agenta-cli/tests/example_projects/test2/agenta.py deleted file mode 100644 index f81e783ec9..0000000000 --- a/agenta-cli/tests/example_projects/test2/agenta.py +++ /dev/null @@ -1,111 +0,0 @@ -"""The code for the Agenta SDK""" -import argparse -import functools -import inspect -import os -import sys -import traceback -from typing import Any, Callable, Optional - -from dotenv import load_dotenv -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware -from fastapi.responses import JSONResponse - -app = FastAPI() - -origins = [ - "http://localhost:3000", - "http://localhost:3001", -] - -app.add_middleware( - CORSMiddleware, - allow_origins=origins, - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - - -class TextParam(str): - @classmethod - def __modify_schema__(cls, field_schema): - field_schema.update({"x-parameter": "text"}) - - -class FloatParam(float): - @classmethod - def __modify_schema__(cls, field_schema): - field_schema.update({"x-parameter": "float"}) - - -def post(func: Callable[..., Any]): - load_dotenv() # TODO: remove later when we have a better way to inject env variables - sig = inspect.signature(func) - func_params = sig.parameters - - # find the optional parameters for the app - app_params = { - name: param - for name, param in func_params.items() - if param.annotation in {TextParam, FloatParam} - } - # find the default values for the optional parameters - for name, param in app_params.items(): - default_value = param.default if param.default is not param.empty else None - app_params[name] = default_value - - @functools.wraps(func) - def wrapper(*args, **kwargs): - kwargs = {**app_params, **kwargs} - try: - return func(*args, **kwargs) - except Exception as e: - traceback_str = "".join( - traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__) - ) - return JSONResponse( - status_code=500, content={"error": str(e), 
"traceback": traceback_str} - ) - - new_params = [] - for name, param in sig.parameters.items(): - if name in app_params: - new_params.append( - inspect.Parameter( - name, - inspect.Parameter.KEYWORD_ONLY, - default=app_params[name], - annotation=Optional[param.annotation], - ) - ) - else: - new_params.append(param) - - wrapper.__signature__ = sig.replace(parameters=new_params) - - route = "/generate" - app.post(route)(wrapper) - - # check if the module is being run as the main script - if ( - os.path.splitext(os.path.basename(sys.argv[0]))[0] - == os.path.splitext(os.path.basename(inspect.getfile(func)))[0] - ): - parser = argparse.ArgumentParser() - # add arguments to the command-line parser - for name, param in sig.parameters.items(): - if name in app_params: - # For optional parameters, we add them as options - parser.add_argument( - f"--{name}", type=type(param.default), default=param.default - ) - else: - # For required parameters, we add them as arguments - parser.add_argument(name, type=param.annotation) - - args = parser.parse_args() - print(func(**vars(args))) - - return wrapper diff --git a/agenta-cli/tests/example_projects/test2/app.py b/agenta-cli/tests/example_projects/test2/app.py deleted file mode 100644 index fc33ae0264..0000000000 --- a/agenta-cli/tests/example_projects/test2/app.py +++ /dev/null @@ -1,22 +0,0 @@ -from agenta import post -from dotenv import load_dotenv -from langchain.chains import LLMChain -from langchain.llms import OpenAI -from langchain.prompts import PromptTemplate - - -@post -def completion(product: str) -> str: - llm = OpenAI(temperature=0.9) - prompt = PromptTemplate( - input_variables=["product"], - template="What is a good name for a company that makes {product}?", - ) - chain = LLMChain(llm=llm, prompt=prompt) - output = chain.run(product=product) - return output - - -if __name__ == "__main__": - load_dotenv() - print(completion("socks")) diff --git a/agenta-cli/tests/example_projects/test2/config.toml 
b/agenta-cli/tests/example_projects/test2/config.toml deleted file mode 100644 index fbdd634cc3..0000000000 --- a/agenta-cli/tests/example_projects/test2/config.toml +++ /dev/null @@ -1,2 +0,0 @@ -app-name = "asdf" -variants = [ "again", "testme",] diff --git a/agenta-cli/tests/example_projects/test2/main.py b/agenta-cli/tests/example_projects/test2/main.py deleted file mode 100644 index 4f55595d04..0000000000 --- a/agenta-cli/tests/example_projects/test2/main.py +++ /dev/null @@ -1,7 +0,0 @@ -from uvicorn import run - -import agenta -import app # This will register the routes with the FastAPI application - -if __name__ == "__main__": - run("agenta:app", host="0.0.0.0", port=80) diff --git a/agenta-cli/tests/example_projects/test2/requirements.txt b/agenta-cli/tests/example_projects/test2/requirements.txt deleted file mode 100644 index c9e113d18f..0000000000 --- a/agenta-cli/tests/example_projects/test2/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -langchain -openai -python-dotenv diff --git a/agenta-cli/tests/sdk/test_client.py b/agenta-cli/tests/sdk/test_client.py new file mode 100644 index 0000000000..378fe2e7a0 --- /dev/null +++ b/agenta-cli/tests/sdk/test_client.py @@ -0,0 +1,73 @@ +from unittest.mock import patch + +import pytest +from agenta.sdk.client import Agenta + + +@pytest.fixture +def agenta_client(): + # Set up the Agenta client with a mock API key + with patch.dict( + "os.environ", + {"AGENTA_API_KEY": "mock_api_key", "AGENTA_HOST": "https://mock.agenta.ai"}, + ): + client = Agenta() + return client + + +def test_get_config_with_caching(agenta_client): + """ + Test the caching mechanism of the get_config method to ensure it returns cached data. + + Args: + agenta_client: The fixture providing an instance of the Agenta client. 
+ """ + # Setup the mock to return a predefined configuration + with patch.object( + agenta_client.client.configs, + "get_config", + return_value={"parameters": "something"}, + ) as mock_get_config: + # Retrieve configuration to store in cache + response = agenta_client.get_config("base123", "production") + assert response == { + "parameters": "something" + }, "First response should match the mock data." + + # Modify the return value of the mock + mock_get_config.return_value = {"parameters": "something else"} + + # Attempt to retrieve configuration again, expecting cached data + response = agenta_client.get_config("base123", "production") + assert response == { + "parameters": "something" + }, "Second response should return cached data, not new mock data." + + +def test_get_config_without_caching(agenta_client): + """ + Test the get_config method without caching to ensure it always fetches new data. + + Args: + agenta_client: The fixture providing an instance of the Agenta client. + """ + # Setup the mock to return a predefined configuration + with patch.object( + agenta_client.client.configs, + "get_config", + return_value={"parameters": "something"}, + ) as mock_get_config: + # Retrieve configuration with caching disabled + response = agenta_client.get_config("base123", "production", cache_timeout=0) + assert response == { + "parameters": "something" + }, "First response should match the mock data." + + # Modify the return value of the mock + mock_get_config.return_value = {"parameters": "something else"} + + # Retrieve new configuration with caching disabled + response = agenta_client.get_config("base123", "production", cache_timeout=0) + assert response == { + "parameters": "something else" + }, "Second response should match the new mock data." 
diff --git a/docs/basic_guides/integrating.mdx b/docs/basic_guides/integrating.mdx new file mode 100644 index 0000000000..6d23b3bd85 --- /dev/null +++ b/docs/basic_guides/integrating.mdx @@ -0,0 +1,57 @@ +--- +title: 'Integrating with agenta' +description: 'Integrate applications and prompts created in agenta into your projects.' +--- + +## Overview + +Applications and prompts created in agenta can be integrated into your projects in two primary ways: + +1. **As a Middleware:** Use the applications hosted on agenta directly. +2. **As a Prompt Management System:** Fetch the latest version of prompts/configurations from agenta. + +## Using agenta as Middleware + +When using agenta as middleware, you directly interact with applications hosted on agenta. This method is straightforward and includes built-in observability features. + +### Steps to Integrate: + +- Navigate to the deployment view in agenta. +- Locate the API endpoints under the "Endpoints" menu. +- Use these endpoints in your application to interact with the hosted LLM apps. + +The applications are fully instrumented, and all traces can be viewed in the observability dashboard. + +## Using agenta as a Prompt Management System + +You can use agenta to manage and fetch prompts or configurations. + +### Steps: + +- Install the agenta Python SDK (`pip install agenta`). +- Set up environment variables: + - `AGENTA_API_KEY` for cloud users. + - `AGENTA_HOST` set to `http://localhost` if you are self-hosting. + +### Example Code: + +```python +from agenta import Agenta +agenta = Agenta() +config = agenta.get_config(base_id="xxxxx", environment="production", cache_timeout=200) # Fetches the configuration with caching +``` +The response object is an instance of `GetConfigResponse` from `agenta.client.backend.types.get_config_response`. 
It contains the following attributes: +- `config_name`: 'default' +- `current_version`: 1 +- `parameters`: This dictionary contains the configuration of the application, for instance: +``` +{'temperature': 1.0, +'model': 'gpt-3.5-turbo', +'max_tokens': -1, +'prompt_system': 'You are an expert in geography.', +'prompt_user': 'What is the capital of {country}?', +'top_p': 1.0, +'frequence_penalty': 0.0, +'presence_penalty': 0.0, +'force_json': 0} +``` \ No newline at end of file diff --git a/docs/mint.json b/docs/mint.json index 0375502888..d8b78f8318 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -88,7 +88,8 @@ "basic_guides/automatic_evaluation", "basic_guides/human_evaluation", "basic_guides/deployment", - "basic_guides/team_management" + "basic_guides/team_management", + "basic_guides/integrating" ] }, {