Merge branch 'main' into fix/response-format
shamuiscoding authored Sep 12, 2024
2 parents eccf9d6 + 9d97189 commit 42b4ea5
Showing 6 changed files with 66 additions and 42 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -10,7 +10,7 @@

setup(
name="weavel",
version="1.8.2",
version="1.9.2",
packages=find_namespace_packages(),
entry_points={},
description="Weavel, Prompt Optimization and Evaluation for LLM Applications",
35 changes: 20 additions & 15 deletions weavel/_worker.py
@@ -42,6 +42,7 @@
from weavel.utils import logger
from weavel.types import DatasetItem, Dataset, Prompt, PromptVersion, ResponseFormat


class Worker:
_instance = None

@@ -576,7 +577,7 @@ def create_test(
)
if response.status_code != 200:
raise Exception(f"Failed to create test: {response.text}")

# create, fetch, delete, list prompts
def create_prompt(
self,
@@ -667,7 +668,7 @@ async def adelete_prompt(self, name: str) -> None:
)
if response.status_code != 200:
raise Exception(f"Failed to delete prompt: {response.text}")

def list_prompts(self) -> List[Prompt]:
response = self.api_client.execute(
self.api_key,
@@ -693,18 +694,18 @@ async def alist_prompts(self) -> List[Prompt]:
return [Prompt(**prompt) for prompt in response.json()]
else:
raise Exception(f"Failed to list prompts: {response.text}")

# create, fetch, delete, list prompt versions
def create_prompt_version(
self,
prompt_name: str,
messages: List[Dict[str, Any]],
model: str = 'gpt-4o-mini',
model: str = "gpt-4o-mini",
temperature: float = 0.0,
response_format: Optional[ResponseFormat] = None,
input_vars: Optional[Dict[str, Any]] = None,
output_vars: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None
metadata: Optional[Dict[str, Any]] = None,
) -> None:
response = self.api_client.execute(
self.api_key,
@@ -718,8 +719,8 @@ def create_prompt_version(
"response_format": response_format,
"input_vars": input_vars,
"output_vars": output_vars,
"metadata": metadata
}
"metadata": metadata,
},
)
if response.status_code != 200:
raise Exception(f"Failed to create prompt version: {response.text}")
@@ -728,12 +729,12 @@ async def acreate_prompt_version(
self,
prompt_name: str,
messages: List[Dict[str, Any]],
model: str = 'gpt-4o-mini',
model: str = "gpt-4o-mini",
temperature: float = 0.0,
response_format: Optional[ResponseFormat] = None,
input_vars: Optional[Dict[str, Any]] = None,
output_vars: Optional[Dict[str, Any]] = None,
metadata: Optional[Dict[str, Any]] = None
metadata: Optional[Dict[str, Any]] = None,
) -> None:
response = await self.async_api_client.execute(
self.api_key,
@@ -747,13 +748,15 @@ async def acreate_prompt_version(
"response_format": response_format,
"input_vars": input_vars,
"output_vars": output_vars,
"metadata": metadata
}
"metadata": metadata,
},
)
if response.status_code != 200:
raise Exception(f"Failed to create prompt version: {response.text}")

def fetch_prompt_version(self, prompt_name: str, version: Union[str, int]) -> PromptVersion:

def fetch_prompt_version(
self, prompt_name: str, version: Union[str, int]
) -> PromptVersion:
response = self.api_client.execute(
self.api_key,
self.endpoint,
@@ -765,7 +768,9 @@ def fetch_prompt_version(self, prompt_name: str, version: Union[str, int]) -> Pr
else:
raise Exception(f"Failed to fetch prompt version: {response.text}")

async def afetch_prompt_version(self, prompt_name: str, version: Union[str, int]) -> PromptVersion:
async def afetch_prompt_version(
self, prompt_name: str, version: Union[str, int]
) -> PromptVersion:
response = await self.async_api_client.execute(
self.api_key,
self.endpoint,
@@ -796,7 +801,7 @@ async def adelete_prompt_version(self, prompt_name: str, version: int) -> None:
)
if response.status_code != 200:
raise Exception(f"Failed to delete prompt version: {response.text}")

def list_prompt_versions(self, prompt_name: int) -> List[PromptVersion]:
response = self.api_client.execute(
self.api_key,
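As a rough sketch of how the reformatted create_prompt_version signature above might be called, assuming a Worker instance has already been constructed (its constructor arguments are not shown in this diff), and with purely hypothetical prompt names and variable specs:

# Sketch only: `worker` is an assumed, already-constructed weavel._worker.Worker instance.
worker.create_prompt_version(
    prompt_name="summarizer",                                   # hypothetical prompt name
    messages=[{"role": "user", "content": "Summarize: {text}"}],
    model="gpt-4o-mini",                                        # default per the new signature
    temperature=0.0,
    response_format=None,                                       # Optional[ResponseFormat], defaults to None
    input_vars={"text": "str"},                                 # hypothetical variable spec
    metadata={"example": True},
)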
3 changes: 1 addition & 2 deletions weavel/client.py
@@ -13,14 +13,13 @@
from dotenv import load_dotenv
from weavel._worker import Worker

# from weavel.types.instances import Session, Span, Trace
from weavel.object_clients import (
GenerationClient,
SessionClient,
SpanClient,
TraceClient,
)
from weavel.types.datasets import Dataset, DatasetItem, Prompt, PromptVersion
from weavel.types import Dataset, DatasetItem, Prompt, PromptVersion

load_dotenv()

28 changes: 23 additions & 5 deletions weavel/types/__init__.py
@@ -1,5 +1,23 @@
from .datasets import *
from .observations import *
from .records import *
from .session import *
from .response_format import *
from .datasets import Dataset, DatasetItem
from .prompts import Prompt, PromptVersion
from .observations import Observation, Span, Generation, Log
from .records import Record, Message, TrackEvent, Trace
from .session import Session
from .response_format import ResponseFormat

__all__ = [
"Dataset",
"DatasetItem",
"Prompt",
"PromptVersion",
"Observation",
"Span",
"Generation",
"Log",
"Record",
"Message",
"TrackEvent",
"Trace",
"Session",
"ResponseFormat"
]
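
With the wildcard imports replaced by explicit re-exports and an __all__ list, the public types can be imported directly from weavel.types, which is what the updated import in weavel/client.py above relies on. A minimal sketch:

# These names are now re-exported from weavel.types (see __all__ above).
from weavel.types import Dataset, DatasetItem, Prompt, PromptVersion, ResponseFormat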
19 changes: 0 additions & 19 deletions weavel/types/datasets.py
@@ -1,6 +1,5 @@
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
from weavel.types import ResponseFormat

class DatasetItem(BaseModel):
uuid: Optional[str] = None
@@ -14,21 +13,3 @@ class Dataset(BaseModel):
created_at: str
description: Optional[str] = None
items: List[DatasetItem]


class Prompt(BaseModel):
name: str
description: Optional[str] = None
created_at: str


class PromptVersion(BaseModel):
version: int
messages: List[Dict[str, Any]]
model: str
temperature: float
response_format: Optional[ResponseFormat] = None
input_vars: Optional[Dict[str, Any]] = None
output_vars: Optional[Dict[str, Any]] = None
metadata: Optional[Dict[str, Any]] = None
created_at: str
21 changes: 21 additions & 0 deletions weavel/types/prompts.py
@@ -0,0 +1,21 @@
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
from ape.types import ResponseFormat


class Prompt(BaseModel):
name: str
description: Optional[str] = None
created_at: str


class PromptVersion(BaseModel):
version: int
messages: List[Dict[str, Any]]
model: str
temperature: float
response_format: Optional[ResponseFormat] = None
input_vars: Optional[Dict[str, Any]] = None
output_vars: Optional[Dict[str, Any]] = None
metadata: Optional[Dict[str, Any]] = None
created_at: str
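
The Prompt and PromptVersion models removed from weavel/types/datasets.py now live in this new module (which imports ResponseFormat from ape.types). A minimal, hypothetical instantiation based on the field definitions above, with illustrative values only:

# Illustrative only: field values are made up; only the field names/types come from the models above.
from weavel.types.prompts import Prompt, PromptVersion

prompt = Prompt(
    name="summarizer",
    description="One-line summaries",
    created_at="2024-09-12T00:00:00Z",
)

version = PromptVersion(
    version=1,
    messages=[{"role": "user", "content": "Summarize: {text}"}],
    model="gpt-4o-mini",
    temperature=0.0,
    response_format=None,           # Optional[ResponseFormat]
    input_vars={"text": "str"},
    created_at="2024-09-12T00:00:00Z",
)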
