diff --git a/docs/agent.md b/docs/agent.md
deleted file mode 100644
index 17cb534..0000000
--- a/docs/agent.md
+++ /dev/null
@@ -1,238 +0,0 @@
-# Agent
-
-LLMling provides an agent implementation that integrates with its resource and tool system. The agent can be used standalone or as part of a larger application.
-
-## Basic Usage
-
-Create and use an agent:
-
-```python
-from llmling import RuntimeConfig
-from llmling.agents import LLMlingAgent
-from pydantic import BaseModel
-
-# Optional: Define structured output
-class Analysis(BaseModel):
-    summary: str
-    suggestions: list[str]
-
-async with RuntimeConfig.from_file("config.yml") as runtime:
-    # Create agent with string output. It will have all resources and tools available from the config.
-    basic_agent = LLMlingAgent(
-        runtime,
-        model="openai:gpt-4",
-        system_prompt="You are a helpful assistant."
-    )
-    result = await basic_agent.run("Analyze this code.")
-    print(result.data)  # string response
-
-    # Create agent with structured output
-    typed_agent = LLMlingAgent[Analysis](
-        runtime,
-        result_type=Analysis,
-        model="openai:gpt-4",
-        system_prompt=[
-            "You are a code analysis assistant.",
-            "Always provide structured results.",
-        ]
-    )
-    result = await typed_agent.run("Analyze this code.")
-    print(result.data.summary)  # Typed access
-    print(result.data.suggestions)  # Type-checked
-```
-
-## Agent Configuration
-
-The agent can be configured with various options:
-
-```python
-agent = LLMlingAgent(
-    runtime,
-    # Model settings
-    model="openai:gpt-4",  # Model to use
-    result_type=Analysis,  # Optional result type
-
-    # Prompt configuration
-    system_prompt=[  # Static system prompts
-        "You are an assistant.",
-        "Be concise and clear.",
-    ],
-    name="code-assistant",  # Agent name
-
-    # Execution settings
-    retries=3,  # Max retries
-    result_tool_name="output",  # Tool name for final result
-    result_tool_description="Final analysis output",
-    defer_model_check=False,  # Check model on init
-)
-```
-
-## Running the Agent
-
-Different ways to run the agent:
-
-```python
-# Basic run
-result = await agent.run("Analyze this code.")
-
-# With message history
-from pydantic_ai import messages
-
-history = [
-    messages.Message(role="user", content="Previous message"),
-    messages.Message(role="assistant", content="Previous response")
-]
-result = await agent.run("Follow up question", message_history=history)
-
-# Stream responses
-async with agent.run_stream("Analyze this.") as stream:
-    async for message in stream:
-        print(message.content)
-
-# Synchronous operation (convenience wrapper)
-result = agent.run_sync("Quick question")
-```
-
-## Customizing Agent Behavior
-
-Add custom tools and system prompts:
-
-```python
-class CodeAgent(LLMlingAgent[Analysis]):
-    def __init__(self, runtime: RuntimeConfig) -> None:
-        super().__init__(
-            runtime,
-            result_type=Analysis,
-            model="openai:gpt-4"
-        )
-        self._setup_tools()
-
-    def _setup_tools(self) -> None:
-        @self.tool
-        async def analyze_code(
-            ctx: RunContext[RuntimeConfig],
-            code: str
-        ) -> dict[str, Any]:
-            """Analyze Python code."""
-            return await ctx.deps.execute_tool("analyze", code=code)
-
-        @self.system_prompt
-        async def get_language_prompt(
-            ctx: RunContext[RuntimeConfig]
-        ) -> str:
-            """Dynamic system prompt."""
-            langs = await ctx.deps.execute_tool("list_languages")
-            return f"Supported languages: {', '.join(langs)}"
-```
-
-## Tools and System Prompts
-
-Register tools and prompts with decorators:
-
-```python
-# Register a tool
-@agent.tool
-async def my_tool(
-    ctx: RunContext[RuntimeConfig],
-    arg: str
-) -> str:
-    """Tool description."""
-    return f"Processed: {arg}"
-
-# Register a plain tool (no context)
-@agent.tool_plain
-def plain_tool(text: str) -> str:
-    """Plain tool without context."""
-    return text.upper()
-
-# Register dynamic system prompt
-@agent.system_prompt
-async def get_prompt(ctx: RunContext[RuntimeConfig]) -> str:
-    """Dynamic system prompt."""
-    resources = await ctx.deps.list_resources()
-    return f"Available resources: {', '.join(resources)}"
-
-# Register result validator
-@agent.result_validator
-async def validate(
-    ctx: RunContext[RuntimeConfig],
-    result: str
-) -> str:
-    """Validate result before returning."""
-    if len(result) < 10:
-        raise ModelRetry("Response too short")
-    return result
-```
-
-## Event Handling
-
-The agent can handle runtime events:
-
-```python
-class MyAgent(LLMlingAgent[str]):
-    async def handle_event(self, event: Event) -> None:
-        """Handle runtime events."""
-        match event.type:
-            case "RESOURCE_MODIFIED":
-                print(f"Resource changed: {event.name}")
-            case "TOOL_ADDED":
-                print(f"New tool available: {event.name}")
-```
-
-## Best Practices
-
-### Type Safety
-- Use typed results when possible
-- Validate tool inputs/outputs
-- Handle model errors appropriately
-
-### Resource Management
-```python
-# Proper cleanup with context manager
-async with runtime as r:
-    agent = LLMlingAgent(r)
-    try:
-        result = await agent.run("Query")
-    except Exception as e:
-        print(f"Agent error: {e}")
-```
-
-### Tool Design
-- Keep tools focused
-- Provide clear descriptions
-- Use type hints
-- Handle errors gracefully
-- Report progress for long operations
-
-### System Prompts
-- Keep them clear and focused
-- Use dynamic prompts when needed
-- Don't leak implementation details
-- Consider using templates
-
-### Error Handling
-```python
-from llmling.core.exceptions import LLMError
-
-try:
-    result = await agent.run("Query")
-except LLMError as e:
-    print(f"Model error: {e}")
-except Exception as e:
-    print(f"General error: {e}")
-```
-
-## Performance Tips
-
-- Use streaming for long responses
-- Cache expensive tool results
-- Use appropriate timeouts
-- Consider batching requests
-- Profile tool performance
-
-## Next Steps
-
-For more information about agents and integration examples, see:
-- [Agent Examples](https://llmling.readthedocs.io/en/latest/examples/agents/)
-- [Tool Development](https://llmling.readthedocs.io/en/latest/tools/)
-- [Type System](https://llmling.readthedocs.io/en/latest/types/)
diff --git a/docs/usage.md b/docs/usage.md
index 98e3b10..f882723 100644
--- a/docs/usage.md
+++ b/docs/usage.md
@@ -10,7 +10,7 @@ The main interface for using LLMling programmatically is the `RuntimeConfig` cla
 from llmling import Config, RuntimeConfig
 
 # Create from YAML file
-async with RuntimeConfig.from_file("config.yml") as runtime:
+async with RuntimeConfig.open("config.yml") as runtime:
     # Use runtime...
     pass
 
@@ -20,16 +20,13 @@ async with RuntimeConfig.from_config(config) as runtime:
     # Use runtime...
     pass
 
-# Create from YAML string
-config_yaml = """
-resources:
-  greeting:
-    type: text
-    content: "Hello, {name}!"
-"""
-async with RuntimeConfig.from_yaml(config_yaml) as runtime:
-    # Use runtime...
-    pass
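+# Or build the same structure in code and pass it to from_config above
+# (sketch: assumes Config is the pydantic model llmling exports)
+config = Config.model_validate({
+    "resources": {
+        "greeting": {"type": "text", "content": "Hello, {name}!"},
+    },
+})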
 ```
 
 > **Important**
@@ -38,7 +28,7 @@
 ## Resource Operations
 
 ```python
-async with RuntimeConfig.from_file("config.yml") as runtime:
+async with RuntimeConfig.open("config.yml") as runtime:
     # Load a resource
     resource = await runtime.load_resource("my_resource")
     print(resource.content)
@@ -56,17 +46,14 @@ async with RuntimeConfig.from_file("config.yml") as runtime:
     # Register new resource
     from llmling.config.models import TextResource
-    runtime.register_resource(
-        "new_resource",
-        TextResource(content="New content"),
-        replace=True  # Optional, replace if exists
-    )
+    resource = TextResource(content="Hello, World!")
+    runtime.register_resource("new_resource", resource)
 ```
 
 ## Prompt Operations
 
 ```python
-async with RuntimeConfig.from_file("config.yml") as runtime:
+async with RuntimeConfig.open("config.yml") as runtime:
     # Format a prompt
     messages = await runtime.render_prompt(
         "my_prompt",
@@ -86,14 +73,7 @@ async with RuntimeConfig.from_file("config.yml") as runtime:
 ## Tool Operations
 
 ```python
-async with RuntimeConfig.from_file("config.yml") as runtime:
-    # Execute a tool
-    result = await runtime.execute_tool(
-        "my_tool",
-        arg1="value1",
-        arg2="value2"
-    )
-
+async with RuntimeConfig.open("config.yml") as runtime:
     # List available tools
     tools = runtime.list_tools()
@@ -102,6 +82,14 @@ async with RuntimeConfig.from_file("config.yml") as runtime:
     # Get all tools
     all_tools = runtime.get_tools()
+
+    # Execute a tool
+    result = await runtime.execute_tool(
+        "my_tool",
+        arg1="value1",
+        arg2="value2"
+    )
+
 ```
 
 ## Event Handling
 
 ```python
 from llmling.core.events import Event, EventHandler
 
 class MyEventHandler(EventHandler):
     async def handle_event(self, event: Event) -> None:
         match event.type:
             case "RESOURCE_MODIFIED":
                 print(f"Resource changed: {event.name}")
             case "TOOL_ADDED":
                 print(f"New tool: {event.name}")
 
-async with RuntimeConfig.from_file("config.yml") as runtime:
+async with RuntimeConfig.open("config.yml") as runtime:
     # Add event handler
     runtime.add_event_handler(MyEventHandler())
 ```
@@ -134,7 +122,7 @@ class ResourceObserver(RegistryEvents):
     def on_item_modified(self, key: str, item: Any) -> None:
         print(f"Resource modified: {key}")
 
-async with RuntimeConfig.from_file("config.yml") as runtime:
+async with RuntimeConfig.open("config.yml") as runtime:
     # Add observers
     runtime.add_resource_observer(ResourceObserver())
     runtime.add_prompt_observer(PromptObserver())
@@ -147,7 +135,7 @@ Here's an example of using LLMling with an agent:
 ```python
 from llmling import RuntimeConfig
-from llmling.agents import LLMlingAgent
+from llmling_agent import LLMlingAgent
 from pydantic import BaseModel
 
 # Define structured output
@@ -157,7 +145,7 @@ class Analysis(BaseModel):
     suggestions: list[str]
 
 # Create agent with runtime
-async with RuntimeConfig.from_file("config.yml") as runtime:
+async with RuntimeConfig.open("config.yml") as runtime:
     # Create agent with structured output
     agent = LLMlingAgent[Analysis](
         runtime,
@@ -180,45 +168,10 @@ async with RuntimeConfig.from_file("config.yml") as runtime:
     for suggestion in result.data.suggestions:
         print(f"- {suggestion}")
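+
+    # Stream the response instead of waiting for the final result
+    # (sketch: mirrors the run_stream API from the removed agent docs;
+    # verify against the llmling_agent package)
+    async with agent.run_stream("Analyze this code.") as stream:
+        async for message in stream:
+            print(message.content)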
 ```
-
-## Best Practices
-
-### Resource Management
-- Always use async context managers
-- Clean up resources properly
-- Handle exceptions appropriately
-- Use type hints consistently
-
-### Error Handling
-```python
-from llmling.core import exceptions
-
-async with RuntimeConfig.from_file("config.yml") as runtime:
-    try:
-        resource = await runtime.load_resource("missing")
-    except exceptions.ResourceError as e:
-        print(f"Resource error: {e}")
-    except exceptions.ConfigError as e:
-        print(f"Configuration error: {e}")
-    except exceptions.LLMLingError as e:
-        print(f"General error: {e}")
-```
-
-### Async Operations
-- Use appropriate async patterns
-- Don't block the event loop
-- Handle cancellation properly
-- Use asyncio.TaskGroup for concurrent operations
-
-### Type Safety
-- Use type hints consistently
-- Enable type checking in development
-- Handle type conversions explicitly
-- Validate external data
-
-## Next Steps
-
-For more examples and detailed API documentation, see:
-- [API Reference](https://llmling.readthedocs.io/en/latest/api/)
-- [Example Gallery](https://llmling.readthedocs.io/en/latest/examples/)
-- [Contributing Guide](https://llmling.readthedocs.io/en/latest/contributing/)
diff --git a/docs/yaml_config.md b/docs/yaml_config.md
index 8abb7ca..f131e4e 100644
--- a/docs/yaml_config.md
+++ b/docs/yaml_config.md
@@ -124,16 +124,96 @@
 ## Toolsets
 
-The `toolsets` section lets you include pre-built collections of tools:
+The `toolsets` section lets you define collections of related tools. There are three types of toolsets:
 
 ```yaml
 toolsets:
-  my_tools:
+  # Entry point toolsets (tools from Python packages)
+  core_tools:
     type: entry_points
     module: llmling
+    namespace: core  # Optional prefix for tool names
+
+  # OpenAPI toolsets (tools from API specs)
+  petstore:
+    type: openapi
+    spec: "https://petstore.swagger.io/v2/swagger.json"
+    base_url: "https://petstore.swagger.io/v2"  # Optional: override the spec's server URL
+    namespace: pet  # Optional prefix for tool names
+
+  # Custom toolsets (your own tool collections)
+  custom:
+    type: custom
+    import_path: "myapp.tools.CustomToolSet"
+    namespace: my  # Optional prefix for tool names
 ```
 
-Toolsets are Python entry points that provide collections of related tools.
+### Entry Point Toolsets
+
+Entry point toolsets load tools from installed Python packages that expose them through entry points:
+
+```yaml
+toolsets:
+  llmling:
+    type: entry_points
+    module: llmling  # Package name
+    namespace: core  # Optional namespace
+```
+
+### OpenAPI Toolsets
+
+OpenAPI toolsets automatically create tools from OpenAPI/Swagger specifications:
+
+```yaml
+toolsets:
+  api:
+    type: openapi
+    spec: "https://api.example.com/openapi.json"  # URL or local path
+    base_url: "https://api.example.com"  # Optional base URL
+    namespace: api  # Optional namespace
+```
+
+### Custom Toolsets
+
+Custom toolsets load tool collections from your own Python classes:
+
+```yaml
+toolsets:
+  custom:
+    type: custom
+    import_path: "myapp.tools.DatabaseTools"  # Your toolset class
+    namespace: db  # Optional namespace
+```
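+
+As a rough sketch, the class behind `import_path` might look like this
+(hypothetical: llmling's exact toolset protocol, i.e. the base class and
+method names, should be checked against the llmling source):
+
+```python
+# myapp/tools.py (hypothetical module matching the import_path above)
+
+class DatabaseTools:
+    """A collection of related tools, loaded via `type: custom`."""
+
+    def get_tools(self) -> list:
+        # Assumption: the toolset hands llmling plain callables; each
+        # function's name and docstring become the tool's name/description.
+        return [self.get_user, self.create_user]
+
+    def get_user(self, user_id: int) -> dict:
+        """Look up a user record by id."""
+        return {"id": user_id, "name": "Alice"}
+
+    def create_user(self, name: str) -> dict:
+        """Create a new user record."""
+        return {"id": 2, "name": name}
+```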
+
+### Namespacing
+
+Each toolset can have an optional `namespace` that prefixes its tool names to avoid conflicts. For example:
+- Without namespace: `get_user`, `create_user`
+- With namespace "db": `db.get_user`, `db.create_user`
 
 ## File Watching