diff --git a/src/llmling/llm/providers/litellm.py b/src/llmling/llm/providers/litellm.py
index ece2477..be89f52 100644
--- a/src/llmling/llm/providers/litellm.py
+++ b/src/llmling/llm/providers/litellm.py
@@ -136,7 +136,13 @@ def _prepare_request_kwargs(self, **additional_kwargs: Any) -> dict[str, Any]:
             if k not in exclude_fields and v is not None
         })
         # Add additional kwargs (highest priority)
-        kwargs.update({k: v for k, v in additional_kwargs.items() if v is not None})
+        # Filter out empty tools array
+        filtered_kwargs = {
+            k: v
+            for k, v in additional_kwargs.items()
+            if v is not None and not (k == "tools" and not v)
+        }
+        kwargs.update(filtered_kwargs)
         return kwargs
 
     async def complete(
@@ -156,6 +162,12 @@ async def complete(
             for msg in messages
         ]
 
+        # Clean up kwargs
+        # Remove empty tools array and related settings
+        if "tools" in kwargs and not kwargs["tools"]:
+            kwargs.pop("tools")
+            kwargs.pop("tool_choice", None)
+
         # Prepare request kwargs
         request_kwargs = self._prepare_request_kwargs(**kwargs)
 
@@ -180,19 +192,17 @@ async def complete_stream(
         """Implement streaming completion using LiteLLM."""
         try:
             # Convert messages to dict format
-            messages_dict: list[dict[str, Any]] = []
-            for msg in messages:
-                msg_dict: dict[str, Any] = {
+            messages_dict = [
+                {
                     "role": msg.role,
                     "content": msg.content,
+                    **({"name": msg.name} if msg.name else {}),
                 }
-                if msg.name:
-                    msg_dict["name"] = msg.name
-                if msg.tool_calls:
-                    msg_dict["tool_calls"] = [tc.model_dump() for tc in msg.tool_calls]
-                messages_dict.append(msg_dict)
+                for msg in messages
+            ]
 
-            # Remove empty tools array if present
+            # Clean up kwargs
+            # Remove empty tools array and related settings
             if "tools" in kwargs and not kwargs["tools"]:
                 kwargs.pop("tools")
                 kwargs.pop("tool_choice", None)
diff --git a/src/llmling/resources/test.yml b/src/llmling/resources/test.yml
index c0891d7..3e69f12 100644
--- a/src/llmling/resources/test.yml
+++ b/src/llmling/resources/test.yml
@@ -159,13 +159,9 @@ task_templates:
   quick_review:
     provider: local-llama
     context: system_prompt
-    inherit_tools: false  # Explicitly set
-    tools: []  # Empty list for testing
     settings:
       temperature: 0.7
       max_tokens: 2048
-      tools: []  # Empty list for testing
-      tool_choice: "auto"
 
   detailed_review:
     provider: code_review
diff --git a/src/llmling/task/executor.py b/src/llmling/task/executor.py
index 1f5c71a..aafa607 100644
--- a/src/llmling/task/executor.py
+++ b/src/llmling/task/executor.py
@@ -92,10 +92,10 @@ def _prepare_tool_config(
 
         if not available_tools:
             logger.debug("No tools available")
-            return None  # Return None instead of empty tools config
+            return None
 
         # Get schemas for all available tools
-        tool_schemas: list[dict[str, Any]] = []
+        tool_schemas = []
         for tool_name in available_tools:
             # Verify tool exists before getting schema
             if not self.tool_registry.has_tool(tool_name):
@@ -103,7 +103,6 @@
                 continue
 
             schema = self.tool_registry.get_schema(tool_name)
-            logger.debug("Tool schema for %s: %s", tool_name, schema)
             tool_schemas.append(schema.function)
 
         # Only return tools config if we have actual tool schemas
@@ -133,10 +132,11 @@ async def execute(
     ) -> TaskResult:
         """Execute a task."""
         try:
-            # Add tool configuration if available
-            if tool_config := self._prepare_tool_config(task_context, task_provider):
-                logger.debug("Tool configuration prepared: %s", tool_config)
+            # Add tool configuration if available and non-empty
+            tool_config = self._prepare_tool_config(task_context, task_provider)
+            if tool_config and tool_config.get("tools"):  # Only add if we have tools
                 kwargs.update(tool_config)
+
             # Load and process context
             context_result = await self._load_context(task_context)
 
@@ -149,10 +149,6 @@ async def execute(
                 task_provider.name,
                 llm_config,
             )
-            logger.debug(
-                "Sending request to LLM with tools config: %s", kwargs.get("tools")
-            )
-
             # Get completion with potential tool calls
             while True:
                 completion = await provider.complete(messages, **kwargs)
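For context, a minimal standalone sketch of the kwargs-cleaning behaviour the litellm.py and executor.py hunks converge on: drop None values, and when the tools list is empty drop it together with tool_choice so the provider is never sent an empty tools array. The function name clean_request_kwargs and the example values below are illustrative only; they are not part of the llmling codebase.

    from typing import Any

    def clean_request_kwargs(**kwargs: Any) -> dict[str, Any]:
        """Drop None values; drop an empty tools list along with tool_choice."""
        cleaned = {k: v for k, v in kwargs.items() if v is not None}
        if not cleaned.get("tools"):
            cleaned.pop("tools", None)
            cleaned.pop("tool_choice", None)
        return cleaned

    # Example: the empty tools list and its tool_choice are stripped, real settings stay.
    print(clean_request_kwargs(temperature=0.7, tools=[], tool_choice="auto", stop=None))
    # -> {'temperature': 0.7}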