
Merge pull request #22 from PCain02/Exception_Handling
feat: Add Exception Handling for LiteLLM
boulais01 authored Nov 12, 2024
2 parents baade64 + 1866c79 commit a285c20
Showing 3 changed files with 207 additions and 104 deletions.
178 changes: 74 additions & 104 deletions execexam/advise.py
@@ -2,7 +2,6 @@

 import random
 import socket
-import sys
 from typing import List, Optional
 
 import openai
@@ -12,6 +11,7 @@
 from rich.panel import Panel
 
 from . import enumerations
+from .exceptions import get_litellm_traceback
 
 
 def load_litellm() -> None:
@@ -81,12 +81,10 @@ def check_advice_model(
         )
         and advice_model is None
     ):
-        return_code = 1
         console.print()
         console.print(
             "[red]The --advice-model option is required when --report includes 'advice' or 'all'"
         )
-        sys.exit(return_code)
 
 
 def check_advice_server(
@@ -105,12 +103,10 @@ def check_advice_server(
         and advice_method == enumerations.AdviceMethod.api_server
         and advice_server is None
     ):
-        return_code = 1
         console.print()
         console.print(
             "[red]The --advice-server option is required when --advice-method is 'api_server'"
         )
-        sys.exit(return_code)
     elif (
         report is not None
         and (
@@ -120,12 +116,10 @@ def check_advice_server(
         and advice_method == enumerations.AdviceMethod.api_server
         and not validate_url(advice_server)
     ):
-        return_code = 1
         console.print()
         console.print(
-            "[red]The --advice-server option did not specify a valid URL"
+            "[bold red]Before sending to LLM:\nThe --advice-server option did not specify a valid URL"
         )
-        sys.exit(return_code)
 
 
 def fix_failures(  # noqa: PLR0913
@@ -147,108 +141,84 @@ def fix_failures(  # noqa: PLR0913
         # Call the handle_connection_error function
         handle_connection_error(console)
         return
-    with console.status(
-        "[bold green] Getting Feedback from ExecExam's Coding Mentor"
-    ):
-        # the test overview is a string that contains both
-        # the filtered test output and the details about the passing
-        # and failing assertions in the test cases
-        test_overview = filtered_test_output + exec_exam_test_assertion_details
-        # create an LLM debugging request that contains all of the
-        # information that is needed to provide advice about how
-        # to fix the bug(s) in the program that are part of an
-        # executable examination; note that, essentially, an
-        # examination consists of Python functions that a student
-        # must complete and then test cases that confirm the correctness
-        # of the functions that are implemented; note also that
-        # ExecExam has a Pytest plugin that collects additional details
-        llm_debugging_request = (
-            "I am an undergraduate student completing a programming examination."
-            + "You may never make suggestions to change the source code of the test cases."
-            + "Always make suggestions about how to improve the Python source code of the program under test."
-            + "Always give Python code in a Markdown fenced code block with your suggested program."
-            + "Always start your response with a friendly greeting and overview of what you will provide."
-            + "Always conclude by saying that you are making a helpful suggestion but could be wrong."
-            + "Always be helpful, upbeat, friendly, encouraging, and concise when making a response."
-            + "Your task is to suggest, in a step-by-step fashion, how to fix the bug(s) in the program?"
-            + "What follows is all of the information you need to complete the debugging task."
-            + f"Here is the test overview with test output and details about test assertions: {test_overview}"
-            + f"Here is a brief overview of the test failure information: {failing_test_details}"
-            + f"Here is the source code for the one or more failing test(s): {failing_test_code}"
-        )
-        # the API key approach expects that the person running the execexam
-        # tool has specified an API key for a support cloud-based LLM system
-        if advice_method == enumerations.AdviceMethod.api_key:
-            # submit the debugging request to the LLM-based mentoring system
-            response = completion(  # type: ignore
-                model=advice_model,
-                messages=[{"role": "user", "content": llm_debugging_request}],
-            )
-            # display the advice from the LLM-based mentoring system
-            # in a panel that is created by using the rich library
-            if fancy:
-                console.print(
-                    Panel(
-                        Markdown(
-                            str(
-                                response.choices[0].message.content,  # type: ignore
-                            ),
-                            code_theme=syntax_theme.value,
-                        ),
-                        expand=False,
-                        title="Advice from ExecExam's Coding Mentor (API Key)",
-                        padding=1,
-                    )
-                )
-            else:
-                console.print(
-                    Markdown(
-                        str(
-                            response.choices[0].message.content,  # type: ignore
-                        ),
-                        code_theme=syntax_theme.value,
-                    ),
-                )
-            console.print()
-        # the apiserver approach expects that the person running the execexam
-        # tool will specify the URL of a remote LLM-based mentoring system
-        # that is configured to provide access to an LLM system for advice
-        elif advice_method == enumerations.AdviceMethod.api_server:
-            # use the OpenAI approach to submitting the
-            # debugging request to the LLM-based mentoring system
-            # that is currently running on a remote LiteLLM system;
-            # note that this does not seem to work correctly if
-            # you use the standard LiteLLM approach as done with
-            # the standard API key approach elsewhere in this file
-            client = openai.OpenAI(
-                api_key="anything",
-                base_url=advice_server,
-            )
-            # submit the debugging request to the LLM-based mentoring system
-            # using the specified model and the debugging prompt
-            response = client.chat.completions.create(
-                model=advice_model,
-                messages=[{"role": "user", "content": llm_debugging_request}],
-            )
-            if fancy:
-                console.print(
-                    Panel(
-                        Markdown(
-                            str(response.choices[0].message.content),
-                            code_theme=syntax_theme.value,
-                        ),
-                        expand=False,
-                        title="Advice from ExecExam's Coding Mentor (API Server)",
-                        padding=1,
-                    )
-                )
-            else:
-                console.print(
-                    Markdown(
-                        str(
-                            response.choices[0].message.content,  # type: ignore
-                        ),
-                        code_theme=syntax_theme.value,
-                    ),
-                )
-            console.print()
+    try:
+        with console.status(
+            "[bold green] Getting Feedback from ExecExam's Coding Mentor"
+        ):
+            test_overview = (
+                filtered_test_output + exec_exam_test_assertion_details
+            )
+            llm_debugging_request = (
+                "I am an undergraduate student completing a programming examination."
+                + " You may never make suggestions to change the source code of the test cases."
+                + " Always make suggestions about how to improve the Python source code of the program under test."
+                + " Always give Python code in a Markdown fenced code block with your suggested program."
+                + " Always start your response with a friendly greeting and overview of what you will provide."
+                + " Always conclude by saying that you are making a helpful suggestion but could be wrong."
+                + " Always be helpful, upbeat, friendly, encouraging, and concise when making a response."
+                + " Your task is to suggest, in a step-by-step fashion, how to fix the bug(s) in the program?"
+                + f" Here is the test overview with test output and details about test assertions: {test_overview}"
+                + f" Here is a brief overview of the test failure information: {failing_test_details}"
+                + f" Here is the source code for the one or more failing test(s): {failing_test_code}"
+            )
+
+            if advice_method == enumerations.AdviceMethod.api_key:
+                # Submit the debugging request to the LLM-based mentoring system
+                response = completion(  # type: ignore
+                    model=advice_model,
+                    messages=[
+                        {"role": "user", "content": llm_debugging_request}
+                    ],
+                )
+                # Display the advice from the LLM-based mentoring system
+                if fancy:
+                    console.print(
+                        Panel(
+                            Markdown(
+                                str(response.choices[0].message.content),  # type: ignore
+                                code_theme=syntax_theme.value,
+                            ),
+                            expand=False,
+                            title="Advice from ExecExam's Coding Mentor (API Key)",
+                            padding=1,
+                        )
+                    )
+                else:
+                    console.print(
+                        Markdown(
+                            str(response.choices[0].message.content),  # type: ignore
+                        ),
+                    )
+                console.print()
+            elif advice_method == enumerations.AdviceMethod.api_server:
+                # Use the OpenAI approach to submit the debugging request
+                client = openai.OpenAI(
+                    api_key="anything", base_url=advice_server
+                )
+                response = client.chat.completions.create(
+                    model=advice_model,
+                    messages=[
+                        {"role": "user", "content": llm_debugging_request}
+                    ],
+                )
+                if fancy:
+                    console.print(
+                        Panel(
+                            Markdown(
+                                str(response.choices[0].message.content),
+                                code_theme=syntax_theme.value,
+                            ),
+                            expand=False,
+                            title="Advice from ExecExam's Coding Mentor (API Server)",
+                            padding=1,
+                        )
+                    )
+                else:
+                    console.print(
+                        Markdown(
+                            str(response.choices[0].message.content),  # type: ignore
+                        ),
+                    )
+                console.print()
+    except Exception:
+        get_litellm_traceback(console)
54 changes: 54 additions & 0 deletions execexam/exceptions.py
@@ -0,0 +1,54 @@
"""Define exceptions for the input errors in the command line."""

import sys

# Console is passed in as an argument, so rich.console.Console
# does not need to be imported here.


def get_litellm_traceback(console) -> None:
"""Print the traceback of the last exception."""
exc_type, exc_obj, _ = sys.exc_info()

if exc_type is None:
return
# List of litellm exception types and their explanations
litellm_exceptions = {
"NotFoundError": "LLM resource not found. Please check your model and/or endpoint.",
"AuthenticationError": "API authentication failed. Please verify your API key.",
"RateLimitError": "Rate limit exceeded. Wait and retry or check API key.\nNOTE: This error can sometimes be caused by an invalid API key.",
"InvalidRequestError": "Malformed API request. Please review parameters.",
"APIError": "Internal LLM API error. Retry later.",
"APIConnectionError": "Connection failed. \nNOTE: This error can sometimes be caused by an invalid server URL. Verify your server URL.",
}

    # display a tailored explanation for known litellm exceptions
if exc_type.__name__ in litellm_exceptions:
console.print(
f"[bold red]Exception Type: {exc_type.__name__}[/bold red]"
)
console.print(f"Explanation: {litellm_exceptions[exc_type.__name__]}")
else:
# default behavior for non-litellm exceptions
console.print(
f"[bold red]Exception Type: {exc_type.__name__}[/bold red]"
)
console.print(f"Error Message: {exc_obj!s}")

    # general purpose output as a backup
console.print(
"\n[bold red]If your issue persists, ensure the model you entered is correct, such as:[/bold red]"
)
console.print("[bold blue]- anthropic/claude-3-haiku-20240307[/bold blue]")
console.print("[bold blue]- anthropic/claude-3-opus-20240229[/bold blue]")
console.print("[bold blue]- groq/llama3-8b-8192[/bold blue]")
console.print(
"[bold blue]- openrouter/meta-llama/llama-3.1-8b-instruct:free[/bold blue]"
)

console.print(
"\n[bold red]Please visit [bold blue]https://docs.litellm.ai/docs/providers [/bold blue]for more valid LiteLLM models[bold red]"
)

console.print(
"\n[bold red]For server connectivity issues, please visit [bold blue]https://docs.litellm.ai/docs/simple_proxy [/bold blue]for a valid LiteLLM proxy.[/bold red]"
)
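
A note on usage: get_litellm_traceback reads sys.exc_info() directly, so it only reports something useful when called from inside an except block, which is how the new try/except in advise.py invokes it. Below is a minimal sketch of that calling pattern; the RateLimitError class is a hypothetical stand-in, since the helper dispatches on the exception class name alone rather than on the real litellm types.

from rich.console import Console

from execexam.exceptions import get_litellm_traceback

console = Console()

# Hypothetical stand-in: the helper matches on exc_type.__name__,
# so any class named "RateLimitError" triggers the mapped explanation.
RateLimitError = type("RateLimitError", (Exception,), {})

try:
    raise RateLimitError("simulated rate limit")
except Exception:
    # Must run inside the except block so sys.exc_info() is populated.
    get_litellm_traceback(console)
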
79 changes: 79 additions & 0 deletions tests/test_exceptions.py
@@ -0,0 +1,79 @@
"""Test Suite for Exceptions Module."""

from unittest.mock import patch

from rich.console import Console

from execexam.exceptions import get_litellm_traceback

# Create a console object for testing
console = Console()


def test_not_found_error():
"""Test case for NotFoundError."""
# Mocking sys.exc_info to simulate a NotFoundError exception
with patch(
"sys.exc_info",
return_value=(
type("NotFoundError", (Exception,), {}),
Exception("Resource not found"),
None,
),
):
with patch("rich.console.Console.print") as mock_print:
# Call the function to get the traceback
get_litellm_traceback(console)
# Assert that the correct messages are printed for NotFoundError
mock_print.assert_any_call(
"[bold red]Exception Type: NotFoundError[/bold red]"
)
mock_print.assert_any_call(
"Explanation: LLM resource not found. Please check your model and/or endpoint."
)


def test_authentication_error():
"""Test case for AuthenticationError."""
# Mocking sys.exc_info to simulate an AuthenticationError exception
with patch(
"sys.exc_info",
return_value=(
type("AuthenticationError", (Exception,), {}),
Exception("Authentication failed"),
None,
),
):
with patch("rich.console.Console.print") as mock_print:
# Call the function to get the traceback
get_litellm_traceback(console)
# Assert that the correct messages are printed for AuthenticationError
mock_print.assert_any_call(
"[bold red]Exception Type: AuthenticationError[/bold red]"
)
mock_print.assert_any_call(
"Explanation: API authentication failed. Please verify your API key."
)


def test_rate_limit_error():
"""Test case for RateLimitError."""
# Mocking sys.exc_info to simulate a RateLimitError exception
with patch(
"sys.exc_info",
return_value=(
type("RateLimitError", (Exception,), {}),
Exception("Rate limit exceeded"),
None,
),
):
with patch("rich.console.Console.print") as mock_print:
# Call the function to get the traceback
get_litellm_traceback(console)
# Assert that the correct messages are printed for RateLimitError
mock_print.assert_any_call(
"[bold red]Exception Type: RateLimitError[/bold red]"
)
mock_print.assert_any_call(
"Explanation: Rate limit exceeded. Wait and retry or check API key.\nNOTE: This error can sometimes be caused by an invalid API key."
)
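
The three tests above cover half of the mapped exception types; the same mock pattern extends mechanically to the others. One more case is sketched below for APIConnectionError (an illustration, not part of this commit); note that the expected explanation string must match the entry in exceptions.py exactly.

def test_api_connection_error():
    """Test case for APIConnectionError (sketch, not part of this commit)."""
    # Mocking sys.exc_info to simulate an APIConnectionError exception
    with patch(
        "sys.exc_info",
        return_value=(
            type("APIConnectionError", (Exception,), {}),
            Exception("Connection failed"),
            None,
        ),
    ):
        with patch("rich.console.Console.print") as mock_print:
            # Call the function to get the traceback
            get_litellm_traceback(console)
            # Assert that the correct messages are printed for APIConnectionError
            mock_print.assert_any_call(
                "[bold red]Exception Type: APIConnectionError[/bold red]"
            )
            mock_print.assert_any_call(
                "Explanation: Connection failed. \nNOTE: This error can sometimes be caused by an invalid server URL. Verify your server URL."
            )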
