diff --git a/guardrails_api/blueprints/guards.py b/guardrails_api/blueprints/guards.py
index d84e08c..328fa46 100644
--- a/guardrails_api/blueprints/guards.py
+++ b/guardrails_api/blueprints/guards.py
@@ -191,26 +191,19 @@ def openai_v1_chat_completions(guard_name: str):
         pass
 
     if not stream:
-        try:
-            validation_outcome: ValidationOutcome = guard(
-                # todo make this come from the guard struct?
-                # currently we dont support .configure
-                num_reasks=0,
-                **payload,
-            )
-            llm_response = guard.history[-1].iterations[-1].outputs.llm_response_info
-            result = outcome_to_chat_completion(
-                validation_outcome=validation_outcome,
-                llm_response=llm_response,
-                has_tool_gd_tool_call=has_tool_gd_tool_call,
-            )
-            return result
-        except Exception as e:
-            raise HttpError(
-                status=400,
-                message="BadRequest",
-                cause=(str(e)),
-            )
+        validation_outcome: ValidationOutcome = guard(
+            # todo make this come from the guard struct?
+            # currently we dont support .configure
+            num_reasks=0,
+            **payload,
+        )
+        llm_response = guard.history[-1].iterations[-1].outputs.llm_response_info
+        result = outcome_to_chat_completion(
+            validation_outcome=validation_outcome,
+            llm_response=llm_response,
+            has_tool_gd_tool_call=has_tool_gd_tool_call,
+        )
+        return result
     else:
         # need to return validated chunks that look identical to openai's