From 5bc6cd7bbe97252764961a9c55fe635c53f1f54f Mon Sep 17 00:00:00 2001
From: David Tam
Date: Wed, 3 Jul 2024 11:06:47 -0700
Subject: [PATCH] lint

---
 guardrails_api/blueprints/guards.py | 50 +++++++++++++++--------------
 guardrails_api/utils/openai.py      | 49 +++++++++++++++++-----------
 2 files changed, 56 insertions(+), 43 deletions(-)

diff --git a/guardrails_api/blueprints/guards.py b/guardrails_api/blueprints/guards.py
index e0e4707..a34bfc4 100644
--- a/guardrails_api/blueprints/guards.py
+++ b/guardrails_api/blueprints/guards.py
@@ -2,8 +2,8 @@
 import os
 from guardrails.hub import * # noqa
 from string import Template
-from typing import Any, Dict, cast, Iterator
-from flask import Blueprint, Response, request, stream_with_context, jsonify, abort
+from typing import Any, Dict, cast
+from flask import Blueprint, Response, request, stream_with_context
 from urllib.parse import unquote_plus
 from guardrails import Guard
 from guardrails.classes import ValidationOutcome
@@ -16,7 +16,10 @@
 from guardrails_api.clients.postgres_client import postgres_is_enabled
 from guardrails_api.utils.handle_error import handle_error
 from guardrails_api.utils.get_llm_callable import get_llm_callable
-from guardrails_api.utils.openai import outcome_to_chat_completion, outcome_to_stream_response
+from guardrails_api.utils.openai import (
+    outcome_to_chat_completion,
+    outcome_to_stream_response,
+)
 
 guards_bp = Blueprint("guards", __name__, url_prefix="/guards")
 
@@ -180,44 +183,42 @@ def chat_completions(guard_name: str):
     guard: Guard = Guard.from_dict(guard_struct.to_dict())
     stream = payload.get("stream", False)
     has_tool_gd_tool_call = False
-    
+
     try:
         tools = payload.get("tools", [])
-        tools.filter(lambda tool: tool["funcion"]["name"]== "gd_response_tool")
+        tools.filter(lambda tool: tool["funcion"]["name"] == "gd_response_tool")
         has_tool_gd_tool_call = len(tools) > 0
-    except:
+    except KeyError:
         pass
 
     if not stream:
         try:
            validation_outcome: ValidationOutcome = guard(
-            # todo make this come from the guard struct?
-            # currently we dont support .configure
-            num_reasks=0,
-            **payload,
-            )
-            llm_response = guard.history.last.iterations.last.outputs.llm_response_info
+                # todo make this come from the guard struct?
+                # currently we dont support .configure
+                num_reasks=0,
+                **payload,
+            )
+            llm_response = guard.history.last.iterations.last.outputs.llm_response_info
             result = outcome_to_chat_completion(
-            validation_outcome=validation_outcome,
+                validation_outcome=validation_outcome,
                 llm_response=llm_response,
-            has_tool_gd_tool_call=has_tool_gd_tool_call
-            )
+                has_tool_gd_tool_call=has_tool_gd_tool_call,
+            )
             return result

         except Exception as e:
             raise HttpError(
                 status=400,
                 message="BadRequest",
-                cause=(
-                    str(e)
-                ),
+                cause=(str(e)),
             )
 
     else:
-    # need to return validated chunks that look identical to openai's
-    # should look something like
-    # data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":None,"finish_reason":None}]}
-    # ....
-    # data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":None,"finish_reason":"stop"}]}
+        # need to return validated chunks that look identical to openai's
+        # should look something like
+        # data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":None,"finish_reason":None}]}
+        # ....
+        # data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":None,"finish_reason":"stop"}]}
         def openai_streamer():
             guard_stream = guard(
                 num_reasks=0,
@@ -225,7 +226,7 @@
             )
             for result in guard_stream:
                 chunk_string = f"data: {json.dumps(outcome_to_stream_response(validation_outcome=result))}\n\n"
-                yield chunk_string.encode('utf-8')
+                yield chunk_string.encode("utf-8")
             # close the stream
             yield b"\n"
 
@@ -233,6 +234,7 @@
         stream_with_context(openai_streamer()),
     )
 
+
 @guards_bp.route("/<guard_name>/validate", methods=["POST"])
 @handle_error
 def validate(guard_name: str):
diff --git a/guardrails_api/utils/openai.py b/guardrails_api/utils/openai.py
index 3545872..ac43e67 100644
--- a/guardrails_api/utils/openai.py
+++ b/guardrails_api/utils/openai.py
@@ -1,13 +1,13 @@
-from typing import List, Dict, Any
 from guardrails.classes import ValidationOutcome, LLMResponse
 
+
 def outcome_to_stream_response(validation_outcome: ValidationOutcome):
-    stream_chunk_template ={
+    stream_chunk_template = {
         "choices": [
             {
-            "delta": {
-                "content": validation_outcome.validated_output,
-            },
+                "delta": {
+                    "content": validation_outcome.validated_output,
+                },
             }
         ],
         "guardrails": {
@@ -16,36 +16,47 @@
             "reask": validation_outcome.reask or None,
             "validation_passed": validation_outcome.validation_passed,
             "error": validation_outcome.error or None,
         },
     }
-    # does this even make sense with a stream? wed need each chunk as theyre emitted
+    # does this even make sense with a stream?
+    # wed need each chunk as theyre emitted
     stream_chunk = stream_chunk_template
     stream_chunk["choices"][0]["delta"]["content"] = validation_outcome.validated_output
     return stream_chunk
 
-def outcome_to_chat_completion(validation_outcome: ValidationOutcome, llm_response: LLMResponse, has_tool_gd_tool_call=False,):
-    completion_template = {
-        "choices": [{"message": {"content": ""}}]
-    } if not has_tool_gd_tool_call else {
-        "choices": [{"message": {"tool_calls": [{"function": {"arguments": ""}}]}}]
-    }
+
+def outcome_to_chat_completion(
+    validation_outcome: ValidationOutcome,
+    llm_response: LLMResponse,
+    has_tool_gd_tool_call=False,
+):
+    completion_template = (
+        {"choices": [{"message": {"content": ""}}]}
+        if not has_tool_gd_tool_call
+        else {
+            "choices": [{"message": {"tool_calls": [{"function": {"arguments": ""}}]}}]
+        }
+    )
     completion = getattr(llm_response, "full_raw_llm_output", completion_template)
     completion["guardrails"] = {
         "reask": validation_outcome.reask or None,
         "validation_passed": validation_outcome.validation_passed,
         "error": validation_outcome.error or None,
     }
-    
+
     # string completion
     try:
-        completion["choices"][0]["message"]["content"] = validation_outcome.validated_output
-    except:
+        completion["choices"][0]["message"]["content"] = (
+            validation_outcome.validated_output
+        )
+    except KeyError:
         pass
-    
+
     # tool completion
     try:
         choice = completion["choices"][0]
         # if this is accessible it means a tool was called so set our validated output to that
-        choice["message"]["tool_calls"][-1]["function"]["arguments"] = validation_outcome.validated_output
-    except:
+        choice["message"]["tool_calls"][-1]["function"]["arguments"] = (
+            validation_outcome.validated_output
+        )
+    except KeyError:
         pass
-    return completion
\ No newline at end of file
+    return completion
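
Side note on the stream format the patch is aiming for: below is a minimal, self-contained sketch of the chunk shape and the "data: ..." SSE framing that outcome_to_stream_response and openai_streamer produce. The FakeOutcome stand-in is purely illustrative (the real code receives a guardrails ValidationOutcome); only the dict shape and the framing are taken from the patch.

# Illustrative only: a stand-in for guardrails' ValidationOutcome, carrying just
# the fields the patch reads (validated_output, validation_passed, reask, error).
import json
from dataclasses import dataclass
from typing import Optional


@dataclass
class FakeOutcome:
    validated_output: str
    validation_passed: bool
    reask: Optional[str] = None
    error: Optional[str] = None


def outcome_to_stream_response(outcome: FakeOutcome) -> dict:
    # Same shape as the helper in the patch: one OpenAI-style choice delta
    # plus a "guardrails" block carrying the validation metadata.
    return {
        "choices": [{"delta": {"content": outcome.validated_output}}],
        "guardrails": {
            "reask": outcome.reask or None,
            "validation_passed": outcome.validation_passed,
            "error": outcome.error or None,
        },
    }


def sse_frame(chunk: dict) -> bytes:
    # The endpoint emits each chunk as a server-sent-events "data:" line,
    # exactly as openai_streamer does: "data: <json>\n\n", then b"\n" to close.
    return f"data: {json.dumps(chunk)}\n\n".encode("utf-8")


if __name__ == "__main__":
    outcome = FakeOutcome(validated_output="Hello", validation_passed=True)
    print(sse_frame(outcome_to_stream_response(outcome)))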
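
For completeness, a rough sketch of how a client might consume that stream. The URL and request body are placeholders, since the patch does not show how the chat-completions route is mounted; only the "data: {...}\n\n" framing and the "guardrails" metadata block come from the patch.

# Hypothetical client: the exact route and payload depend on how the blueprint
# is mounted; only the SSE framing is taken from the patch above.
import json
import requests

resp = requests.post(
    "http://localhost:8000/guards/my-guard/openai/v1/chat/completions",  # placeholder URL
    json={"messages": [{"role": "user", "content": "hi"}], "stream": True},
    stream=True,
)
for line in resp.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue  # skip keep-alive blanks and the closing newline
    chunk = json.loads(line[len(b"data: "):])
    delta = chunk["choices"][0]["delta"].get("content") or ""
    print(delta, end="", flush=True)
    if chunk.get("guardrails", {}).get("validation_passed") is False:
        break  # stop reading once a chunk reports failed validation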