lint
dtam committed Jul 3, 2024
1 parent 67db043 commit 5bc6cd7
Showing 2 changed files with 56 additions and 43 deletions.
guardrails_api/blueprints/guards.py (50 changes: 26 additions & 24 deletions)
@@ -2,8 +2,8 @@
 import os
 from guardrails.hub import * # noqa
 from string import Template
-from typing import Any, Dict, cast, Iterator
-from flask import Blueprint, Response, request, stream_with_context, jsonify, abort
+from typing import Any, Dict, cast
+from flask import Blueprint, Response, request, stream_with_context
 from urllib.parse import unquote_plus
 from guardrails import Guard
 from guardrails.classes import ValidationOutcome
@@ -16,7 +16,10 @@
 from guardrails_api.clients.postgres_client import postgres_is_enabled
 from guardrails_api.utils.handle_error import handle_error
 from guardrails_api.utils.get_llm_callable import get_llm_callable
-from guardrails_api.utils.openai import outcome_to_chat_completion, outcome_to_stream_response
+from guardrails_api.utils.openai import (
+    outcome_to_chat_completion,
+    outcome_to_stream_response,
+)
 
 guards_bp = Blueprint("guards", __name__, url_prefix="/guards")
 
@@ -180,59 +183,58 @@ def chat_completions(guard_name: str):
     guard: Guard = Guard.from_dict(guard_struct.to_dict())
     stream = payload.get("stream", False)
     has_tool_gd_tool_call = False
 
     try:
         tools = payload.get("tools", [])
-        tools.filter(lambda tool: tool["funcion"]["name"]== "gd_response_tool")
+        tools.filter(lambda tool: tool["funcion"]["name"] == "gd_response_tool")
         has_tool_gd_tool_call = len(tools) > 0
-    except:
+    except KeyError:
         pass
 
     if not stream:
         try:
             validation_outcome: ValidationOutcome = guard(
-            # todo make this come from the guard struct?
-            # currently we dont support .configure
-            num_reasks=0,
-            **payload,
-        )
-        llm_response = guard.history.last.iterations.last.outputs.llm_response_info
+                # todo make this come from the guard struct?
+                # currently we dont support .configure
+                num_reasks=0,
+                **payload,
+            )
+            llm_response = guard.history.last.iterations.last.outputs.llm_response_info
             result = outcome_to_chat_completion(
-            validation_outcome=validation_outcome,
+                validation_outcome=validation_outcome,
                 llm_response=llm_response,
-            has_tool_gd_tool_call=has_tool_gd_tool_call
-        )
+                has_tool_gd_tool_call=has_tool_gd_tool_call,
+            )
             return result
         except Exception as e:
             raise HttpError(
                 status=400,
                 message="BadRequest",
-                cause=(
-                str(e)
-            ),
+                cause=(str(e)),
             )
 
     else:
         # need to return validated chunks that look identical to openai's
         # should look something like
         # data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":None,"finish_reason":None}]}
         # ....
         # data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":None,"finish_reason":"stop"}]}
         def openai_streamer():
             guard_stream = guard(
                 num_reasks=0,
                 **payload,
             )
             for result in guard_stream:
                 chunk_string = f"data: {json.dumps(outcome_to_stream_response(validation_outcome=result))}\n\n"
-                yield chunk_string.encode('utf-8')
+                yield chunk_string.encode("utf-8")
             # close the stream
             yield b"\n"
 
         return Response(
             stream_with_context(openai_streamer()),
         )
 
 
 @guards_bp.route("/<guard_name>/validate", methods=["POST"])
 @handle_error
 def validate(guard_name: str):
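
For reference, a minimal client-side sketch of how the SSE-style frames emitted by openai_streamer might be consumed. The URL path, guard name, and payload below are illustrative assumptions (the route decorator for chat_completions falls outside this hunk); the "data: {...}\n\n" framing and the closing bare newline match what the generator above yields.

import json

import requests  # assumed HTTP client; any client that supports streaming works

# Hypothetical endpoint; the actual route is not shown in this diff.
url = "http://localhost:8000/guards/my-guard/openai/v1/chat/completions"
payload = {
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": True,
}

with requests.post(url, json=payload, stream=True) as resp:
    for line in resp.iter_lines():
        if not line:
            continue  # skips blank lines, including the closing b"\n"
        if line.startswith(b"data: "):
            chunk = json.loads(line[len(b"data: "):])
            # Each chunk mirrors outcome_to_stream_response's output:
            # choices[0].delta.content plus a "guardrails" block.
            print(chunk["choices"][0]["delta"]["content"], end="")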
guardrails_api/utils/openai.py (49 changes: 30 additions & 19 deletions)
@@ -1,13 +1,13 @@
 from typing import List, Dict, Any
 from guardrails.classes import ValidationOutcome, LLMResponse
 
 
 def outcome_to_stream_response(validation_outcome: ValidationOutcome):
-    stream_chunk_template ={
+    stream_chunk_template = {
         "choices": [
             {
-            "delta": {
-                "content": validation_outcome.validated_output,
-            },
+                "delta": {
+                    "content": validation_outcome.validated_output,
+                },
             }
         ],
         "guardrails": {
@@ -16,36 +16,47 @@ def outcome_to_stream_response(validation_outcome: ValidationOutcome):
             "error": validation_outcome.error or None,
         },
     }
     # does this even make sense with a stream? wed need each chunk as theyre emitted
     stream_chunk = stream_chunk_template
     stream_chunk["choices"][0]["delta"]["content"] = validation_outcome.validated_output
     return stream_chunk
 
-def outcome_to_chat_completion(validation_outcome: ValidationOutcome, llm_response: LLMResponse, has_tool_gd_tool_call=False,):
-    completion_template = {
-        "choices": [{"message": {"content": ""}}]
-    } if not has_tool_gd_tool_call else {
-        "choices": [{"message": {"tool_calls": [{"function": {"arguments": ""}}]}}]
-    }
+
+def outcome_to_chat_completion(
+    validation_outcome: ValidationOutcome,
+    llm_response: LLMResponse,
+    has_tool_gd_tool_call=False,
+):
+    completion_template = (
+        {"choices": [{"message": {"content": ""}}]}
+        if not has_tool_gd_tool_call
+        else {
+            "choices": [{"message": {"tool_calls": [{"function": {"arguments": ""}}]}}]
+        }
+    )
     completion = getattr(llm_response, "full_raw_llm_output", completion_template)
     completion["guardrails"] = {
         "reask": validation_outcome.reask or None,
         "validation_passed": validation_outcome.validation_passed,
         "error": validation_outcome.error or None,
     }
 
     # string completion
     try:
-        completion["choices"][0]["message"]["content"] = validation_outcome.validated_output
-    except:
+        completion["choices"][0]["message"]["content"] = (
+            validation_outcome.validated_output
+        )
+    except KeyError:
         pass
 
     # tool completion
     try:
         choice = completion["choices"][0]
         # if this is accessible it means a tool was called so set our validated output to that
-        choice["message"]["tool_calls"][-1]["function"]["arguments"] = validation_outcome.validated_output
-    except:
+        choice["message"]["tool_calls"][-1]["function"]["arguments"] = (
+            validation_outcome.validated_output
+        )
+    except KeyError:
         pass
 
-    return completion
+    return completion
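
A minimal usage sketch for the two helpers above, using SimpleNamespace stand-ins instead of the real guardrails classes (ValidationOutcome and LLMResponse carry more fields than shown here; only the attributes these functions actually touch are stubbed):

from types import SimpleNamespace

from guardrails_api.utils.openai import (
    outcome_to_chat_completion,
    outcome_to_stream_response,
)

# Stand-in for ValidationOutcome with just the attributes used above.
outcome = SimpleNamespace(
    validated_output="Hello, world!",
    validation_passed=True,
    reask=None,
    error=None,
)
# No full_raw_llm_output attribute, so the completion template is used.
llm_response = SimpleNamespace()

chunk = outcome_to_stream_response(validation_outcome=outcome)
assert chunk["choices"][0]["delta"]["content"] == "Hello, world!"

completion = outcome_to_chat_completion(
    validation_outcome=outcome,
    llm_response=llm_response,
)
assert completion["choices"][0]["message"]["content"] == "Hello, world!"
assert completion["guardrails"]["validation_passed"] is True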
