From fb3dd89f1146455f17c2adcaf5fcaebe7055bee9 Mon Sep 17 00:00:00 2001
From: Joan Cabezas
Date: Tue, 3 Sep 2024 13:47:06 -0700
Subject: [PATCH] deleted multion plugin

---
 backend/utils/plugins.py             |  5 ++++-
 community-plugins.json               |  2 +-
 plugins/example/_multion/router.py   | 32 +++++++++++++++++-----------
 plugins/example/advanced/realtime.py | 21 ++++++++++--------
 4 files changed, 36 insertions(+), 24 deletions(-)

diff --git a/backend/utils/plugins.py b/backend/utils/plugins.py
index 8c1135f3b..51bd0518d 100644
--- a/backend/utils/plugins.py
+++ b/backend/utils/plugins.py
@@ -56,7 +56,8 @@ def get_plugins_data(uid: str, include_reviews: bool = False) -> List[Plugin]:
 
 def trigger_external_integrations(uid: str, memory: Memory) -> list:
     plugins: List[Plugin] = get_plugins_data(uid, include_reviews=False)
-    filtered_plugins = [plugin for plugin in plugins if plugin.triggers_on_memory_creation() and plugin.enabled]
+    filtered_plugins = [plugin for plugin in plugins if
+                        plugin.triggers_on_memory_creation() and plugin.enabled and not plugin.deleted]
     if not filtered_plugins:
         return []
 
@@ -131,6 +132,8 @@ def _single(plugin: Plugin):
             return
 
         response_data = response.json()
+        if not response_data:
+            return
         message = response_data.get('message', '')
         print('Plugin', plugin.id, 'response:', message)
         if message and len(message) > 5:
diff --git a/community-plugins.json b/community-plugins.json
index 9aabacd97..8feb6d62c 100644
--- a/community-plugins.json
+++ b/community-plugins.json
@@ -703,6 +703,6 @@
       "setup_completed_url": "https://based-hardware--plugins-api.modal.run/multion/check_setup_completion",
       "setup_instructions_file_path": "/plugins/instructions/multion-amazon/README.md"
     },
-    "deleted": false
+    "deleted": true
   }
 ]
diff --git a/plugins/example/_multion/router.py b/plugins/example/_multion/router.py
index 5f7ea2427..68ec8f176 100644
--- a/plugins/example/_multion/router.py
+++ b/plugins/example/_multion/router.py
@@ -27,13 +27,17 @@ class BooksToBuy(BaseModel):
 
 
 def retrieve_books_to_buy(transcript: str) -> List[str]:
-    chat = ChatGroq(temperature=0, model="llama-3.1-8b-instant").with_structured_output(BooksToBuy)
+    chat = ChatGroq(temperature=0, model="llama3-groq-8b-8192-tool-use-preview").with_structured_output(BooksToBuy)
 
-    response: BooksToBuy = chat.invoke(f'''The following is the transcript of a conversation.
+    response: BooksToBuy = chat.invoke(f'''
+    The following is the transcript of a conversation:
+    ```
     {transcript}
-
-    Your task is to determine first if the speakers talked or mentioned books to each other \
-    at some point during the conversation, and provide the titles of those.''')
+    ```
+    Your task is to identify the titles of the books mentioned in the conversation, if any.
+    Make sure to find clear mentions of book titles, and not just random conversations.
+    Make sure to only include the titles of the books, and not any other information.
+    ''')
     print('Books to buy:', response.books)
     return response.books
 
@@ -162,24 +166,26 @@ async def initiate_process_transcript(data: RealtimePluginRequest, uid: str = Qu
     if not user_id:
         raise HTTPException(status_code=400, detail="Invalid UID or USERID not found.")
 
-    # session_id = 'multion-' + data.session_id
-    # db.clean_all_transcripts_except(uid, session_id)
-    # transcript: List[TranscriptSegment] = db.append_segment_to_transcript(uid, session_id, data.segments)
-    transcript = data.segments
+    session_id = 'multion-' + data.session_id
+    db.clean_all_transcripts_except(uid, session_id)
+    transcript: List[TranscriptSegment] = db.append_segment_to_transcript(uid, session_id, data.segments)
+    transcript_str = TranscriptSegment.segments_as_string(transcript)
+    if len(transcript_str) > 100:
+        transcript_str = transcript_str[-100:]
 
     try:
-        books = retrieve_books_to_buy(transcript)
+        books = retrieve_books_to_buy(transcript_str)
         if not books:
             return {'message': ''}
-        # else:
-        #     db.remove_transcript(uid, data.session_id)
-
+        db.remove_transcript(uid, data.session_id)
         result = await asyncio.wait_for(call_multion(books, user_id), timeout=120)
     except asyncio.TimeoutError:
         print("Timeout error occurred")
+        db.remove_transcript(uid, data.session_id)
         return
     except Exception as e:
         print(f"Error calling Multion API: {str(e)}")
+        db.remove_transcript(uid, data.session_id)
         return
 
     if isinstance(result, bytes):
diff --git a/plugins/example/advanced/realtime.py b/plugins/example/advanced/realtime.py
index 971501e1a..d3e0518f6 100644
--- a/plugins/example/advanced/realtime.py
+++ b/plugins/example/advanced/realtime.py
@@ -3,11 +3,11 @@
 from fastapi import APIRouter
 from langchain_community.tools.asknews import AskNewsSearch
 from langchain_core.pydantic_v1 import BaseModel, Field
+from langchain_groq import ChatGroq
 from langchain_openai import ChatOpenAI
 
 from db import clean_all_transcripts_except, append_segment_to_transcript, remove_transcript
 from models import RealtimePluginRequest, EndpointResponse, TranscriptSegment
-from langchain_groq import ChatGroq
 
 router = APIRouter()
 chat = ChatOpenAI(model='gpt-4o', temperature=0)
@@ -15,7 +15,7 @@
 chat_groq_8b = ChatGroq(
     temperature=0,
     # model="llama-3.1-70b-versatile",
-    model="llama-3.1-8b-instant",
+    model="llama3-70b-8192",
     # model='llama3-groq-8b-8192-tool-use-preview',
 )
 
@@ -28,9 +28,10 @@ def news_checker(conversation: List[TranscriptSegment]) -> str:
     chat_with_parser = chat_groq_8b.with_structured_output(NewsCheck)
     conversation_str = TranscriptSegment.segments_as_string(conversation)
     result: NewsCheck = chat_with_parser.invoke(f'''
-    You will be given a segment of an ongoing conversation.
+    You will be given the last few transcript words of an ongoing conversation.
     Your task is to determine if the conversation specifically discusses facts that appear conspiratorial,
     unscientific, or super biased.
+    Historic events, that seem to contradict logic and common sense should also be considered.
     Only if the topic is of significant importance and urgency for the user to be aware of,
     provide a question to be asked to a news search engine, in order to debunk the conversation in process.
     Otherwise, output an empty question.
@@ -64,13 +65,14 @@ def news_checker(conversation: List[TranscriptSegment]) -> str:
 def news_checker_endpoint(uid: str, data: RealtimePluginRequest):
     # return {'message': ''}
     print('news_checker_endpoint', uid)
-    clean_all_transcripts_except(uid, data.session_id)
-    transcript: List[TranscriptSegment] = append_segment_to_transcript(uid, data.session_id, data.segments)
+    session_id = 'news-checker-' + data.session_id
+    clean_all_transcripts_except(uid, session_id)
+    transcript: List[TranscriptSegment] = append_segment_to_transcript(uid, session_id, data.segments)
 
     message = news_checker(transcript)
     if message:
         # so that in the next call with already triggered stuff, it doesn't trigger again
-        remove_transcript(uid, data.session_id)
+        remove_transcript(uid, session_id)
 
     return {'message': message}
 
@@ -102,12 +104,13 @@ def emotional_support(segments: list[TranscriptSegment]) -> str:
 @router.post('/emotional-support', tags=['advanced', 'realtime'], response_model=EndpointResponse)
 def emotional_support_plugin(uid: str, data: RealtimePluginRequest):
     # return {'message': ''}
-    clean_all_transcripts_except(uid, data.session_id)
-    transcript: List[TranscriptSegment] = append_segment_to_transcript(uid, data.session_id, data.segments)
+    session_id = 'emotional-support-' + data.session_id
+    clean_all_transcripts_except(uid, session_id)
+    transcript: List[TranscriptSegment] = append_segment_to_transcript(uid, session_id, data.segments)
 
     message = emotional_support(transcript)
     if message:
         # so that in the next call with already triggered stuff, it doesn't trigger again
-        remove_transcript(uid, data.session_id)
+        remove_transcript(uid, session_id)
 
     return {'message': message}