
Sample Chainlit app placeholder
Co-authored-by: Dan [email protected]
Co-authored-by: Brandon [email protected]
Co-authored-by: Enrico [email protected]
Daniel Foley committed Nov 27, 2024
1 parent 74eb75e commit 91b7015
Showing 3 changed files with 86 additions and 136 deletions.
Dockerfile (2 changes: 1 addition & 1 deletion)

@@ -62,7 +62,7 @@ COPY --chown=user ./requirements.txt requirements.txt
-RUN pip install --no-cache-dir --upgrade -r requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
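The rest of the Dockerfile is not shown in the hunk. A minimal sketch of the kind of image this line typically lives in, assuming a Hugging Face Spaces style layout (the base image, user id, and port are assumptions, not taken from the commit):

# Sketch only: base image, uid, and port are assumed, not from the commit.
FROM python:3.11-slim

# A non-root user, implied by the --chown=user flags in the hunk above
RUN useradd -m -u 1000 user
USER user
ENV PATH="/home/user/.local/bin:$PATH"
WORKDIR /home/user/app

COPY --chown=user ./requirements.txt requirements.txt
# Dropping --upgrade is harmless on a fresh layer (nothing is installed
# yet) and avoids force-upgrading packages preinstalled in the base image,
# keeping the build closer to the pins in requirements.txt.
RUN pip install --no-cache-dir -r requirements.txt

COPY --chown=user . .
CMD ["chainlit", "run", "app.py", "--host", "0.0.0.0", "--port", "7860"]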
app.py (210 changes: 79 additions & 131 deletions)

@@ -1,265 +1,213 @@

The placeholder demo app (removed):

import chainlit as cl
from typing import Optional
import time

# Store conversation history
conversation_memory = []

@cl.on_chat_start
async def start():
    """Initializes the chat session"""
    # Send an initial message
    await cl.Message(
        content="👋 Hello! I'm your AI assistant. How can I help you today?",
        author="Assistant"
    ).send()

    # Set some session variables
    cl.user_session.set("conversation_started", True)

@cl.on_message
async def main(message: cl.Message):
    """Main message handler"""
    # Simulate some processing time
    with cl.Step("Processing...") as step:
        time.sleep(1)  # Simulated delay
        step.output = "Processed message"

    # Store message in conversation history
    conversation_memory.append({
        "role": "user",
        "content": message.content
    })

    # Create a response
    response = f"I received your message: '{message.content}'. This is a demo response."

    # Store response in conversation history
    conversation_memory.append({
        "role": "assistant",
        "content": response
    })

    # Send response with typing effect
    await cl.Message(
        content=response,
        author="Assistant"
    ).send()

@cl.password_auth_callback
def auth_callback(username: str, password: str) -> Optional[cl.User]:
    """Basic authentication handler"""
    # This is a simple example - in production, use proper authentication
    if username == "demo" and password == "password":
        return cl.User(identifier="demo", metadata={"role": "user"})
    return None

@cl.on_chat_end
async def end():
    """Cleanup when chat ends"""
    await cl.Message(content="👋 Thank you for chatting! Goodbye!").send()

# Custom action handler example
@cl.action_callback("feedback")
async def on_action(action):
    """Handles custom feedback action"""
    await cl.Message(content=f"Received feedback: {action.value}").send()

The new LangChain document-QA app (added):

import os
from typing import List

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import (
    ConversationalRetrievalChain,
)
from langchain.chat_models import ChatOpenAI
from langchain.docstore.document import Document
from langchain.memory import ChatMessageHistory, ConversationBufferMemory

import chainlit as cl

os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")

text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)


@cl.on_chat_start
async def on_chat_start():
    files = None

    # Wait for the user to upload a file
    while files == None:
        files = await cl.AskFileMessage(
            content="Please upload a text file to begin!",
            accept=["text/plain"],
            max_size_mb=20,
            timeout=180,
        ).send()

    file = files[0]

    msg = cl.Message(content=f"Processing `{file.name}`...")
    await msg.send()

    with open(file.path, "r", encoding="utf-8") as f:
        text = f.read()

    # Split the text into chunks
    texts = text_splitter.split_text(text)

    # Create a metadata for each chunk
    metadatas = [{"source": f"{i}-pl"} for i in range(len(texts))]

    # Create a Chroma vector store
    embeddings = OpenAIEmbeddings()
    docsearch = await cl.make_async(Chroma.from_texts)(
        texts, embeddings, metadatas=metadatas
    )

    message_history = ChatMessageHistory()

    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        chat_memory=message_history,
        return_messages=True,
    )

    # Create a chain that uses the Chroma vector store
    chain = ConversationalRetrievalChain.from_llm(
        ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),
        chain_type="stuff",
        retriever=docsearch.as_retriever(),
        memory=memory,
        return_source_documents=True,
    )

    # Let the user know that the system is ready
    msg.content = f"Processing `{file.name}` done. You can now ask questions!"
    await msg.update()

    cl.user_session.set("chain", chain)


@cl.on_message
async def main(message: cl.Message):
    chain = cl.user_session.get("chain")  # type: ConversationalRetrievalChain
    cb = cl.AsyncLangchainCallbackHandler()

    res = await chain.acall(message.content, callbacks=[cb])
    answer = res["answer"]
    source_documents = res["source_documents"]  # type: List[Document]

    text_elements = []  # type: List[cl.Text]

    if source_documents:
        for source_idx, source_doc in enumerate(source_documents):
            source_name = f"source_{source_idx}"
            # Create the text element referenced in the message
            text_elements.append(
                cl.Text(content=source_doc.page_content, name=source_name, display="side")
            )
        source_names = [text_el.name for text_el in text_elements]

        if source_names:
            answer += f"\nSources: {', '.join(source_names)}"
        else:
            answer += "\nNo sources found"

    await cl.Message(content=answer, elements=text_elements).send()
requirements.txt (10 changes: 6 additions & 4 deletions)

@@ -1,9 +1,11 @@
-fastapi
-uvicorn[standard]
-chainlit
+chainlit==1.3.0
+langchain
+langchain-community
+pydantic==2.7.3
+pydantic-settings==2.6.1
+pydantic_core==2.18.4
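The pydantic pins appear to travel together: pydantic_core 2.18.4 looks like the core build matching pydantic 2.7.3, so the two should move in lockstep if either is bumped. A quick post-install sanity check (a sketch; package names as pinned above):

from importlib.metadata import version

# Print the resolved version of each pinned package; chainlit should
# report 1.3.0 and pydantic 2.7.3 if the pins were honored.
for pkg in ("chainlit", "langchain", "langchain-community", "pydantic", "pydantic_core"):
    print(pkg, version(pkg))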
