app.py
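"""PDF2BrainCells: a Streamlit app that indexes an uploaded PDF with FAISS and
runs a LangChain retrieval chain over OpenAI models to summarize the document,
outline it, generate questions, or answer questions about it."""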
import streamlit as st
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.vectorstores import FAISS
from langchain.callbacks import StreamingStdOutCallbackHandler
from dotenv import load_dotenv
import os
load_dotenv()
# Fallback OpenAI API key loaded from the environment (e.g. via .env)
MYKEY = os.environ.get('OPENAI')
# Set page title
st.title('PDF2BrainCells 📑➡️🧠')
# Upload PDF file
uploaded_file = st.sidebar.file_uploader('Upload a PDF file', type=['pdf'])
user_api_key = st.sidebar.text_input('Enter your OpenAI API key (Optional)', type='password')
if uploaded_file:
    # Save the uploaded PDF to a temporary file so PyPDFLoader can read it
    temp_file_path = 'temp.pdf'
    with open(temp_file_path, 'wb') as f:
        f.write(uploaded_file.getvalue())
    loader = PyPDFLoader(temp_file_path)
    pages = loader.load_and_split()
    # Warn if no key was supplied, then fall back to the app's own key
    if not user_api_key:
        st.toast("For continued support, it is preferable to use your own API key", icon="⚠️")
    # Embed the pages and build a FAISS vector index for retrieval
    faiss_index = FAISS.from_documents(pages, OpenAIEmbeddings(openai_api_key=user_api_key if user_api_key else st.secrets["OPENAI"]))
    retriever = faiss_index.as_retriever()
    template = """Answer the question from an examination point of view, based only on the following context, in Markdown format:
{context}
Question: {question}
"""
    prompt = ChatPromptTemplate.from_template(template)
    streamingcall = StreamingStdOutCallbackHandler()
    model = ChatOpenAI(openai_api_key=user_api_key if user_api_key else st.secrets["OPENAI"], streaming=True, callbacks=[streamingcall])
    # Retrieval-augmented chain: fetch relevant pages, fill the prompt, call the model, parse to text
    chain = (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )
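    # Note: invoking the chain with a plain string routes that string both to the
    # retriever (as the search query) and, via RunnablePassthrough, into {question},
    # e.g. chain.invoke("List the key definitions in this document").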
    # Select operation
    option = st.selectbox('Select an operation', ['Generate Questions', 'Conversation on Text', 'Content Structure', 'Summarization'])
    if option == 'Summarization':
        # Perform summarization
        summary = chain.invoke("Provide an executive summary of the document")
        st.write(summary)
    elif option == 'Content Structure':
        # Perform structure extraction
        structure = chain.invoke("Provide a structure of contents for this document")
        st.write(structure)
    elif option == 'Generate Questions':
        # Generate questions based on content
        questions = chain.invoke("Generate questions based on the content of the document")
        st.header('Generated Questions:')
        st.markdown(questions)
    elif option == 'Conversation on Text':
        def generate_response(input_text):
            # Create a placeholder for the response
            response_placeholder = st.empty()
            # Initialize an empty string to accumulate the response
            response = ""
            # Show a spinner while the response streams in
            with st.spinner('Generating response...'):
                # Call the existing chain to get the response stream
                stream = chain.stream(input_text + " \n Give a detailed answer from an examination point of view")
                # Stream the response chunk by chunk
                for chunk in stream:
                    # Append the current chunk to the response
                    response += chunk
                    # Update the placeholder with the response so far
                    response_placeholder.markdown(response)

        with st.form('my_form'):
            text = st.text_area('Ask a Question:', 'What is this Document About?')
            submitted = st.form_submit_button('Submit')
            if not user_api_key.startswith('sk-'):
                st.toast('It is preferable to use your own OpenAI API key so the app keeps working', icon='❓')
            if submitted:
                generate_response(text)
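# To run the app locally (assuming Streamlit and the LangChain packages above are
# installed, with an OPENAI key available via .env or .streamlit/secrets.toml):
#   streamlit run app.py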