import streamlit as st
import os
import requests
from bs4 import BeautifulSoup
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from langchain.tools import Tool
from langchain.chains import RetrievalQA
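# NOTE: the imports above assume the legacy (pre-0.2) LangChain package layout.
# On newer releases the same classes are expected to live in the split packages,
# e.g. (an assumption about your installed versions, adjust as needed):
#   from langchain_openai import ChatOpenAI, OpenAIEmbeddings
#   from langchain_community.vectorstores import FAISS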
# Page configuration
st.set_page_config(page_title="Book Q&A System", page_icon="📚")
# App title
st.title("Alice in Wonderland Q&A System")
# Sidebar for API key
with st.sidebar:
    st.title("Settings")
    # API Key input with clear instructions
    st.markdown("### OpenAI API Key")
    st.markdown("**Enter your OpenAI API key to use this application**")
    openai_api_key = st.text_input("OpenAI API Key", type="password",
                                   help="Get your API key from https://platform.openai.com/account/api-keys")
    # Test button for API key validation
    if st.button("Validate API Key"):
        if not openai_api_key:
            st.error("Please enter an API key")
        else:
            try:
                # Set the API key in environment variables
                os.environ["OPENAI_API_KEY"] = openai_api_key
                # Initialize a basic model to test
                llm = ChatOpenAI(
                    model_name="gpt-3.5-turbo",
                    temperature=0
                )
                # Test with a simple prompt
                result = llm.predict("Say 'API key is valid' if you can read this.")
                if "API key is valid" in result:
                    st.success("API key is valid!")
                else:
                    st.error("API key validation failed.")
            except Exception as e:
                st.error(f"API key error: {str(e)}")
    # Reset button
    if st.button("Reset Chat"):
        st.session_state.messages = []
        st.session_state.agent = None
        st.session_state.book_processed = False
        # st.experimental_rerun() is kept for older Streamlit releases; newer
        # versions replace it with st.rerun()
        st.experimental_rerun()
# Initialize session state
if "messages" not in st.session_state:
st.session_state.messages = []
if "book_processed" not in st.session_state:
st.session_state.book_processed = False
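# Streamlit reruns the whole script on every interaction; st.session_state is what
# keeps the chat history and processing flags alive between those reruns.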
# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Alice in Wonderland URL
book_url = "https://www.gutenberg.org/files/11/11-0.txt"
# Function to fetch and process the book
@st.cache_resource
def process_book(book_url, api_key):
    # Download the book
    response = requests.get(book_url)
    if response.status_code != 200:
        return "Error downloading the book"
    # Parse the text
    text = response.text
    # Clean the text (simple cleaning)
    soup = BeautifulSoup(text, 'html.parser')
    cleaned_text = soup.get_text()
    # Split the text into chunks
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len
    )
    chunks = text_splitter.split_text(cleaned_text)
    # Create embeddings using OpenAI (use the api_key argument passed in,
    # which also lets st.cache_resource key the cached result on it)
    embeddings = OpenAIEmbeddings(
        openai_api_key=api_key
    )
    # Create vector store
    vector_store = FAISS.from_texts(chunks, embeddings)
    return vector_store
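# Optional sketch (an assumption, not part of the original app): the FAISS index
# could be saved and reloaded between runs instead of being rebuilt each time,
# using a hypothetical local folder name such as "alice_faiss_index":
#     vector_store.save_local("alice_faiss_index")
#     vector_store = FAISS.load_local("alice_faiss_index", embeddings)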
# Function to create a LangChain Agent with tools
def create_langchain_agent(vector_store):
    # Set API key for the session
    os.environ["OPENAI_API_KEY"] = openai_api_key
    llm = ChatOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0.7
    )
    # Create a retrieval tool ("stuff" inserts the retrieved chunks directly into the prompt)
    retrieval_qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vector_store.as_retriever(search_kwargs={"k": 3})
    )
    tools = [
        Tool(
            name="BookQATool",
            func=retrieval_qa.run,
            description="Useful for answering questions about Alice in Wonderland. Input should be a question about the book."
        )
    ]
    # Create memory
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    # Initialize agent
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
        verbose=True,
        memory=memory,
        handle_parsing_errors=True
    )
    return agent
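# NOTE: initialize_agent and AgentType belong to the legacy LangChain agent API,
# which newer LangChain releases deprecate; running this file as-is assumes a
# langchain version that still ships these helpers.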
# Main content area
if not openai_api_key:
    # Display prominent instructions if no API key
    st.warning("Please enter your OpenAI API key in the sidebar to use this application.")
else:
    # Process the book when needed
    if not st.session_state.book_processed:
        with st.spinner("Processing Alice in Wonderland... This may take a minute."):
            vector_store = process_book(book_url, openai_api_key)
            if isinstance(vector_store, str):
                # process_book returns an error message string if the download failed
                st.error(vector_store)
            else:
                st.session_state.agent = create_langchain_agent(vector_store)
                st.session_state.book_processed = True
                st.success("Alice in Wonderland processed successfully!")
    # Chat input - only show if API key is provided
    if prompt := st.chat_input("Ask a question about Alice in Wonderland"):
        # Add user message to chat
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Display user message
        with st.chat_message("user"):
            st.markdown(prompt)
        # Generate and display assistant response
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                try:
                    response = st.session_state.agent.run(input=prompt)
                    st.markdown(response)
                except Exception as e:
                    st.error(f"Error: {str(e)}")
                    response = "I encountered an error processing your question. Please try a different question or reset the chat."
                    st.markdown(response)
        # Add assistant response to chat
        st.session_state.messages.append({"role": "assistant", "content": response})
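# To run this file locally (assuming it is saved as app.py): streamlit run app.py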