import streamlit as st
from langchain_groq import ChatGroq
from langchain_community.utilities import ArxivAPIWrapper, WikipediaAPIWrapper
from langchain_community.tools import ArxivQueryRun, WikipediaQueryRun, DuckDuckGoSearchRun
from langchain.agents import initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()
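# Note: load_dotenv() reads a local .env file (if present) into the environment;
# the Groq API key used below is entered via the sidebar instead.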

## Arxiv and Wikipedia Tools
arxiv_wrapper = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=200)
arxiv = ArxivQueryRun(api_wrapper=arxiv_wrapper)

wiki_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=200)
wiki = WikipediaQueryRun(api_wrapper=wiki_wrapper)

search = DuckDuckGoSearchRun(name="Search")
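# Each wrapper is capped at one result and 200 characters so the agent's tool
# observations stay short; the DuckDuckGo search tool needs no API key.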

# Streamlit UI
st.title("🔎 LangChain - Chat with search")
st.markdown("""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤗 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
""")

# Sidebar for settings
st.sidebar.title("Settings")
api_key = st.sidebar.text_input("Enter your Groq API Key:", type="password")

# Initialize session state for messages if not already set
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
    ]

# Display previous chat messages
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

# Capture user input from chat
if prompt := st.chat_input(placeholder="What is machine learning?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Initialize the language model
    if api_key:  # Ensure an API key is entered
        llm = ChatGroq(groq_api_key=api_key, model_name="Llama3-8b-8192", streaming=True)
        tools = [search, arxiv, wiki]

        # Initialize the search agent with the tools and the language model
        search_agent = initialize_agent(
            tools,
            llm,
            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
            handle_parsing_errors=True,  # Enable error handling for parsing issues
        )
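
        # The ZERO_SHOT_REACT_DESCRIPTION agent follows the ReAct loop (thought ->
        # tool call -> observation), choosing a tool purely from its name and
        # description; StreamlitCallbackHandler below renders each step live in the chat.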
        with st.chat_message("assistant"):
            st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
            try:
                # Try running the agent
                response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
                st.session_state.messages.append({"role": "assistant", "content": response})
                st.write(response)
            except ValueError as e:
                # Catch and display output parsing errors
                st.error(f"An error occurred while parsing the LLM's output: {str(e)}")
                st.session_state.messages.append({"role": "assistant", "content": "Sorry, I encountered an error processing your request."})
                st.write("Sorry, I encountered an error processing your request.")
    else:
        st.error("Please enter your Groq API Key in the settings.")