from openai import OpenAI
import streamlit as st
from langchain_openai import ChatOpenAI
from tools import sentiment_analysis_util
import numpy as np
from dotenv import load_dotenv
import os

st.set_page_config(page_title="LangChain Agent", layout="wide")
load_dotenv()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

llm = ChatOpenAI(model="gpt-3.5-turbo")

st.title("💬 ExpressMood")
st.image('el_pic.png')

# Initialize the chat history with a greeting message
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "system", "content": "💬 How can I help you?"}]

# Display all previous messages
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

sideb = st.sidebar
with st.sidebar:
    prompt = st.text_input("Enter topic for sentiment analysis: ")
    check1 = sideb.button(f"analyze {prompt}")

if check1:
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # ========================== Sentiment analysis ==========================
    # Fetch articles about the topic from Google News and Reddit, run sentiment
    # analysis on each one, then summarize the dominant sentiment.
    if len(prompt.split(' ')) < 2:
        st.write('I am analyzing Google News ...')
        news_articles = sentiment_analysis_util.fetch_news(str(prompt))
        st.write('Now, I am analyzing Reddit ...')
        reddit_news_articles = sentiment_analysis_util.fetch_reddit_news(prompt)

        analysis_results = []

        # Sentiment analysis for each Google News article that mentions the topic
        for article in news_articles:
            if prompt.lower()[0:6] in article['News_Article'].lower():
                sentiment_analysis_result = sentiment_analysis_util.analyze_sentiment(article['News_Article'])
                result = {
                    'News_Article': sentiment_analysis_result["News_Article"],
                    'Sentiment': sentiment_analysis_result["Sentiment"][0]['label'],
                    'Index': sentiment_analysis_result["Sentiment"][0]['score'],
                    'URL': article['URL'],
                }
                analysis_results.append(result)

        # Sentiment analysis for each Reddit post that mentions the topic
        for article in reddit_news_articles:
            if prompt.lower()[0:6] in article.lower():
                sentiment_analysis_result_reddit = sentiment_analysis_util.analyze_sentiment(article)
                result = {
                    'News_Article': sentiment_analysis_result_reddit["News_Article"],
                    'Index': np.round(sentiment_analysis_result_reddit["Sentiment"][0]['score'], 2),
                    'URL': article.split('URL:')[-1],
                    'Date': article.split('Date: ')[-1][0:10],
                }
                analysis_results.append(result)

        # Generate a summarized message rationalizing the dominant sentiment
        summary = sentiment_analysis_util.generate_summary_of_sentiment(analysis_results)
        st.chat_message("assistant").write(summary)
        st.session_state.messages.append({"role": "assistant", "content": summary})
# Follow-up chat: stream answers to any further questions from the OpenAI chat API
client = OpenAI(api_key=OPENAI_API_KEY)

if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"

if prompt := st.chat_input("Any other questions? "):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        )
        response = st.write_stream(stream)
    st.session_state.messages.append({"role": "assistant", "content": response})