File size: 11,113 Bytes
05a3e2c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
from openai import OpenAI
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from typing import Annotated
import operator
from typing import Sequence, TypedDict
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import numpy as np
import pandas as pd
from dotenv import load_dotenv
import os
from typing import Annotated
import operator
from typing import Sequence, TypedDict
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import matplotlib.pyplot as plt
from langchain.schema.output_parser import StrOutputParser
from tools import data_analyst #forecasting_expert_arima, forecasting_expert_rf, evaluator, investment_advisor
from tools import crypto_sentiment_analysis_util
import app_crypto_rf_model as rf
import app_crypto_scrape as sa
import app_crypto_arima_model as arima
import streamlit as st

from datetime import date
today = date.today()  # stamps today's forecast row in current_forecast.csv

# NOTE: st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="LangChain Agent", layout="wide")
load_dotenv()
# Fail fast with KeyError here if the key is missing, rather than at the
# first model call deep inside the analysis flow.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

llm = ChatOpenAI(model="gpt-3.5-turbo")

#======================== AGENTS ==================================
# The agent state is the input to each node in the graph
class AgentState(TypedDict):
    """Shared state passed between graph nodes.

    NOTE(review): not referenced anywhere else in this file — looks like a
    remnant of a LangGraph multi-agent workflow; confirm before removing.
    """
    # The annotation tells the graph that new messages will always
    # be added to the current states
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # The 'next' field indicates where to route to next
    next: str
             
tool=data_analyst.data_analyst_tools()

from langchain_core.runnables import RunnableConfig
st.title("💬 Krypto")

# Initialize the chat transcript exactly once per session.
# BUG FIX: the guard previously tested "chat_history", a key that is never
# written anywhere in this script, so the message list was re-created — and
# the accumulated chat history wiped — on every Streamlit rerun. Guard on
# the key that is actually set.
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role":"system", "content":"""
How can I help you?
"""}]

# Streamlit UI elements
st.image('crypto_image.png')

sideb = st.sidebar

with st.sidebar:
    title = st.text_input("Start by entering the currency name:")

check1 = sideb.button(f"analyze {title}")
results=[]

if check1:
    st.write(f"I am now producing analysis for {title}")

    # Ask the LLM to map the user-entered currency name to its Coingecko
    # ticker, delimited with ">>" so it can be parsed out of the reply.
    model = ChatOpenAI(temperature=0.7, api_key=OPENAI_API_KEY)
    chain= model | StrOutputParser()
    result=chain.invoke(f"You are a cryptocurrency data analyst.\
                Provide correct cryptocurrency ticker from Coingecko website for cryptocurrency: {title}.\
                Expected output: ticker.\
                Provide it in the following format: >>cryptocurrencyticker>> \
                for example: >>BTC>>")

    print(result)
    # BUG FIX: the reply is not guaranteed to contain ">>"; indexing
    # split(">>")[1] unconditionally raised IndexError whenever the model
    # ignored the requested format. Split once and guard the access.
    parts = str(result).split(">>")
    print('ticker', parts[0])
    if len(parts) > 1 and len(parts[1]) < 10:
        # Normal case: ">>BTC>>" splits into ['', 'BTC', ''] — take part 1.
        cryptocurrencyticker = parts[1]
    else:
        # Fallback: treat the whole (or leading) text as the ticker.
        cryptocurrencyticker = parts[0]
    cryptocurrency=title

    print(cryptocurrency,cryptocurrencyticker)

    #================== Scrape Current/Historical Price ====================
    df=sa.scrape_crypto(cryptocurrency,cryptocurrencyticker)
    if len(df)>0:
        print("Running forecasting models on historical prices")
        df_with_forecast_rf, accuracy_rf, result_rf=rf.model_run(df)
        df_with_forecast_arima, accuracy_arima, result_arima=arima.model_run(df)

        # Pick the model with the higher R2 and build the advisory prompt,
        # mentioning the other model's forecast for comparison.
        if accuracy_rf<accuracy_arima:
            # BUG FIX: the closing parenthesis was misplaced —
            # (np.round(x), 2) built a tuple instead of rounding to 2 dp.
            forecasted_price=np.round(np.array(df_with_forecast_arima['prices'])[-1],2)
            prompt = f"You are an investment recommendation expert for crypto currency {cryptocurrency}.You are selecting the predicted price from the ARIMA model because its accuracy (R2 measure:{(np.round(accuracy_arima,2))}) is higher than the accuracy (R2:{(np.round(accuracy_rf,2))}) for random forest model.Compare current price to the predicted price. If current price exceeds predicted price, recommend selling the stock, otherwise recommend buying. Tell the user what the current price, predicted price and accuracy values are. You know that the predicted price for tomorrow using random forest model is {(np.round(np.array(df_with_forecast_rf['prices'])[-1],2))}. The prediction accuracy for the random forest model is {(np.round(accuracy_rf,2))}. The current price of {cryptocurrency} is: {(np.round(df['prices'][-1],2))}. "
        else:
            forecasted_price=np.round(np.array(df_with_forecast_rf['prices'])[-1],2)
            # BUG FIX: this branch labelled the ARIMA comparison figures as
            # "random forest" and embedded a (price, 2) tuple in the prompt
            # because of the same misplaced parenthesis as above.
            prompt = f"You are an investment recommendation expert for crypto currency {cryptocurrency}. You are selecting the predicted price from the random forest model because its accuracy (R2 measure:{(np.round(accuracy_rf,2))}) is higher than the accuracy (R2:{(np.round(accuracy_arima,2))}) for arima model. Compare current price to the predicted price. If current price exceeds predicted price, recommend selling the stock, otherwise recommend buying. Tell the user what the current price, predicted price and accuracy values are. You know that the predicted price for tomorrow using ARIMA model is {(np.round(np.array(df_with_forecast_arima['prices'])[-1],2))}. The prediction accuracy for the ARIMA model is {(np.round(accuracy_arima,2))}. The current price of {cryptocurrency} is: {(np.round(df['prices'][-1],2))}. "

        # Append today's forecasts to the rolling CSV (one row per day).
        # infer_datetime_format is deprecated in pandas 2.x and is now the
        # default behavior, so it is omitted.
        current_forecast=pd.read_csv('current_forecast.csv',index_col='date',parse_dates=True)
        today=pd.to_datetime(today).strftime('%Y-%m-%d')
        print([(np.array(df_with_forecast_arima['prices'])[-1]),np.array(df_with_forecast_rf['prices'])[-1],today])

        if today not in (current_forecast.index):
            prices_arima=np.append(current_forecast['prices_arima'],(np.array(df_with_forecast_arima['prices'])[-1]))
            prices_rf=np.append(current_forecast['prices_rf'],(np.array(df_with_forecast_rf['prices'])[-1]))
            # BUG FIX: only index[0] (the first stored date) was kept, so the
            # date column fell out of sync with the price columns as soon as
            # the CSV held more than one row; keep every existing date.
            dates=np.append(current_forecast.index.strftime('%Y-%m-%d'),today)
            current_forecast=pd.DataFrame({'date':dates, 'prices_rf':prices_rf,'prices_arima':prices_arima})
            # index=False: the default wrote an extra unnamed index column on
            # every save, which accumulated as junk data columns on re-read.
            current_forecast.to_csv('current_forecast.csv',index=False)

        inputs_reccommend = {"messages": [HumanMessage(content=prompt)]}

        model = ChatOpenAI(temperature=0.7, api_key=OPENAI_API_KEY)
        response=model.invoke(prompt)
        response_content=response.content
        st.chat_message("assistant").markdown((response_content))
        # BUG FIX: the prompt (not the model's answer) was being stored in
        # the chat history; store the assistant's actual reply.
        st.session_state.messages.append({"role": "assistant", "content": response_content})

        # Side-by-side prediction-vs-actuals plots for both models.
        fig, ax = plt.subplots(1,2, figsize=(10, 3))
        ax[0].plot(result_arima['prediction'], color='blue', marker='o')
        ax[0].plot(result_arima['data'], color='orange', marker='o')
        ax[0].set_title('ARIMA')
        ax[1].plot(result_rf['prediction'], color='blue', marker='o')
        ax[1].plot(result_rf['data'], color='orange', marker='o')
        ax[1].set_title('RF')
        fig.suptitle('Prediction vs Actuals')
        plt.legend(['prediction','actuals'])
        st.pyplot(fig)
    # ========================== Sentiment analysis =========================
    # Fetch news and reddit posts about the currency, score sentiment for
    # each article whose text mentions it, then summarize the results.
    news_articles = crypto_sentiment_analysis_util.fetch_news(cryptocurrency)
    reddit_news_articles=crypto_sentiment_analysis_util.fetch_reddit_news(cryptocurrency)

    analysis_results = []

    # Perform sentiment analysis for each relevant news article.
    for article in news_articles:
        # BUG FIX: the article text is lower-cased but the needle was not,
        # so a capitalized entry like "Bitcoin" never matched anything;
        # lower-case the needle as well (first 6 chars, as before).
        if cryptocurrency[0:6].lower() in article['News_Article'].lower():
            sentiment_analysis_result = crypto_sentiment_analysis_util.analyze_sentiment(article['News_Article'])
            result = {
                        'News_Article': sentiment_analysis_result["News_Article"],
                        'Sentiment': sentiment_analysis_result["Sentiment"][0]['label'],
                        'Index': sentiment_analysis_result["Sentiment"][0]['score']
                    }
            analysis_results.append(result)

    # Same case-insensitive match fix for the reddit articles.
    for article in reddit_news_articles:
        if cryptocurrency[0:6].lower() in article.lower():
            sentiment_analysis_result_reddit = crypto_sentiment_analysis_util.analyze_sentiment(article)
            result = {
                        'News_Article': sentiment_analysis_result_reddit["News_Article"],
                        'Index':np.round(sentiment_analysis_result_reddit["Sentiment"][0]['score'],2)
                    }
            analysis_results.append(result)

    # Generate a summarized message rationalizing the dominant sentiment.
    summary = crypto_sentiment_analysis_util.generate_summary_of_sentiment(analysis_results)
    st.chat_message("assistant").write(str(summary))
    st.session_state.messages.append({"role": "assistant", "content": summary})

# Fallback free-form chat: anything typed here is answered directly by the
# OpenAI chat-completions API using the full session transcript as context.
client = OpenAI(api_key=OPENAI_API_KEY)

# Choose a default model once per session.
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"

if user_msg := st.chat_input("Some other questions?"):
    # Record the user's message and echo it into the chat pane.
    st.session_state.messages.append({"role": "user", "content": user_msg})
    with st.chat_message("user"):
        st.markdown(user_msg)

    # Replay the whole transcript so the model has full context, then
    # stream the assistant's reply token-by-token into the chat pane.
    transcript = [
        {"role": m["role"], "content": m["content"]}
        for m in st.session_state.messages
    ]
    with st.chat_message("assistant"):
        completion_stream = client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=transcript,
            stream=True,
        )
        reply = st.write_stream(completion_stream)
    st.session_state.messages.append({"role": "assistant", "content": reply})