niyaa committed on
Commit
a5f31f0
·
1 Parent(s): 05545e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -75
app.py CHANGED
@@ -3,18 +3,16 @@ import yfinance as yf
3
  import pandas as pd
4
  import cufflinks as cf
5
  import datetime
6
- import datetime.datetime as dt
7
  import plotly.graph_objects as go
8
  from bs4 import BeautifulSoup
9
  import requests
10
  import os
11
- from datetime import date, timedelta, datetime
12
 
13
  # App title
14
  st.markdown('''
15
  # Sovrenn Market Sentiment Indicator App
16
  Shown are the stock price data for the selected company!
17
-
18
  **Credits**
19
  - App built by SRL
20
  ''')
@@ -84,72 +82,70 @@ if tickerSymbol:
84
 
85
 
86
 
87
- else:
88
- st.warning("Please enter a valid Stock Ticker Symbol.")
89
 
90
 
91
 
92
 
93
- d0 = start_date
94
- d1 = dt.date(2008, 1, 1)
95
- delta = d0 - d1
96
 
97
- st.write(delta)
 
 
98
 
99
- Begindatestring = start_date
100
 
 
101
 
102
- #Begindatestring = datetime.strptime(Begindatestring, "%Y-%m-%d").date()
103
 
 
104
 
105
- val = 39448 + int(delta.days)
106
- url = 'https://economictimes.indiatimes.com/archivelist/year-'+str(Begindatestring.year)+',month-'+str(Begindatestring.month)+',starttime-'+str(val)+'.cms' # Replace with your URL
107
 
108
- response = requests.get(url)
 
109
 
110
- if response.status_code == 200:
111
- html_text = response.text
112
- soup = BeautifulSoup(html_text, "lxml")
113
- else:
114
- st.write(f"Failed to fetch the page. Status code: {response.status_code}")
115
- jobs = soup.find_all("li")
116
- headlines = []
117
- for job in jobs:
118
- try:
119
- target_element = job.find("a")
120
- target_element.text
121
- headlines.append(target_element.text)
122
- except:
123
- continue
124
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
 
127
 
128
- index = [idx for idx, s in enumerate(headlines) if s=='Most Read' ][0]
129
- del headlines[index:]
130
- news = pd.DataFrame({"News": headlines})
131
- news.insert(0, 'Date', Begindatestring)
132
- #st.dataframe(df[0:1])
133
 
 
 
 
 
 
134
 
135
- news = news.drop_duplicates()
136
- news = news.dropna(how='any')
137
- news = news.reset_index(drop=True)
138
 
139
- import pandas as pd
140
- import numpy as np
 
141
 
 
 
142
 
143
- from transformers import pipeline
144
- import torch
145
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
146
 
147
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 
148
 
 
149
 
150
- tokenizer = AutoTokenizer.from_pretrained("nickmuchi/sec-bert-finetuned-finance-classification")
151
- model = AutoModelForSequenceClassification.from_pretrained("nickmuchi/sec-bert-finetuned-finance-classification")
152
 
 
 
153
 
154
 
155
 
@@ -159,55 +155,55 @@ model = AutoModelForSequenceClassification.from_pretrained("nickmuchi/sec-bert-f
159
 
160
 
161
 
162
- nlp = pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
163
 
164
- length = len(news[ 'News'].to_list())
165
- news_list = news[ 'News'].to_list()
166
 
167
- df = pd.DataFrame()
168
- for i in range (0, length):
169
 
 
 
170
 
171
- results = nlp(news_list[i])
172
- df.loc[i, "News"] = news_list[i]
173
- df.loc[i , 'label'] = results[0]["label"]
174
- df.loc[i , 'score'] = results[0]["score"]
175
 
 
 
 
 
176
 
177
 
178
- #st.dataframe(df)
179
 
180
- # Filter the DataFrame to get rows with "neutral" sentiment
181
- bullish_rows = df[df['label'] == 'bullish']
182
 
183
- # Calculate the sum of the 'Score' column for "neutral" rows
184
- bullish_score_sum = bullish_rows['score'].sum()
185
 
186
- num_bullish_rows = len(bullish_rows)
187
- # Calculate the average score for "neutral" sentiment
188
- average_score_for_bullish = bullish_score_sum / num_bullish_rows
189
 
 
 
 
190
 
191
- # Filter the DataFrame to get rows with "neutral" sentiment
192
- bearish_rows = df[df['label'] == 'bearish']
193
 
194
- # Calculate the sum of the 'Score' column for "neutral" rows
195
- bearish_score_sum = bearish_rows['score'].sum()
196
 
197
- # Cabearishlculate the number of "neutral" rows
198
- num_bearish_rows = len(bearish_rows)
199
 
200
- # Calculate the average score for "neutral" sentiment
201
- average_score_for_bearish = bearish_score_sum / num_bearish_rows
202
-
203
-
204
- if(average_score_for_bearish > average_score_for_bullish):
205
- st.write("Stock will go down")
206
- if(average_score_for_bearish < average_score_for_bullish):
207
- st.write("Stock will go up")
208
 
 
 
209
 
210
 
 
 
 
 
211
 
212
 
213
 
 
 
 
3
  import pandas as pd
4
  import cufflinks as cf
5
  import datetime
 
6
  import plotly.graph_objects as go
7
  from bs4 import BeautifulSoup
8
  import requests
9
  import os
10
+ from datetime import date, timedelta
11
 
12
  # App title
13
  st.markdown('''
14
  # Sovrenn Market Sentiment Indicator App
15
  Shown are the stock price data for the selected company!
 
16
  **Credits**
17
  - App built by SRL
18
  ''')
 
82
 
83
 
84
 
 
 
85
 
86
 
87
 
88
 
 
 
 
89
 
90
+ d0 = start_date
91
+ d1 = datetime.date(2008, 1, 1)
92
+ delta = d0 - d1
93
 
94
+ st.write(delta)
95
 
96
+ Begindatestring = start_date
97
 
 
98
 
99
+ #Begindatestring = datetime.strptime(Begindatestring, "%Y-%m-%d").date()
100
 
 
 
101
 
102
+ val = 39448 + int(delta.days)
103
+ url = 'https://economictimes.indiatimes.com/archivelist/year-'+str(Begindatestring.year)+',month-'+str(Begindatestring.month)+',starttime-'+str(val)+'.cms' # Replace with your URL
104
 
105
+ response = requests.get(url)
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
+ if response.status_code == 200:
108
+ html_text = response.text
109
+ soup = BeautifulSoup(html_text, "lxml")
110
+ else:
111
+ st.write(f"Failed to fetch the page. Status code: {response.status_code}")
112
+ jobs = soup.find_all("li")
113
+ headlines = []
114
+ for job in jobs:
115
+ try:
116
+ target_element = job.find("a")
117
+ target_element.text
118
+ headlines.append(target_element.text)
119
+ except:
120
+ continue
121
 
122
 
123
 
 
 
 
 
 
124
 
125
+ index = [idx for idx, s in enumerate(headlines) if s=='Most Read' ][0]
126
+ del headlines[index:]
127
+ news = pd.DataFrame({"News": headlines})
128
+ news.insert(0, 'Date', Begindatestring)
129
+ #st.dataframe(df[0:1])
130
 
 
 
 
131
 
132
+ news = news.drop_duplicates()
133
+ news = news.dropna(how='any')
134
+ news = news.reset_index(drop=True)
135
 
136
+ import pandas as pd
137
+ import numpy as np
138
 
 
 
 
139
 
140
+ from transformers import pipeline
141
+ import torch
142
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
143
 
144
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
145
 
 
 
146
 
147
+ tokenizer = AutoTokenizer.from_pretrained("nickmuchi/sec-bert-finetuned-finance-classification")
148
+ model = AutoModelForSequenceClassification.from_pretrained("nickmuchi/sec-bert-finetuned-finance-classification")
149
 
150
 
151
 
 
155
 
156
 
157
 
 
158
 
159
+ nlp = pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
 
160
 
161
+ length = len(news[ 'News'].to_list())
162
+ news_list = news[ 'News'].to_list()
163
 
164
+ df = pd.DataFrame()
165
+ for i in range (0, length):
166
 
 
 
 
 
167
 
168
+ results = nlp(news_list[i])
169
+ df.loc[i, "News"] = news_list[i]
170
+ df.loc[i , 'label'] = results[0]["label"]
171
+ df.loc[i , 'score'] = results[0]["score"]
172
 
173
 
 
174
 
175
+ #st.dataframe(df)
 
176
 
177
+ # Filter the DataFrame to get rows with "bullish" sentiment
178
+ bullish_rows = df[df['label'] == 'bullish']
179
 
180
+ # Calculate the sum of the 'Score' column for "bullish" rows
181
+ bullish_score_sum = bullish_rows['score'].sum()
 
182
 
183
+ num_bullish_rows = len(bullish_rows)
184
+ # Calculate the average score for "bullish" sentiment
185
+ average_score_for_bullish = bullish_score_sum / num_bullish_rows
186
 
 
 
187
 
188
+ # Filter the DataFrame to get rows with "bearish" sentiment
189
+ bearish_rows = df[df['label'] == 'bearish']
190
 
191
+ # Calculate the sum of the 'Score' column for "bearish" rows
192
+ bearish_score_sum = bearish_rows['score'].sum()
193
 
194
+ # Calculate the number of "bearish" rows
195
+ num_bearish_rows = len(bearish_rows)
 
 
 
 
 
 
196
 
197
+ # Calculate the average score for "bearish" sentiment
198
+ average_score_for_bearish = bearish_score_sum / num_bearish_rows
199
 
200
 
201
+ if(average_score_for_bearish > average_score_for_bullish):
202
+ st.write("Stock will go down")
203
+ if(average_score_for_bearish < average_score_for_bullish):
204
+ st.write("Stock will go up")
205
 
206
 
207
 
208
+ else:
209
+ st.warning("Please enter a valid Stock Ticker Symbol.")