niyaa committed on
Commit
acecbbf
1 Parent(s): 2211999

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +114 -0
app.py CHANGED
@@ -84,4 +84,118 @@ else:
84
 
85
 
86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
 
 
84
 
85
 
86
 
87
+ Begindatestring = start_date
88
+
89
+
90
+ #Begindatestring = datetime.strptime(Begindatestring, "%Y-%m-%d").date()
91
+
92
+
93
+ val = 39448 + int(delta.days)
94
+ url = 'https://economictimes.indiatimes.com/archivelist/year-'+str(Begindatestring.year)+',month-'+str(Begindatestring.month)+',starttime-'+str(val)+'.cms' # Replace with your URL
95
+
96
+ response = requests.get(url)
97
+
98
+ if response.status_code == 200:
99
+ html_text = response.text
100
+ soup = BeautifulSoup(html_text, "lxml")
101
+ else:
102
+ st.write(f"Failed to fetch the page. Status code: {response.status_code}")
103
+ jobs = soup.find_all("li")
104
+ headlines = []
105
+ for job in jobs:
106
+ try:
107
+ target_element = job.find("a")
108
+ target_element.text
109
+ headlines.append(target_element.text)
110
+ except:
111
+ continue
112
+
113
+
114
+
115
+
116
+ index = [idx for idx, s in enumerate(headlines) if s=='Most Read' ][0]
117
+ del headlines[index:]
118
+ news = pd.DataFrame({"News": headlines})
119
+ news.insert(0, 'Date', Begindatestring)
120
+ #st.dataframe(df[0:1])
121
+
122
+
123
+ news = news.drop_duplicates()
124
+ news = news.dropna(how='any')
125
+ news = news.reset_index(drop=True)
126
+
127
+ import pandas as pd
128
+ import numpy as np
129
+
130
+
131
+ from transformers import pipeline
132
+ import torch
133
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
134
+
135
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
136
+
137
+
138
+ tokenizer = AutoTokenizer.from_pretrained("nickmuchi/sec-bert-finetuned-finance-classification")
139
+ model = AutoModelForSequenceClassification.from_pretrained("nickmuchi/sec-bert-finetuned-finance-classification")
140
+
141
+
142
+
143
+
144
+
145
+
146
+
147
+
148
+
149
+
150
+ nlp = pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
151
+
152
+ length = len(news[ 'News'].to_list())
153
+ news_list = news[ 'News'].to_list()
154
+
155
+ df = pd.DataFrame()
156
+ for i in range (0, length):
157
+
158
+
159
+ results = nlp(news_list[i])
160
+ df.loc[i, "News"] = news_list[i]
161
+ df.loc[i , 'label'] = results[0]["label"]
162
+ df.loc[i , 'score'] = results[0]["score"]
163
+
164
+
165
+
166
+ #st.dataframe(df)
167
+
168
+ # Filter the DataFrame to get rows with "neutral" sentiment
169
+ bullish_rows = df[df['label'] == 'bullish']
170
+
171
+ # Calculate the sum of the 'Score' column for "neutral" rows
172
+ bullish_score_sum = bullish_rows['score'].sum()
173
+
174
+ num_bullish_rows = len(bullish_rows)
175
+ # Calculate the average score for "neutral" sentiment
176
+ average_score_for_bullish = bullish_score_sum / num_bullish_rows
177
+
178
+
179
+ # Filter the DataFrame to get rows with "neutral" sentiment
180
+ bearish_rows = df[df['label'] == 'bearish']
181
+
182
+ # Calculate the sum of the 'Score' column for "neutral" rows
183
+ bearish_score_sum = bearish_rows['score'].sum()
184
+
185
+ # Cabearishlculate the number of "neutral" rows
186
+ num_bearish_rows = len(bearish_rows)
187
+
188
+ # Calculate the average score for "neutral" sentiment
189
+ average_score_for_bearish = bearish_score_sum / num_bearish_rows
190
+
191
+
192
+ if(average_score_for_bearish > average_score_for_bullish):
193
+ st.write("Stock will go down")
194
+ if(average_score_for_bearish < average_score_for_bullish):
195
+ st.write("Stock will go up")
196
+
197
+
198
+
199
+
200
+
201