niyaa committed
Commit 062e3be
1 Parent(s): 19e5e9a

Update app.py

Files changed (1): app.py +10 -1
app.py CHANGED
@@ -11,6 +11,9 @@ from datetime import date, timedelta
 from transformers import pipeline
 import torch
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
+from contextlib import redirect_stdout
+from io import StringIO
+
 
 
 def scrapper (start_date):
@@ -124,7 +127,12 @@ if tickerSymbol:
 
 
     Begindatestring = start_date
-    headlines = scrapper(start_date)
+    # Create a dummy stream to suppress output
+    dummy_stream = StringIO()
+
+    with redirect_stdout(dummy_stream):
+        headlines = scrapper(start_date)
+
     index = [idx for idx, s in enumerate(headlines) if s=='Most Read' ][0]
     del headlines[index:]
     news = pd.DataFrame({"News": headlines})
@@ -139,6 +147,7 @@ if tickerSymbol:
     tokenizer = AutoTokenizer.from_pretrained("nickmuchi/sec-bert-finetuned-finance-classification")
     model = AutoModelForSequenceClassification.from_pretrained("nickmuchi/sec-bert-finetuned-finance-classification")
 
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
     nlp = pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
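
Taken together, the diff does two things: it silences the scraper's console output by redirecting stdout into an in-memory buffer, and it selects a GPU for the classification pipeline when one is available. Below is a minimal, self-contained sketch of the same pattern; noisy_scraper() is a hypothetical stand-in for the app's scrapper(), while the model id and pipeline call follow what the diff shows.

from contextlib import redirect_stdout
from io import StringIO

import torch
from transformers import pipeline

def noisy_scraper():
    # Hypothetical stand-in for the app's scrapper(): prints progress
    # messages to stdout while returning a list of headlines.
    print("fetching headlines...")
    return ["Headline A", "Headline B", "Most Read"]

# Redirect stdout into an in-memory buffer so the scraper's print
# output is captured instead of cluttering the console.
dummy_stream = StringIO()
with redirect_stdout(dummy_stream):
    headlines = noisy_scraper()

# Run the classifier on the GPU when available, else fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

nlp = pipeline(
    "text-classification",
    model="nickmuchi/sec-bert-finetuned-finance-classification",
    device=device,
)
print(nlp(headlines[:2]))

Redirecting stdout only hides the noise; anything written to stderr or through the logging module would need a separate redirect (e.g. contextlib.redirect_stderr).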