one last time
app.py CHANGED
@@ -1,11 +1,10 @@
 from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
 import gradio as gr
 import os
+auth_token = os.environ.get("HF_Token")
 
 asr = pipeline("automatic-speech-recognition", "facebook/wav2vec2-base-960h")
 summarizer = pipeline("summarization", model="knkarthick/MEETING_SUMMARY")
-
-auth_token = os.environ.get("HF_Token")
 tokenizer = AutoTokenizer.from_pretrained("demo-org/auditor_review_model",use_auth_token=auth_token)
 audit_model = AutoModelForSequenceClassification.from_pretrained("demo-org/auditor_review_model",use_auth_token=auth_token)
 nlp = pipeline("text-classification", model=audit_model, tokenizer=tokenizer)
@@ -28,11 +27,9 @@ def text_to_sentiment(text):
 
 def ner(text):
     api = gr.Interface.load("dslim/bert-base-NER", src='models')
-    print (api)
     spans = api(text)
-
-
-    return replaced_spans
+    #replaced_spans = [(key, None) if value=='No Disease' else (key, value) for (key, value) in spans]
+    return spans
 
 demo = gr.Blocks()
 
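For orientation, here is a minimal sketch of the fixed ner handler wired into the Blocks demo that app.py builds next. Only the ner body and demo = gr.Blocks() come from this diff; the Textbox, HighlightedText, and Button components and their names are illustrative assumptions, and gr.Interface.load(..., src='models') is the Gradio 3.x way of calling a Hub-hosted model.

import gradio as gr

def ner(text):
    # Call the Hub-hosted NER model through a loaded Interface (Gradio 3.x API).
    api = gr.Interface.load("dslim/bert-base-NER", src='models')
    spans = api(text)
    # Return the spans as-is; the commented-out remapping in the diff can be
    # restored later if label post-processing is needed.
    return spans

# Assumed wiring, for illustration only; component names are not from the diff.
with gr.Blocks() as demo:
    ner_input = gr.Textbox(label="Text")
    ner_output = gr.HighlightedText(label="Entities")
    ner_button = gr.Button("Run NER")
    ner_button.click(ner, inputs=ner_input, outputs=ner_output)

demo.launch()

The functional change is that ner now returns the defined spans value instead of the undefined replaced_spans, so the handler no longer raises a NameError at call time; moving auth_token = os.environ.get("HF_Token") up next to the imports does not change behavior, since it still runs before the model loads that use it.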