Spaces:
Running
Running
First commit
Browse files- .gitattributes +1 -0
- app.py +26 -0
- content/models/config.json +3 -0
- content/models/pytorch_model.bin +3 -0
- content/models/special_tokens_map.json +3 -0
- content/models/tokenizer.json +3 -0
- content/models/tokenizer_config.json +3 -0
- content/models/training_args.bin +3 -0
- content/models/unigram.json +3 -0
.gitattributes
CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
25 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
25 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
26 |
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.json filter=lfs diff=lfs merge=lfs -text
|
app.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Gradio demo: tweet sentiment analysis with a locally bundled fine-tuned model.

Loads a sequence-classification checkpoint from ``content/models`` and serves a
single text-in / text-out interface.
"""
import gradio as gr

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# NOTE(review): the imports below are training-time leftovers, unused at
# inference. Kept (not deleted) in case something outside this view relies on
# their import-time side effects — TODO confirm and remove.
import numpy as np
from datasets import load_dataset, load_metric
from transformers import DataCollatorWithPadding, Trainer, TrainingArguments

# Load the fine-tuned checkpoint and its matching tokenizer from the repo.
model_loader = AutoModelForSequenceClassification.from_pretrained("content/models")
tokenizer_loader = AutoTokenizer.from_pretrained("content/models")
model_loader.eval()  # inference only: disable dropout / training-mode layers
print("loaded")

# Bug fix: device was hard-coded to GPU 0 (``device=0``), which raises on
# CPU-only hosts. Fall back to CPU (-1) when CUDA is unavailable.
_device = 0 if torch.cuda.is_available() else -1
classifier = pipeline(
    "sentiment-analysis",
    model=model_loader,
    tokenizer=tokenizer_loader,
    device=_device,
)


def greet(twitter):
    """Classify one tweet and return a human-readable label with its score.

    Args:
        twitter: the input text to classify.

    Returns:
        A string of the form "twitter is LABEL with score=0.1234".
    """
    pred = classifier(twitter)[0]  # pipeline returns a list; single input -> one dict
    return "twitter is %s with score=%.4f" % (pred['label'], pred['score'])


iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
|
content/models/config.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9b87c4626ca6cd15e093bb7c3a8f330df7c02016926ab895386c2c77424cf86f
|
3 |
+
size 1038
|
content/models/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fc15d30859935c50984a3cc56dc773ef0a0d28424cea748a41360c0953fc6cd0
|
3 |
+
size 470708397
|
content/models/special_tokens_map.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:378eb3bf733eb16e65792d7e3fda5b8a4631387ca04d2015199c4d4f22ae554d
|
3 |
+
size 239
|
content/models/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f9f761e0e122116627dac81039f134bed26d85535a2903d7ff05a1f8f3926ad5
|
3 |
+
size 9081402
|
content/models/tokenizer_config.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:5fe21da4b94e6a864977e6aebe7cca9fffef0ce4d7b5806ae172751daf7b5646
|
3 |
+
size 513
|
content/models/training_args.bin
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:02e29a91e7a8e0ff4e459b65f3de06bebf7e1777bf8fecfa89fea132e7d39d97
|
3 |
+
size 2799
|
content/models/unigram.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:71b44701d7efd054205115acfa6ef126c5d2f84bd3affe0c59e48163674d19a6
|
3 |
+
size 14763234
|