Update README.md
Browse files
README.md
CHANGED
@@ -17,8 +17,7 @@ This is a XLM-roBERTa-base model trained on ~198M tweets and finetuned for emoti
|
|
17 |
```python
|
18 |
from transformers import pipeline
|
19 |
model_path = "daveni/twitter-xlm-roberta-emotion-es"
|
20 |
-
|
21 |
-
emotion_analysis = pipeline("text-classification", model=model_path, tokenizer=tokenizer_path)
|
22 |
emotion_analysis("Einstein dijo: Solo hay dos cosas infinitas, el universo y los pinches anuncios de bitcoin en Twitter. Paren ya carajo aaaaaaghhgggghhh me quiero murir")
|
23 |
```
|
24 |
```
|
@@ -27,7 +26,6 @@ emotion_analysis("Einstein dijo: Solo hay dos cosas infinitas, el universo y los
|
|
27 |
## Full classification example
|
28 |
```python
|
29 |
from transformers import AutoModelForSequenceClassification
|
30 |
-
from transformers import TFAutoModelForSequenceClassification
|
31 |
from transformers import AutoTokenizer, AutoConfig
|
32 |
import numpy as np
|
33 |
from scipy.special import softmax
|
@@ -40,13 +38,13 @@ def preprocess(text):
|
|
40 |
new_text.append(t)
|
41 |
return " ".join(new_text)
|
42 |
model_path = "daveni/twitter-xlm-roberta-emotion-es"
|
43 |
-
|
44 |
-
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path )
|
45 |
config = AutoConfig.from_pretrained(model_path )
|
46 |
# PT
|
47 |
model = AutoModelForSequenceClassification.from_pretrained(model_path )
|
48 |
text = "Se ha quedao bonito día para publicar vídeo, ¿no? Hoy del tema más diferente que hemos tocado en el canal."
|
49 |
text = preprocess(text)
|
|
|
50 |
encoded_input = tokenizer(text, return_tensors='pt')
|
51 |
output = model(**encoded_input)
|
52 |
scores = output[0][0].detach().numpy()
|
@@ -60,6 +58,7 @@ for i in range(scores.shape[0]):
|
|
60 |
print(f"{i+1}) {l} {np.round(float(s), 4)}")
|
61 |
```
|
62 |
Output:
|
|
|
63 |
```
|
64 |
Se ha quedao bonito día para publicar vídeo, ¿no? Hoy del tema más diferente que hemos tocado en el canal.
|
65 |
1) joy 0.7887
|
|
|
17 |
```python
|
18 |
from transformers import pipeline
|
19 |
model_path = "daveni/twitter-xlm-roberta-emotion-es"
|
20 |
+
emotion_analysis = pipeline("text-classification", framework="pt", model=model_path, tokenizer=model_path)
|
|
|
21 |
emotion_analysis("Einstein dijo: Solo hay dos cosas infinitas, el universo y los pinches anuncios de bitcoin en Twitter. Paren ya carajo aaaaaaghhgggghhh me quiero murir")
|
22 |
```
|
23 |
```
|
|
|
26 |
## Full classification example
|
27 |
```python
|
28 |
from transformers import AutoModelForSequenceClassification
|
|
|
29 |
from transformers import AutoTokenizer, AutoConfig
|
30 |
import numpy as np
|
31 |
from scipy.special import softmax
|
|
|
38 |
new_text.append(t)
|
39 |
return " ".join(new_text)
|
40 |
model_path = "daveni/twitter-xlm-roberta-emotion-es"
|
41 |
+
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
|
|
42 |
config = AutoConfig.from_pretrained(model_path)
|
43 |
# PT
|
44 |
model = AutoModelForSequenceClassification.from_pretrained(model_path)
|
45 |
text = "Se ha quedao bonito día para publicar vídeo, ¿no? Hoy del tema más diferente que hemos tocado en el canal."
|
46 |
text = preprocess(text)
|
47 |
+
print(text)
|
48 |
encoded_input = tokenizer(text, return_tensors='pt')
|
49 |
output = model(**encoded_input)
|
50 |
scores = output[0][0].detach().numpy()
|
|
|
58 |
print(f"{i+1}) {l} {np.round(float(s), 4)}")
|
59 |
```
|
60 |
Output:
|
61 |
+
|
62 |
```
|
63 |
Se ha quedao bonito día para publicar vídeo, ¿no? Hoy del tema más diferente que hemos tocado en el canal.
|
64 |
1) joy 0.7887
|