---
license: cc
language: es
widget:
- text: "Me cae muy bien."
  example_title: "Non-racist example"
- text: "Nos vienen a robar el trabajo."
  example_title: "Racist example"
---
Model to predict whether a given text is racist or not:

* `LABEL_0` output indicates non-racist text
* `LABEL_1` output indicates racist text
Usage:
```python
from transformers import pipeline

RACISM_MODEL = "davidmasip/racism"

# Load the text-classification pipeline with the racism model and its tokenizer
racism_analysis_pipe = pipeline("text-classification",
                                model=RACISM_MODEL, tokenizer=RACISM_MODEL)

results = racism_analysis_pipe("Nos vienen a robar el trabajo")


def clean_labels(results):
    # Map the raw model labels to human-readable ones, in place
    for result in results:
        label = "Non-racist" if result["label"] == "LABEL_0" else "Racist"
        result["label"] = label


clean_labels(results)
print(results)
```
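
The pipeline also accepts a list of strings, so several texts can be scored in one call. The snippet below is a minimal sketch that reuses `racism_analysis_pipe` and `clean_labels` from the example above (the `texts` list and `batch_results` name are illustrative only):

```python
# Minimal sketch, assuming racism_analysis_pipe and clean_labels
# from the snippet above are already defined.
texts = ["Me cae muy bien.", "Nos vienen a robar el trabajo."]

# The pipeline returns one dict (with "label" and "score" keys) per input text.
batch_results = racism_analysis_pipe(texts)
clean_labels(batch_results)
print(batch_results)
```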