FpOliveira committed on
Commit
7358929
1 Parent(s): f2c34a6

app: first build

Browse files
Files changed (2) hide show
  1. app.py +126 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
3
+ import torch
4
+ from collections import Counter
5
+ from scipy.special import softmax
6
+
7
# --- UI text and label constants --------------------------------------------
# Fix: "classicação" was a typo for "classificação" in the user-facing title
# and the output-box description.

article_string = "Author: <a href=\"https://huggingface.co/FpOliveira\">Felipe Ramos de Oliveira</a>. Read more about our <a href=\"https://github.com/Silly-Machine/TuPi-Portuguese-Hate-Speech-Dataset\">The Portuguese hate speech dataset (TuPI) </a>."

app_title = "Portuguese hate speech classifier (classificação de discurso de ódio)"

app_description = """
This app detects offensive language on Portuguese text using multiple models. You can either introduce your own sentences by filling in "Text" or click on one of the examples provided below.
(Este aplicativo detecta linguagem ofensiva em texto em português usando vários modelos. Introduza suas próprias frases preenchendo o campo "Text", ou clique em um dos exemplos fornecidos abaixo.)
"""

# Example sentences shown as one-click inputs in the Gradio UI.
app_examples = [
    ["Bom dia mundo!!!"],
    ["Quem não deve não teme!!"],
    ["Que nojo!🤮🤮🤮🤮🤮"],
    ["Vagabunda,Ordinária"],
    ["Vou mandar um óleo de peroba pra ela de presente! 😂😂😂😂"],
    ["Porque é corrupta é conivente com o desgoverno anterior"],
    ["A cada dia fico mais admirado com a cara de pau da elite dominante desse mundo até quando irão nos fazer de otários"]
]

output_textbox_component_description = """
This box will display hate speech results based on the average score of multiple models.
(Esta caixa exibirá resultados da classificação de discurso de ódio com base na pontuação média de vários modelos.)
"""

output_json_component_description = { "breakdown": """
This box presents a detailed breakdown of the evaluation for each model.
""",
"detalhamento": """
(Esta caixa apresenta um detalhamento da avaliação para cada modelo.)
""" }

# Short labels used as keys of the score dict shown in the gr.Label widget.
short_score_descriptions = {
    0: "Not hate",
    1: "Hate"
}

# Long English / Portuguese verdicts rendered as markdown below the scores.
score_descriptions = {
    0: "This text is not a hate speech.",
    1: "This text is a hate speech.",
}

score_descriptions_pt = {
    1: "(Este texto é um discurso de ódio)",
    0: "(Este texto não é um discurso de ódio)",
}
52
+
53
# Hugging Face model ids served by this app.
model_list = [
    "FpOliveira/tupi-bert-large-portuguese-cased",
    "FpOliveira/tupi-bert-base-portuguese-cased",
]

# Display name shown in the dropdown for each model id.
# Fix: the base model was mislabeled "BERTimbau large (TuPi)", which made both
# entries identical — the dropdown showed duplicate choices and the reverse
# mapping below collapsed to a single entry (dict values must be unique to be
# invertible).
user_friendly_name = {
    "FpOliveira/tupi-bert-large-portuguese-cased": "BERTimbau large (TuPi)",
    "FpOliveira/tupi-bert-base-portuguese-cased": "BERTimbau base (TuPi)",
}

# Display name -> model id (used to resolve the dropdown selection).
reverse_user_friendly_name = { v:k for k,v in user_friendly_name.items() }

# Ordered display names for the dropdown choices.
user_friendly_name_list = list(user_friendly_name.values())
66
+
67
# Eagerly load every tokenizer/model pair once at startup so predict() only
# has to look them up.  NOTE(review): downloads weights from the HF Hub on
# first run.
model_array = [
    {
        "name": checkpoint,
        "tokenizer": AutoTokenizer.from_pretrained(checkpoint),
        "model": AutoModelForSequenceClassification.from_pretrained(checkpoint),
    }
    for checkpoint in model_list
]
75
+
76
def most_frequent(array):
    """Return the element that occurs most often in *array* (ties broken by first occurrence)."""
    (winner, _count), = Counter(array).most_common(1)
    return winner
79
+
80
+
81
def predict(s1, chosen_model):
    """Classify the text *s1* with the model selected in the dropdown.

    Parameters
    ----------
    s1 : str
        The input sentence to classify.
    chosen_model : str
        User-friendly model name from the dropdown; falls back to the first
        model when empty.

    Returns
    -------
    tuple[dict, str]
        A dict mapping class label ("Not hate"/"Hate") to its softmax
        probability, and a bilingual markdown description of the top class.

    Raises
    ------
    ValueError
        If *chosen_model* does not resolve to a loaded model (should not
        happen, since the dropdown choices mirror ``model_list``).
    """
    if not chosen_model:
        # Empty dropdown selection: default to the first model.
        chosen_model = user_friendly_name_list[0]
    full_chosen_model_name = reverse_user_friendly_name[chosen_model]

    logits = None
    for row in model_array:
        if row["name"] != full_chosen_model_name:
            continue
        tokenizer = row["tokenizer"]
        model = row["model"]
        # Original used the obfuscated tokenizer(*([s1],), ...) form; this is
        # the equivalent direct call on a single-sentence batch.
        model_input = tokenizer([s1], padding=True, return_tensors="pt")
        with torch.no_grad():
            output = model(**model_input)
        # First output tensor, first (only) batch item -> class probabilities.
        logits = softmax(output[0][0].detach().numpy()).tolist()
        break

    if logits is None:
        # Guard against an unresolvable selection instead of a NameError below.
        raise ValueError(f"Unknown model selection: {chosen_model!r}")

    max_pos = logits.index(max(logits))
    markdown_description = (
        score_descriptions[max_pos] + "\n \n" + score_descriptions_pt[max_pos]
    )
    scores = {short_score_descriptions[idx]: prob for idx, prob in enumerate(logits)}

    return scores, markdown_description
110
+
111
+
112
# Input widgets: free-form text plus a model selector (defaults to the first
# example sentence and the first friendly model name).
inputs = [
    gr.Textbox(label="Text", value=app_examples[0][0]),
    gr.Dropdown(label="Model", choices=user_friendly_name_list, value=user_friendly_name_list[0])
]

# Output widgets: per-class score label and a markdown verdict.
outputs = [
    gr.Label(label="Result"),
    gr.Markdown(),
]

# Wire everything into a Gradio Interface and start the web server.
gr.Interface(fn=predict, inputs=inputs, outputs=outputs, title=app_title,
             description=app_description,
             examples=app_examples,
             article = article_string).launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ torch
2
+ gradio
3
+ transformers
4
+ scipy