Ichcanziho committed
Commit f96a028
1 Parent(s): 54eb1e5
Create app.py
app.py ADDED
@@ -0,0 +1,70 @@
from transformers import pipeline
import gradio as gr
from PIL import Image


def classify_img(im):
    # Gradio hands the image over as a numpy array; convert it to a PIL RGB image.
    im = Image.fromarray(im.astype('uint8'), 'RGB')
    ans = image_cla(im)
    # Map each predicted label to its score so gr.Label can show the top classes.
    labels = {v["label"]: v["score"] for v in ans}
    return labels


def voice2text(audio):
    # Transcribe the recorded audio file with the Spanish Wav2Vec2 ASR pipeline.
    text = voice_cla(audio)["text"]
    return text


def text2sentiment(text):
    # Return only the top sentiment label from the Robertuito classifier.
    sentiment = text_cla(text)[0]["label"]
    return sentiment


def make_block(dem):
    with dem:
        # Intro (in Spanish): a multi-classifier space listing the three models and the author.
        gr.Markdown("""
            # Ejemplo de `space` multiclassifier:

            Este `space` contiene los siguientes modelos:

            - ASR: [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-large-xlsr-53-spanish)
            - Text Classification: [Robertuito](https://huggingface.co/pysentimiento/robertuito-sentiment-analysis)
            - Image classifier: [Swin-small-patch4](https://huggingface.co/microsoft/swin-small-patch4-window7-224)

            Autor del demo: [Gabriel Ichcanziho](https://www.linkedin.com/in/ichcanziho/)

            Puedes probar un demo de cada uno en las siguientes pestañas:
        """)
        with gr.Tabs():
            with gr.TabItem("Transcribe audio en español"):
                with gr.Row():
                    audio = gr.Audio(source="microphone", type="filepath")
                    transcripcion = gr.Textbox()
                b1 = gr.Button("Voz a Texto")

            with gr.TabItem("Análisis de sentimiento en español"):
                with gr.Row():
                    texto = gr.Textbox()
                    label = gr.Label()
                b2 = gr.Button("Texto a Sentimiento")
            with gr.TabItem("Clasificación de Imágenes"):
                with gr.Row():
                    image = gr.Image(label="Carga una imagen aquí")
                    label_image = gr.Label(num_top_classes=5)
                b3 = gr.Button("Clasifica")

        # Wire each button to its corresponding inference function.
        b1.click(voice2text, inputs=audio, outputs=transcripcion)
        b2.click(text2sentiment, inputs=texto, outputs=label)
        b3.click(classify_img, inputs=image, outputs=label_image)


if __name__ == '__main__':
    # Load the three pipelines once at startup; the functions above use them as globals.
    image_cla = pipeline("image-classification", model="microsoft/swin-tiny-patch4-window7-224")
    voice_cla = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")
    text_cla = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")

    demo = gr.Blocks()
    make_block(demo)
    demo.launch()
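As a quick local sanity check, the pipelines used above can be exercised outside Gradio before the Space is deployed. The snippet below is a minimal sketch and not part of the commit: it assumes the same checkpoints can be downloaded and that a local image file named sample.jpg exists (a hypothetical path chosen for illustration).

# Minimal local smoke test for the pipelines used in app.py (sketch, not part of the commit).
from transformers import pipeline
from PIL import Image

text_cla = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")
image_cla = pipeline("image-classification", model="microsoft/swin-tiny-patch4-window7-224")

# Text classification returns a list with one dict per input string.
print(text_cla("Me encanta este demo")[0]["label"])  # e.g. POS / NEG / NEU

# Image classification returns a list of {"label": ..., "score": ...} dicts, best first.
img = Image.open("sample.jpg").convert("RGB")  # hypothetical local file
for pred in image_cla(img)[:5]:
    print(pred["label"], round(pred["score"], 3))

# The ASR pipeline can be checked the same way by passing a path to a local audio file.

Note that for the Space to build, a requirements.txt listing gradio, transformers, torch and Pillow is typically needed as well; that file is not included in this commit.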