TheStinger committed on
Commit 458da1c
1 Parent(s): 56d3f1c

Update app.py

Files changed (1)
  1. app.py +14 -45
app.py CHANGED
@@ -1,52 +1,21 @@
  import gradio as gr

- # Create a function to handle application selection
- def select_app(app_name):
-     if app_name == 'App 1':
-         from tts_voice import tts_order_voice
-         import edge_tts
-         import gradio as gr
-         import tempfile
-         import anyio
-
-         language_dict = tts_order_voice
-
-         async def text_to_speech_edge(text, language_code):
-             voice = language_dict[language_code]
-             communicate = edge_tts.Communicate(text, voice)
-             with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
-                 tmp_path = tmp_file.name
-
-             await communicate.save(tmp_path)
-
-             return "Text input:{}".format(text), tmp_path
-
-         input_text = gr.inputs.Textbox(lines=5, label="Text")
-         output_text = gr.outputs.Textbox(label="Text input")
-         output_audio = gr.outputs.Audio(type="filepath", label="Audio output")
-         default_language = list(language_dict.keys())[0]
-         language = gr.inputs.Dropdown(choices=list(language_dict.keys()), default=default_language, label="Choose the language and the model")
-
-         interface = gr.Interface(fn=text_to_speech_edge, inputs=[input_text, language], outputs=[output_text, output_audio], title="Ilaria TTS 💖")
-
-         if __name__ == "__main__":
-             anyio.run(interface.launch, backend="asyncio")
-         pass
-     elif app_name == 'App 2':
-         # Code for application 2
-         pass
-     elif app_name == 'App 3':
-         # Code for application 3
-         pass
-     elif app_name == 'App 4':
-         # Code for application 4
-         pass
-
- # Create the main interface with the application selector
- iface = gr.Interface(fn=select_app, inputs="dropdown", outputs="textbox")
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from scipy.io import wavfile
+
+ def create_spectrogram(audio_file):
+     # Read the audio file
+     sample_rate, data = wavfile.read(audio_file.name)
+
+     # Create the spectrogram
+     plt.specgram(data, Fs=sample_rate)
+
+     # Save the spectrogram to a PNG file
+     plt.savefig('spectrogram.png')
+
+     # Return the spectrogram PNG file
+     return 'spectrogram.png'
+
+ # Create the Gradio interface
+ iface = gr.Interface(fn=create_spectrogram, inputs=gr.inputs.Audio(type="file"), outputs="image")
+
  iface.launch()
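
Note on the new handler: both the old and the new code use the legacy gr.inputs / gr.outputs namespaces, which are no longer available in recent Gradio releases, and plt.specgram expects a 1-D signal, so a stereo WAV would raise an error. A minimal sketch of an equivalent app against the current Gradio API, assuming gr.Audio(type="filepath") input and down-mixing multi-channel audio, might look like this (not the committed code):

import gradio as gr
import matplotlib
matplotlib.use("Agg")  # render off-screen; no display needed on a Space
import matplotlib.pyplot as plt
from scipy.io import wavfile

def create_spectrogram(audio_path):
    # type="filepath" hands the handler a path string rather than a file object
    sample_rate, data = wavfile.read(audio_path)

    # plt.specgram needs a mono signal; average the channels if the WAV is stereo
    if data.ndim > 1:
        data = data.mean(axis=1)

    fig, ax = plt.subplots()
    ax.specgram(data, Fs=sample_rate)
    ax.set_xlabel("Time [s]")
    ax.set_ylabel("Frequency [Hz]")
    fig.savefig("spectrogram.png")
    plt.close(fig)
    return "spectrogram.png"

iface = gr.Interface(
    fn=create_spectrogram,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Image(type="filepath"),
)

iface.launch()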