import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import os
import soundfile as sf

def main():
    # Gradio Interface
    with gr.Blocks() as app:
        gr.Markdown(
            """
            # <div align="center"> Ilaria Audio Analyzer 💖 (BETA) </div>
            Audio Analyzer Software by Ilaria. Help me on Ko-Fi!\n
            Special thanks to Alex Murkoff for helping me code it!
    
            Need help with AI? Join AI Hub!
            """
        )
    
        with gr.Row(elem_id=1):
            audio_input = gr.Audio(type='filepath')
            with gr.Column():
                create_spec_butt = gr.Button(value='Create Spectrogram And Get Info', variant='primary')
    
        with gr.Row(elem_id=2):
            output_markdown = gr.Markdown(value="", visible=True)
            image_output = gr.Image(type='filepath', interactive=False)
    
        create_spec_butt.click(fn=create_spectrogram_and_get_info, inputs=[audio_input], outputs=[output_markdown, image_output])
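        # The two return values of create_spectrogram_and_get_info are routed, in order,
        # to [output_markdown, image_output]: first the Markdown info table, then the
        # path of the saved spectrogram PNG.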
        
        app.queue(max_size=1022).launch(share=True)

def create_spectrogram_and_get_info(audio_file):
    # Clear figure in case it has data in it
    plt.clf()
    
    # Read the audio data from the file
    audio_data, sample_rate = sf.read(audio_file)

    # Convert to mono if it's not mono
    if len(audio_data.shape) > 1:
        audio_data = np.mean(audio_data, axis=1)

    # Create the spectrogram
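    # NFFT=4096 sets the FFT window length, so each frequency bin spans
    # sample_rate / 4096 Hz (about 10.8 Hz for 44.1 kHz audio).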
    plt.specgram(audio_data, Fs=sample_rate, NFFT=4096, sides='onesided',
                 cmap="Reds_r", scale_by_freq=True, scale='dB', mode='magnitude')

    # Save the spectrogram to a PNG file
    plt.savefig('spectrogram.png')

    # Get the audio file info
    audio_info = sf.info(audio_file)

    # Map common soundfile subtypes to bit depth; unknown subtypes fall back to 0
    bit_depth = {'PCM_16': 16, 'PCM_24': 24, 'PCM_32': 32, 'FLOAT': 32, 'DOUBLE': 64}.get(audio_info.subtype, 0)
    
    # Convert duration to minutes, seconds, and milliseconds
    minutes, seconds = divmod(audio_info.duration, 60)
    seconds, milliseconds = divmod(seconds, 1)
    milliseconds *= 1000  # convert from seconds to milliseconds

    # Compute the uncompressed bitrate in Mb/s (samples/s * channels * bits per sample, divided by 1,000,000)
    bitrate = audio_info.samplerate * audio_info.channels * bit_depth / 1_000_000
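    # Example: CD-quality audio (44,100 Hz, stereo, 16-bit PCM) works out to
    # 44100 * 2 * 16 = 1,411,200 bits/s, i.e. roughly 1.41 Mb/s.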

    # Create a table with the audio file info
    info_table = f"""
    
    | Information | Value |
    | --- | --- |
    | Duration | {int(minutes)} minutes - {int(seconds)} seconds - {int(milliseconds)} milliseconds |
    | Samples per second | {audio_info.samplerate} Hz |
    | Audio Channels | {audio_info.channels} |
    | Bitrate | {bitrate:.2f} Mb/s |
    | Extension | {os.path.splitext(audio_file)[1]} |
    """

    # Return the PNG file of the spectrogram and the info table
    return info_table, 'spectrogram.png'

# Create and launch the Gradio interface when run as a script
if __name__ == '__main__':
    main()