"""Gradio demo: classify whether a query audio clip is AI-generated.

The user uploads one demonstration audio and one query audio; the backing
`inference` module performs the actual detection.
"""

import gradio as gr
import os

# BUGFIX: the original script called inference.detect() while the
# `import inference` line was commented out, so every click on the
# detection button raised NameError. Import the project-local module
# defensively so the UI still loads (and reports a clear error) when
# the module is absent, e.g. in a docs/demo-only environment.
try:
    import inference
except ImportError:
    inference = None


def audio_deepfake_detection(demonstration_paths, audio_path):
    """Run deepfake detection on a query audio clip.

    Args:
        demonstration_paths: Filepath of the demonstration audio
            (reference clip shown to the model).
        audio_path: Filepath of the query audio to classify.

    Returns:
        dict: {"Is AI Generated": bool, "Confidence": "NN.NN%"} built
        from the inference result's "is_fake" and "confidence" fields.

    Raises:
        gr.Error: If the `inference` module could not be imported.
    """
    # Log the received paths for debugging the upload -> detection flow.
    print("Demonstration audio paths: {}".format(demonstration_paths))
    print("Query audio path: {}".format(audio_path))

    if inference is None:
        # Surface a user-visible error in the Gradio UI instead of a NameError.
        raise gr.Error("Detection backend unavailable: the 'inference' module could not be imported.")

    result = inference.detect(demonstration_paths, audio_path)

    # Return detection results and confidence scores
    return {
        "Is AI Generated": result["is_fake"],
        "Confidence": f"{result['confidence']:.2f}%"
    }


with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Audio Deepfake Detection System
        This demo helps you detect whether an audio clip is AI-generated or authentic.
        """
    )
    gr.Markdown(
        """
        ## Upload Audio
        **Note**: Supports common audio formats (wav, mp3, etc.).
        """
    )

    # Demonstration audio input component (reference clip).
    demonstration_audio_input = gr.Audio(
        sources=["upload"],
        label="Demonstration Audios",
        type="filepath",
    )

    # Query audio input component (the clip to classify).
    query_audio_input = gr.Audio(
        sources=["upload"],
        label="Query Audio (Audio for Detection)",
        type="filepath",
    )

    # Submit button
    submit_btn = gr.Button(value="Start Detection", variant="primary")

    # Output results
    output_labels = gr.Json(label="Detection Results")

    # Set click event
    submit_btn.click(
        fn=audio_deepfake_detection,
        inputs=[demonstration_audio_input, query_audio_input],
        outputs=[output_labels]
    )

    # Examples section
    gr.Markdown("## Test Examples")
    gr.Examples(
        examples=[
            ["examples/real_audio.wav", "examples/query_audio.wav"],
            ["examples/fake_audio.wav", "examples/query_audio.wav"],
        ],
        inputs=[demonstration_audio_input, query_audio_input],
    )

if __name__ == "__main__":
    demo.launch()