import os

import gradio as gr
import torch
from torchaudio.sox_effects import apply_effects_file
from transformers import AutoFeatureExtractor, AutoModelForAudioXVector
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
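# Bootstrap stylesheet injected at the top of each HTML result panel.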
STYLE = """
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" integrity="sha256-YvdLHPgkqJ8DVUxjjnGVlMMJtNimJ6dYkowFFvp4kKs=" crossorigin="anonymous">
"""
OUTPUT_OK = (
    STYLE
    + """
    <div class="container">
        <div class="row"><h1 style="text-align: center">The speakers are</h1></div>
        <div class="row"><h1 class="display-1 text-success" style="text-align: center">{:.1f}%</h1></div>
        <div class="row"><h1 style="text-align: center">similar</h1></div>
        <div class="row"><h1 class="text-success" style="text-align: center">Welcome, human!</h1></div>
        <div class="row"><small style="text-align: center">(You must get at least 85% to be considered the same person)</small></div>
    </div>
    """
)
OUTPUT_FAIL = (
    STYLE
    + """
    <div class="container">
        <div class="row"><h1 style="text-align: center">The speakers are</h1></div>
        <div class="row"><h1 class="display-1 text-danger" style="text-align: center">{:.1f}%</h1></div>
        <div class="row"><h1 style="text-align: center">similar</h1></div>
        <div class="row"><h1 class="text-danger" style="text-align: center">You shall not pass!</h1></div>
        <div class="row"><small style="text-align: center">(You must get at least 85% to be considered the same person)</small></div>
    </div>
    """
)
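# SoX effect chain applied to every recording before inference: downmix
# to mono, resample to 16 kHz, apply a -1 dB gain, strip leading and
# trailing silence, and keep at most the first 10 seconds.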
EFFECTS = [
    ["remix", "-"],
    ["channels", "1"],
    ["rate", "16000"],
    ["gain", "-1.0"],
    ["silence", "1", "0.1", "0.1%", "-1", "0.1", "0.1%"],
    ["trim", "0", "10"],
]
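# Two recordings count as the same speaker when the cosine similarity
# of their x-vector embeddings reaches THRESHOLD.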
THRESHOLD = 0.85
model_name = "microsoft/unispeech-sat-base-plus-sv"
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
model = AutoModelForAudioXVector.from_pretrained(model_name).to(device)
cosine_sim = torch.nn.CosineSimilarity(dim=-1)
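# Compare two recordings: preprocess with SoX, embed each with the
# x-vector model, and render the verdict as HTML.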
def similarity_fn(path1, path2):
    if not (path1 and path2):
        return '<b style="color:red">ERROR: Please record audio for *both* speakers!</b>'

    # Preprocess both recordings with the shared SoX effect chain.
    wav1, _ = apply_effects_file(path1, EFFECTS)
    wav2, _ = apply_effects_file(path2, EFFECTS)
    print(wav1.shape, wav2.shape)

    input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
    input2 = feature_extractor(wav2.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)

    # Extract x-vector embeddings and compare them with cosine similarity.
    with torch.no_grad():
        emb1 = model(input1).embeddings
        emb2 = model(input2).embeddings
    emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu()
    emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu()
    similarity = cosine_sim(emb1, emb2).numpy()[0]

    if similarity >= THRESHOLD:
        return OUTPUT_OK.format(similarity * 100)
    return OUTPUT_FAIL.format(similarity * 100)
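# Gradio UI: two microphone inputs and an HTML panel for the verdict.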
inputs = [
    gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #1"),
    gr.inputs.Audio(source="microphone", type="filepath", optional=True, label="Speaker #2"),
]
output = gr.outputs.HTML(label="")
description = (
    "This demo from Microsoft compares two speech samples and determines whether they come from the same speaker. "
    "Try it with your own voice! If you find an incorrect prediction, you can click FLAG to save the recordings to a public dataset "
    "of samples on which the model makes mistakes, which may help advance research in this field: "
    "https://huggingface.co./datasets/abidlabs/voice-verification-adversarial-dataset. "
    "Disclaimer: this saves the recordings to a PUBLIC dataset, so please be careful about what you FLAG."
)
article = (
    "<p style='text-align: center'>"
    "<a href='https://huggingface.co./microsoft/unispeech-sat-large-sv' target='_blank'>Learn more about UniSpeech-SAT</a> | "
    "<a href='https://arxiv.org/abs/2110.05752' target='_blank'>UniSpeech-SAT paper</a> | "
    "<a href='https://www.danielpovey.com/files/2018_icassp_xvectors.pdf' target='_blank'>X-Vector paper</a> | "
    "<a href='https://huggingface.co./datasets/abidlabs/voice-verification-adversarial-dataset' target='_blank'>Flagged-recordings dataset</a>"
    "</p>"
)
examples = [
    ["samples/cate_blanch.mp3", "samples/cate_blanch_2.mp3"],
    ["samples/cate_blanch.mp3", "samples/kirsten_dunst.wav"],
]
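# Recordings flagged via the FLAG button are pushed to the public dataset
# linked above; HF_TOKEN must be available as a Space secret / env var.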
HF_TOKEN = os.getenv("HF_TOKEN")
hf_saver = gr.HuggingFaceDatasetSaver(HF_TOKEN, "voice-verification-adversarial-dataset")
interface = gr.Interface(
    fn=similarity_fn,
    inputs=inputs,
    outputs=output,
    title="Break this voice verification model!",
    description=description,
    article=article,
    layout="horizontal",
    theme="huggingface",
    live=False,
    examples=examples,
    flagging_callback=hf_saver,  # the FLAG button saves recordings via the dataset saver above
)
interface.launch(enable_queue=True)