# audi_data / app.py  (commit 0d3e4a3 — "Ajouter le script Gradio et les dépendances")
# Gradio sound-search demo: CLAP text embeddings queried against a Qdrant collection.
import os

import gradio as gr
import torch
from qdrant_client import QdrantClient
from transformers import ClapModel, ClapProcessor
# Qdrant client ####################################################################
# SECURITY NOTE(review): the cluster URL and API key were hard-coded in source
# control — this key is exposed and should be rotated. Both values can now be
# supplied via environment variables; the original literals remain only as a
# backward-compatible fallback so existing deployments keep working.
QDRANT_URL = os.getenv(
    "QDRANT_URL",
    "https://ebe79742-e3ac-4d09-a2c6-63946024cc7a.us-east4-0.gcp.cloud.qdrant.io",
)
QDRANT_API_KEY = os.getenv(
    "QDRANT_API_KEY",
    "_NnGLuSMH4Qwv-ancoFh88YvzuR7WbyidAorVOVQ_eMCbPhxTb2TSw",
)
client = QdrantClient(QDRANT_URL, api_key=QDRANT_API_KEY)
print("[INFO] Client created...")

# Loading the CLAP model used to embed free-text queries into the same vector
# space as the audio stored in the Qdrant collection.
print("[INFO] Loading the model...")
model_name = "laion/larger_clap_general"
model = ClapModel.from_pretrained(model_name)
processor = ClapProcessor.from_pretrained(model_name)

# Gradio Interface #################################################################
max_results = 10  # number of Audio output slots created in the UI below
def sound_search(query):
    """Embed *query* with CLAP and return the nearest audio hits from Qdrant.

    Parameters
    ----------
    query : str
        Free-text description of the sound being searched for.

    Returns
    -------
    list[gr.Audio]
        Up to ``max_results`` playable audio components, each labelled with
        the hit's style and similarity score.
    """
    text_inputs = processor(text=query, return_tensors="pt")
    # Inference only: without no_grad() the returned embedding requires grad,
    # which both wastes memory and breaks conversion to a plain float list.
    with torch.no_grad():
        text_embed = model.get_text_features(**text_inputs)[0]
    hits = client.search(
        collection_name="demo_spaces_db",
        # Qdrant expects a list of floats, not a torch tensor.
        query_vector=text_embed.tolist(),
        limit=max_results,
    )
    return [
        gr.Audio(
            hit.payload['audio_path'],
            label=f"style: {hit.payload['style']} -- score: {hit.score}")
        for hit in hits
    ]
with gr.Blocks() as demo:
    gr.Markdown(
        """# Sound search database """
    )
    # Query box: each edit re-runs the search (change event, not submit).
    query_box = gr.Textbox(placeholder="What sound are you looking for ?")
    # Distinct Audio components are required so Gradio can fill each slot
    # independently with one search result.
    audio_slots = []
    for slot in range(max_results):
        audio_slots.append(gr.Audio(label=f"{slot}"))
    query_box.change(sound_search, query_box, audio_slots)

demo.launch()