import streamlit as st
from transformers import pipeline
st.title("Building a Generative AI Tool")
st.subheader("Converting Text to Speech")
text = st.text_input("Enter your text here...", value="")
# # Alternative: transformers pipeline with model="suno/bark-small"
# pipe_t2a = pipeline("text-to-speech", model="suno/bark-small", device="cpu")  # use CPU to avoid device recognition errors
# # Perform text-to-speech conversion if text is provided
# if text:
#     output = pipe_t2a(text)
#     # Display the audio output
#     st.audio(output["audio"], format="audio/wav", sample_rate=output["sampling_rate"])
# Active model: facebook/fastspeech2-en-ljspeech, loaded through fairseq
from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface
# import IPython.display as ipd  # only needed for notebook testing, not in Streamlit
# Load the FastSpeech 2 model, config, and task from the Hugging Face Hub,
# pairing it with the HiFi-GAN vocoder for waveform generation
models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
    "facebook/fastspeech2-en-ljspeech",
    arg_overrides={"vocoder": "hifigan", "fp16": False},
)
model = models[0]
TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
generator = task.build_generator([model], cfg)
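# Optional: the loading steps above run again on every Streamlit rerun (each widget
# interaction). A minimal sketch of caching them, assuming Streamlit >= 1.18 for
# st.cache_resource; the helper name load_tts is illustrative, not part of the app:
#
# @st.cache_resource
# def load_tts():
#     models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
#         "facebook/fastspeech2-en-ljspeech",
#         arg_overrides={"vocoder": "hifigan", "fp16": False},
#     )
#     model = models[0]
#     TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
#     generator = task.build_generator([model], cfg)
#     return task, model, generator
#
# task, model, generator = load_tts()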
# text = "Hello, this is a test run."  # sample input for quick local testing
# ipd.Audio(wav, rate=rate)            # notebook equivalent of st.audio below
if text:
    # Convert the text to model input, then synthesize the waveform with the vocoder
    sample = TTSHubInterface.get_model_input(task, text)
    wav, rate = TTSHubInterface.get_prediction(task, model, generator, sample)
    # Display the audio output (st.audio expects a NumPy array, not a torch tensor)
    st.audio(wav.cpu().numpy(), format="audio/wav", sample_rate=rate)
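# Optional: offer the generated audio as a downloadable WAV file. A sketch only,
# assuming the soundfile package is installed (it is not part of the original app);
# these lines would go inside the `if text:` block above, after st.audio:
#
# import io
# import soundfile as sf
# buf = io.BytesIO()
# sf.write(buf, wav.cpu().numpy(), rate, format="WAV")
# st.download_button("Download audio", data=buf.getvalue(),
#                    file_name="speech.wav", mime="audio/wav")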