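"""Streamlit app: answer a question about an uploaded image with ViLT visual
question answering, then read the answer aloud with SpeechT5 text-to-speech.

Run locally with: streamlit run app.py
"""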
import os
# os.system("pip install scipy")
os.system('pip install -r requirements.txt')

import streamlit as st
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, pipeline
from datasets import load_dataset
import torch
import soundfile as sf
from PIL import Image
import io

# Generate speech from text with SpeechT5 and save it as a 16 kHz WAV file
def generate_speech(text, model, processor, vocoder, speaker_embeddings):
    inputs = processor(text=text, return_tensors="pt")
    generated_speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
    sf.write("speech.wav", generated_speech.numpy(), samplerate=16000)
    return "speech.wav"

# Initialize the TTS model, processor, and vocoder
processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# Speaker embedding (x-vector) from the CMU ARCTIC dataset, used to select the TTS voice
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

# Streamlit UI
st.title('Visual Question Answering and Text-to-Speech App')

uploaded_image = st.file_uploader("Upload an image", type=["jpg", "png"])
default_question = "How many people and what is the color of this image?"
user_question = st.text_input("Enter your question", value=default_question)

if st.button("Answer and Generate Speech"):
    if uploaded_image:
        image = Image.open(io.BytesIO(uploaded_image.getvalue()))

        # Run visual question answering with ViLT (the pipeline is loaded on each click)
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        vqa_result = vqa_pipeline({"image": image, "question": user_question})
        answer = vqa_result[0]['answer']  # Take the top-scoring answer
        display_text = f"Question: {user_question} Answer: {answer}"
        st.write(display_text)  # Display the answer

        # Generate and play speech
        audio_path = generate_speech(display_text, model, processor, vocoder, speaker_embeddings)
        with open(audio_path, 'rb') as audio_file:
            audio_bytes = audio_file.read()
        st.audio(audio_bytes, format="audio/wav")
    else:
        st.write("Please upload an image and enter a question.")