# gradio_tutorial/app.py
import gradio as gr
import torch
from torchvision.models import resnet50, ResNet50_Weights
from PIL import Image
import tempfile
from gtts import gTTS
import whisper
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# ----- Image classification model (ResNet-50) -----
weights = ResNet50_Weights.IMAGENET1K_V2
img_model = resnet50(weights=weights)
img_model.eval()
img_transform = weights.transforms()
imagenet_classes = weights.meta["categories"]
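
# Optional sketch (an assumption, not part of the original app): the classifier
# runs on CPU by default; the commented lines below would move it to a GPU when
# one is available, in which case image_classify would also need to send
# img_tensor to the same device.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# img_model.to(device)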

def image_classify(img: Image.Image):
    # Preprocess the image, run the classifier without gradients, and return
    # the top-5 ImageNet classes with their softmax probabilities.
    img_tensor = img_transform(img).unsqueeze(0)
    with torch.no_grad():
        outputs = img_model(img_tensor)
    probabilities = torch.nn.functional.softmax(outputs[0], dim=0)
    top5_prob, top5_catid = torch.topk(probabilities, 5)
    result = {imagenet_classes[top5_catid[i]]: float(top5_prob[i]) for i in range(5)}
    return result
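
# Usage sketch (hypothetical file path): image_classify returns a dict mapping
# the top-5 ImageNet class names to their probabilities, which gr.Label can
# render directly.
# image_classify(Image.open("example.jpg"))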

# ----- Language model (LM) -----
model_name = "cyberagent/open-calm-1b"
model = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", torch_dtype=torch.float16
)
tokenizer = AutoTokenizer.from_pretrained(
    model_name, use_fast=True, trust_remote_code=True
)
text_gen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=128,
    temperature=0.7,
    top_p=0.9,
    pad_token_id=tokenizer.eos_token_id,
)
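
# A possible variant (a preference, not a fix): max_new_tokens bounds only the
# newly generated continuation, whereas max_length also counts the prompt
# tokens, which can make the output length easier to reason about.
# text_gen_pipeline = pipeline(
#     "text-generation",
#     model=model,
#     tokenizer=tokenizer,
#     max_new_tokens=128,
#     temperature=0.7,
#     top_p=0.9,
#     pad_token_id=tokenizer.eos_token_id,
# )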

def generate_text(prompt):
    # Generate a continuation of the prompt with the Open CALM pipeline.
    result = text_gen_pipeline(prompt, do_sample=True, num_return_sequences=1)
    generated_text = result[0]["generated_text"]
    # The pipeline returns the full text including the prompt, which is
    # returned as-is here.
    return generated_text
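
# A minimal sketch (not wired into the UI): return only the newly generated
# continuation, assuming the pipeline echoes the prompt verbatim at the start
# of "generated_text".
def generate_continuation(prompt):
    full_text = text_gen_pipeline(prompt, do_sample=True, num_return_sequences=1)[0]["generated_text"]
    return full_text[len(prompt):] if full_text.startswith(prompt) else full_text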

# ----- Speech synthesis (TTS) -----
def text_to_speech(text, lang="ja"):
    # Synthesize speech with gTTS and return the path to a temporary MP3 file.
    tts = gTTS(text=text, lang=lang)
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
        tts.save(fp.name)
    return fp.name
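
# A minimal variant (assumption: a slower speaking rate is sometimes wanted):
# gTTS exposes a `slow` flag that lowers the speech tempo.
def text_to_speech_slow(text, lang="ja"):
    tts = gTTS(text=text, lang=lang, slow=True)
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
        tts.save(fp.name)
    return fp.name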

# ----- Speech recognition (ASR) -----
whisper_model = whisper.load_model("small")

def speech_to_text(audio_file):
    result = whisper_model.transcribe(audio_file)
    return result["text"]
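
# A minimal variant (assumption: the input speech is Japanese): pinning the
# language skips Whisper's automatic language detection.
def speech_to_text_ja(audio_file):
    result = whisper_model.transcribe(audio_file, language="ja")
    return result["text"]
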
# ----- Gradio UI -----
def run():
    with gr.Blocks() as demo:
        gr.Markdown("# Image Classification, Language Model, Speech Synthesis, and Speech Recognition")
        with gr.Tabs():
            with gr.TabItem("Image classification"):
                gr.Markdown("### Image classification (ResNet-50)")
                gr.Interface(
                    fn=image_classify,
                    inputs=gr.Image(type="pil"),
                    outputs=gr.Label(num_top_classes=5),
                    description="Upload an image to classify it (ImageNet).",
                )
            with gr.TabItem("Language model"):
                gr.Markdown("### Language model")
                lm_output = gr.Textbox(label="Generated text")
                user_input = gr.Textbox(label="Input text")
                send_btn = gr.Button("Send")
                send_btn.click(generate_text, inputs=user_input, outputs=lm_output)
            with gr.TabItem("Speech synthesis"):
                gr.Markdown("### Speech synthesis (gTTS)")
                tts_input = gr.Textbox(label="Text to synthesize")
                tts_output = gr.Audio(label="Synthesized speech")
                tts_button = gr.Button("Synthesize")
                tts_button.click(text_to_speech, inputs=tts_input, outputs=tts_output)
            with gr.TabItem("Speech recognition"):
                gr.Markdown("### Speech recognition (Whisper)")
                gr.Interface(
                    fn=speech_to_text,
                    inputs=gr.Audio(sources=["microphone", "upload"], type="filepath"),
                    outputs="text",
                    description="Record from the microphone and transcribe it.",
                )
    demo.launch()

if __name__ == "__main__":
    run()