import spaces
import gradio as gr
from transformers import AutoModelForSeq2SeqLM
import os
import traceback

from huggingface_hub import login


@spaces.GPU
def fine_tune_model(model_name):
    # login(api_key.strip())
    # Load the model (the tokenizer is not needed for this placeholder)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name.strip())
    # Placeholder return used to verify the GPU path; a real run would return
    # or save the fine-tuned model instead.
    return "WORKS!"  # model
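
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original Space): one way fine_tune_model could
# be fleshed out into an actual fine-tuning run with Seq2SeqTrainer. The dataset
# ("cnn_dailymail"), its column names, and every hyperparameter below are
# illustrative assumptions, not values taken from this repo.
# ---------------------------------------------------------------------------
@spaces.GPU
def fine_tune_model_sketch(model_name):
    from datasets import load_dataset
    from transformers import (
        AutoTokenizer,
        DataCollatorForSeq2Seq,
        Seq2SeqTrainer,
        Seq2SeqTrainingArguments,
    )

    model_name = model_name.strip()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

    # Tiny slice of an assumed summarization dataset to keep the demo run short.
    dataset = load_dataset("cnn_dailymail", "3.0.0", split="train[:200]")

    def preprocess(batch):
        # Tokenize inputs and targets; the max length is an arbitrary choice.
        return tokenizer(
            batch["article"],
            text_target=batch["highlights"],
            max_length=512,
            truncation=True,
        )

    tokenized = dataset.map(preprocess, batched=True, remove_columns=dataset.column_names)

    args = Seq2SeqTrainingArguments(
        output_dir="./finetuned-model",
        per_device_train_batch_size=4,
        num_train_epochs=1,
        logging_steps=10,
        report_to="none",
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=args,
        train_dataset=tokenized,
        data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
    )
    trainer.train()
    return "Training finished"
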
# Create Gradio interface
try:
    iface = gr.Interface(
        fn=fine_tune_model,
        inputs=[
            gr.Textbox(label="Model Name (e.g., 'google/t5-efficient-tiny-nh8')"),
        ],
        outputs="text",
        title="Fine-Tune Hugging Face Model",
        description="This interface allows you to fine-tune a Hugging Face model on a specified dataset.",
    )
    # Launch the interface
    iface.launch()
except Exception as e:
    print(f"An error occurred: {str(e)}, TB: {traceback.format_exc()}")