"""Gradio demo that serves text generation from a fine-tuned causal LM.

Loads the Hugging Face model ``Hadeel11/fine-tuned-model-2`` once at import
time and exposes it through a simple text-in / text-out Gradio interface.
"""

from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline
import gradio as gr

# Model setup happens once at module load; downloads weights on first run.
model_name = "Hadeel11/fine-tuned-model-2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)


def generate_text(prompt):
    """Generate a continuation of *prompt*.

    Args:
        prompt: Free-form input text from the UI.

    Returns:
        The generated text (prompt included, per pipeline default),
        truncated to at most 512 tokens total.
    """
    # num_return_sequences=1 -> the pipeline returns a one-element list;
    # unwrap it and pull out the generated string.
    return text_generator(prompt, max_length=512, num_return_sequences=1)[0]["generated_text"]


# Launch at module level so the file works as a standalone app entry point
# (e.g. a Hugging Face Space), matching the original behavior.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Text Generation Inference",
)
iface.launch()