# Source: Hugging Face Space file viewer metadata (kept as a comment so the file runs)
# YaserDS-777's picture
# Update app.py
# 42a71b8 verified
# raw | history | blame
# 853 Bytes
import os

import gradio as gr
from transformers import pipeline

# Read the Hugging Face API token from the environment; required because
# meta-llama models are gated and need an authenticated download.
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN")

# Initialize the text-generation pipeline with authentication.
# NOTE: `use_auth_token` is deprecated in recent transformers releases;
# the supported keyword is `token`.
# NOTE(review): this loads a 405B-parameter model in-process — presumably
# intended to run on suitable infrastructure; verify deployment target.
pipe = pipeline(
    "text-generation",
    model="meta-llama/Meta-Llama-3.1-405B",
    token=HUGGINGFACE_API_TOKEN,
)
# Define the function to generate text
def generate_text(prompt, max_length=50):
    """Generate a text continuation for *prompt* using the global pipeline.

    Args:
        prompt: The input text to continue.
        max_length: Total token budget (prompt + continuation) passed to the
            pipeline. Defaults to 50, matching the original behavior.

    Returns:
        The generated text (the pipeline includes the prompt in its output).
    """
    result = pipe(prompt, max_length=max_length, num_return_sequences=1)
    return result[0]['generated_text']
# Create a Gradio interface.
# NOTE: the `gr.inputs` namespace was removed in Gradio 3.x; components are
# now referenced directly as `gr.Textbox`.
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs="text",
    title="Meta-Llama Text Generation",
    description="Generate text using the Meta-Llama 3.1 405B model."
)

# Launch the interface only when run as a script, so importing this module
# (e.g. in tests) does not start a web server.
if __name__ == "__main__":
    iface.launch()