import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hub id for the chat-tuned Llama-2 7B checkpoint.
# NOTE(review): the original used "llama-2-7b-chat", which is not a valid
# Hugging Face model id; the gated hub id is "meta-llama/Llama-2-7b-chat-hf".
MODEL_ID = "meta-llama/Llama-2-7b-chat-hf"

# Load the tokenizer and model once at startup, not per request.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)


def generate_text(prompt: str) -> str:
    """Generate a text continuation for *prompt* with Llama-2-7b-chat.

    Args:
        prompt: Raw user text from the Gradio textbox.

    Returns:
        The model's generated text, decoded to a plain string.
    """
    # model.generate() expects token ids, not a raw string — the original
    # passed the prompt directly, which fails at runtime. Tokenize first.
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds only the generated continuation; the original's
    # max_length=4000 counted the prompt tokens toward the limit as well.
    output_ids = model.generate(**inputs, max_new_tokens=512)
    # Decode the id tensor back into human-readable text, dropping special
    # tokens such as <s>/</s>, so the "text" output component can render it.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)


# Simple text-in / text-out UI around the generator.
interface = gr.Interface(fn=generate_text, inputs="text", outputs="text")

# Guard the launch so importing this module doesn't start the server.
if __name__ == "__main__":
    interface.launch()