chrisociepa's picture
Upload app.py
e20e7d3
raw
history blame
1.39 kB
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Model to serve; tokenizer and weights are pulled from the HuggingFace Hub.
model_name = "Azurro/APT-1B-Base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Load the weights directly in bfloat16 and let accelerate place them
# (device_map="auto"). NOTE: the original passed torch_dtype/device_map to
# pipeline() instead — those kwargs only apply when pipeline() loads the
# model itself from a string, so the model was actually materialized as a
# full fp32 copy first. Loading here halves peak memory.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
# Text-generation pipeline built on the preloaded model/tokenizer; the
# pipeline infers the device from the model's placement.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
def generate_text(prompt, max_length, temperature, top_k, top_p, beams):
    """Generate a continuation of *prompt* with the module-level pipeline.

    Args:
        prompt: Input text to continue.
        max_length: Total token budget (prompt tokens included).
        temperature: Sampling temperature.
        top_k: Top-k sampling cutoff.
        top_p: Nucleus-sampling cutoff.
        beams: Number of beams for beam search.

    Returns:
        The generated text of the first candidate (prompt included, as
        returned by the text-generation pipeline).
    """
    results = generator(
        prompt,
        do_sample=True,
        max_length=max_length,
        temperature=temperature,
        top_k=top_k,
        top_p=top_p,
        num_beams=beams,
    )
    first_candidate = results[0]
    return first_candidate["generated_text"]
# UI components. The legacy ``gr.inputs``/``gr.outputs`` namespaces (and the
# ``default=`` kwarg) were deprecated in Gradio 3 and removed in later
# releases; the top-level components with ``value=`` are the supported form.
input_text = gr.Textbox(label="Input Text")
max_length = gr.Slider(1, 200, step=1, value=100, label="Max Length")
temperature = gr.Slider(0.1, 1.0, step=0.1, value=0.8, label="Temperature")
top_k = gr.Slider(1, 200, step=1, value=10, label="Top K")
top_p = gr.Slider(0.1, 2.0, step=0.1, value=0.95, label="Top P")
beams = gr.Slider(1, 20, step=1, value=1, label="Beams")
outputs = gr.Textbox(label="Generated Text")
# Wire the generation function to the UI and start the app server.
gr.Interface(
    fn=generate_text,
    inputs=[input_text, max_length, temperature, top_k, top_p, beams],
    outputs=outputs,
).launch()