chrisociepa committed on
Commit
e20e7d3
1 Parent(s): 6dd5f06

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -0
app.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Hugging Face Hub identifier of the causal LM served by this demo.
model_name = "Azurro/APT-1B-Base"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Load the weights in bfloat16 and let accelerate place them on the available
# device(s). Previously torch_dtype/device_map were passed to pipeline(),
# where they only take effect when the pipeline loads the model itself from a
# string identifier — with a pre-instantiated model object they were ignored
# and the model silently stayed in float32 on the default device.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Single text-generation pipeline, reused by every Gradio request.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
17
+
18
def generate_text(prompt, max_length, temperature, top_k, top_p, beams):
    """Generate a continuation of *prompt* using the module-level pipeline.

    The sampling knobs (length cap, temperature, top-k, top-p, beam count)
    are forwarded unchanged to the text-generation pipeline; sampling is
    always enabled. Returns the full generated text of the first candidate.
    """
    sampling_kwargs = {
        "max_length": max_length,
        "temperature": temperature,
        "top_k": top_k,
        "top_p": top_p,
        "num_beams": beams,
        "do_sample": True,
    }
    results = generator(prompt, **sampling_kwargs)
    return results[0]["generated_text"]
27
+
28
# Modern Gradio component API: the gr.inputs.* / gr.outputs.* namespaces and
# the `default=` keyword were removed in Gradio 3.x; components live directly
# on the gr namespace and take `value=` for the initial setting.
input_text = gr.Textbox(label="Input Text")
max_length = gr.Slider(1, 200, step=1, value=100, label="Max Length")
temperature = gr.Slider(0.1, 1.0, step=0.1, value=0.8, label="Temperature")
top_k = gr.Slider(1, 200, step=1, value=10, label="Top K")
# top_p is a cumulative probability mass and must stay within (0, 1]; the
# previous upper bound of 2.0 allowed invalid values.
top_p = gr.Slider(0.1, 1.0, step=0.05, value=0.95, label="Top P")
beams = gr.Slider(1, 20, step=1, value=1, label="Beams")

outputs = gr.Textbox(label="Generated Text")

# Wire the inputs to generate_text in the same order as its parameters.
gr.Interface(
    fn=generate_text,
    inputs=[input_text, max_length, temperature, top_k, top_p, beams],
    outputs=outputs,
).launch()