adr2432 committed on
Commit
1488bee
1 Parent(s): 02c5619

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -13
app.py CHANGED
@@ -1,19 +1,22 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM
3
 
4
- # Load the Llama-2-7b-chat model
5
- model = AutoModelForCausalLM.from_pretrained("llama-2-7b-chat")
6
 
7
- # Define a function to generate text using the model
8
- def generate_text(prompt):
9
- # Generate text using the model
10
- generated_text = model.generate(prompt, max_length=4000)
11
 
12
- # Return the generated text
13
- return generated_text
 
 
14
 
15
- # Create a Gradio app
16
- interface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
 
 
 
 
17
 
18
- # Launch the Gradio app
19
- interface.launch()
 
1
  import gradio as gr
2
+ from transformers import pipeline
3
 
4
# Build the text-generation pipeline once at import time so the Gradio
# handler can reuse it across requests instead of reloading the model.
# NOTE(review): 'llama-2-7b-chat' is not a fully-qualified Hub repo id
# (those look like 'meta-llama/Llama-2-7b-chat-hf') — confirm it resolves
# to a local checkpoint or valid model before deploying.
generator = pipeline(task='text-generation', model='llama-2-7b-chat')
 
5
 
6
def generate(text):
    """Generate a continuation of *text* with the module-level pipeline.

    Asks the pipeline for a single sequence capped at 100 tokens and
    returns its full generated string (prompt included).
    """
    outputs = generator(text, max_length=100, num_return_sequences=1)
    first_sequence = outputs[0]
    return first_sequence["generated_text"]
 
9
 
10
# Example prompts displayed under the interface; each inner list is one
# row of input values (a single textbox value here).
examples = [
    [prompt]
    for prompt in (
        "The Moon's orbit around Earth has",
        "The smooth Borealis basin in the Northern Hemisphere covers 40%",
    )
]
14
 
15
# Build the Gradio UI.  `gr.inputs.Textbox` / `gr.outputs.Textbox` were
# deprecated in Gradio 3.x and removed entirely in 4.x; the top-level
# `gr.Textbox` component serves both the input and output slots.
demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=5, label="Input Text"),
    outputs=gr.Textbox(label="Generated Text"),
    examples=examples,
)

# Start the local Gradio server (blocks until the app is stopped).
demo.launch()