format
app.py (CHANGED)
```diff
@@ -21,6 +21,7 @@ article = """---
 
 These models generate instructions for Large Language Models (LLMs) from arbitrary text. They are fine-tuned on the [fleece2instructions](https://huggingface.co/datasets/pszemraj/fleece2instructions) dataset, which is a filtered/formatted version of the [alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) dataset."""
 
+
 def inference(text):
     output_flan_t5 = pipe_flan_t5(
         text, num_beams=2, early_stopping=True, max_length=100
@@ -35,8 +36,10 @@ io = gr.Interface(
     inference,
     gr.Textbox(
         lines=3,
-
+        max_lines=10,
+        placeholder="Add text here & find out what LLM instruction/prompt might output that result!",
         interactive=True,
+        show_label=False,
     ),
     outputs=[
         gr.Textbox(lines=1, label="Flan T5 Small", interactive=False),
@@ -48,4 +51,4 @@ io = gr.Interface(
     examples=examples,
     cache_examples=False,
 )
-io.launch()
+io.launch()
```
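For context on what `inference()` is calling: the Space wraps a text2text-generation pipeline around an instruction-generation checkpoint fine-tuned on fleece2instructions. A minimal sketch of that usage outside Gradio — the model id below is a placeholder, since the diff does not show how `pipe_flan_t5` is constructed:

```python
from transformers import pipeline

# Placeholder checkpoint id: the diff does not show which fleece2instructions
# fine-tune backs pipe_flan_t5, so substitute the Space's actual model here.
MODEL_ID = "pszemraj/flan-t5-small-instructiongen"

# text2text-generation gives a simple text-in/text-out wrapper around the seq2seq model.
pipe_flan_t5 = pipeline("text2text-generation", model=MODEL_ID)

text = (
    "Neural networks trained on large corpora can summarize, translate, "
    "and answer questions when given a well-phrased instruction."
)

# Same generation settings as inference() in app.py: small beam search, capped length.
result = pipe_flan_t5(text, num_beams=2, early_stopping=True, max_length=100)
print(result[0]["generated_text"])  # the instruction/prompt inferred from the text
```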
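The second hunk adds `max_lines`, `placeholder`, and `show_label` to the input `gr.Textbox`. A sketch of how the touched parts of `app.py` read after this change — the pipeline setup and the `examples` list are assumptions, since they sit outside the hunks shown above:

```python
import gradio as gr
from transformers import pipeline

# Assumed setup: the real app builds pipe_flan_t5 outside the hunks shown above.
pipe_flan_t5 = pipeline(
    "text2text-generation", model="pszemraj/flan-t5-small-instructiongen"  # placeholder id
)


def inference(text):
    output_flan_t5 = pipe_flan_t5(
        text, num_beams=2, early_stopping=True, max_length=100
    )
    return output_flan_t5[0]["generated_text"]


# Assumed examples list (one entry per input component).
examples = [["The report summarizes quarterly sales figures by region and product line."]]

io = gr.Interface(
    inference,
    gr.Textbox(
        lines=3,
        max_lines=10,  # added: let the input box grow for longer passages
        placeholder="Add text here & find out what LLM instruction/prompt might output that result!",
        interactive=True,
        show_label=False,  # added: hide the auto-generated component label
    ),
    outputs=[
        gr.Textbox(lines=1, label="Flan T5 Small", interactive=False),
        # additional output boxes may follow in the full file
    ],
    examples=examples,
    cache_examples=False,
)

io.launch()
```

With `show_label=False` the auto-generated label above the input box disappears, so the new `placeholder` text carries the prompt to the user instead, and `max_lines=10` lets the box expand beyond the initial three lines for longer passages.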