#import libraries and dependencies
#from gradio.mix import Parallel

import gradio as gr
import torch
from transformers import pipeline

#define strings for the interface title, description, and examples
title="Text Generator"
#title1="Level 1 Text Generator"
#title2="Level 3 Text Generator"
description="This text generator has been trained to chat and to respond to natural language instructions."
#description1="This is the basic text generator all students were taught to code using an older, smaller language model. Input text, submit, and the text generator will generate one output text instance."
#description2="This is a more advanced text generator that many students were taught to code. Input text, and the text generator generates three output text instances from three language models. Importantly, two of these language models were designed to process explicit instructions."
#description3="This is the most advanced text generator that a few students were taught to code. Input text, and the text generator generates an output text instance. You can resubmit to include that new text as input text."
examples = [
    ["What is the capital of China?"],
    ["How do I apply for an Australian visa?"],
    ["Write a short story."],
    ["Once upon a time, "]
]

#instantiate the text-generation pipeline
#pipe = pipeline("text-generation", model='EleutherAI/gpt-neo-2.7B', trust_remote_code=True)

ans = pipeline(model="databricks/dolly-v2-3b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto")
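# Note (an assumption, not stated in the original file): device_map="auto"
# requires the accelerate package, and torch.bfloat16 needs hardware support;
# on CPU-only hardware this ~3B-parameter model will load but respond slowly.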

#model1 = gr.Interface.load("huggingface/bigscience/bloom-560m")
#model2 = gr.Interface.load("huggingface/google/flan-t5-xl")
#model3 = gr.Interface.load("huggingface/bigscience/bloomz-7b1")
#model4 = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")

#other candidate models: togethercomputer/GPT-NeoXT-Chat-Base-20B, decapoda-research/llama-7b-hf

#define functions

def answer(query):
    # The Dolly pipeline returns a list of dicts; return just the generated text
    out = ans(query)
    return out[0]["generated_text"]
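
# A hypothetical variant (not in the original app) showing how generation
# settings could be forwarded; max_new_tokens, do_sample, and temperature are
# standard transformers generation kwargs, assumed here to be accepted by
# Dolly's custom pipeline.
def answer_with_options(query, max_new_tokens=256, temperature=0.7):
    out = ans(query, max_new_tokens=max_new_tokens, do_sample=True,
              temperature=temperature)
    return out[0]["generated_text"]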

#def complete_with_gpt(text):
#    # Use the last 50 characters of the text as context
#    return text[:-50] + model4(text[-50:])

#with gr.Blocks() as demo:
#    with gr.Row():
#        textbox = gr.Textbox(placeholder=description3, lines=8)
#        with gr.Column():
#            btn = gr.Button("Submit")

#    btn.click(complete_with_gpt, textbox, textbox)

#tab1 = gr.Interface.load("huggingface/gpt2", title=title1, description=description1, examples=examples)
#tab2 = gr.Parallel(model1, model2, model3, inputs=gr.Textbox(lines=5, label="Input explicit or implicit instructions"), title=title2, description=description2, examples=examples)
#tab3 = demo

#demo1 = gr.TabbedInterface([tab1, tab2, tab3], ["Level 1", "Level 3", "Level 5"], title=title)

#if __name__ == "__main__":
#    demo1.launch(debug=True)
#gr.Interface.from_pipeline(pipe).launch()

demo = gr.Interface(fn=answer, inputs="text", outputs="text", title=title, description=description, examples=examples)
demo.launch()
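
# Hypothetical launch variants (assumptions, not part of the original app):
# demo.launch(share=True) creates a temporary public URL, and
# demo.launch(server_name="0.0.0.0") exposes the app from inside a container.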