Dabs commited on
Commit
0a271b1
1 Parent(s): 2494b26

initial commit

Browse files
Files changed (2) hide show
  1. app.py +28 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Conversational chatbot demo built on Microsoft's DialoGPT-medium model,
# served through a Gradio web interface (wired up at the bottom of the file).
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the pretrained tokenizer and model once at import time so every
# request reuses them (first run downloads the weights from the Hub).
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
7
def predict(input, history=None):
    """Generate the chatbot's next reply and the updated conversation state.

    Parameters
    ----------
    input : str
        The user's new message.
    history : list | None
        Nested list of token ids for the conversation so far (the Gradio
        "state" value); ``None`` or empty on the first turn.

    Returns
    -------
    tuple
        ``(pairs, history)`` where ``pairs`` is a list of ``(user, bot)``
        message tuples for the chatbot widget and ``history`` is the token
        id history to feed back in on the next call.
    """
    # NOTE(review): `input` shadows the builtin of the same name; kept as-is
    # to preserve the public signature bound into the Gradio interface.
    if history is None:
        # Fix for the mutable-default-argument pitfall: a shared `[]`
        # default would persist (and leak) chat history across calls.
        history = []

    # Tokenize the new user message, terminated with the EOS token so the
    # model sees a complete turn.
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

    # Append the new user tokens to the chat history (torch.cat skips the
    # empty tensor produced on the very first turn).
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # Generate a response; the output contains the entire conversation,
    # which becomes the new history.
    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()

    # Decode the full conversation and split on EOS into alternating
    # user/bot utterances.
    response = tokenizer.decode(history[0]).split("<|endoftext|>")
    # Pair consecutive utterances as (user, bot) tuples for display.
    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response, history
21
+
22
import gradio as gr

# Wire `predict` into a simple chat UI: a text box for the user's message
# plus a hidden "state" input/output that carries the token history
# between turns; `launch()` starts the local web server.
gr.Interface(fn=predict,
             theme="default",
             # Hide the default Gradio footer in the rendered page.
             css=".footer {display:none !important}",
             inputs=["text", "state"],
             outputs=["chatbot", "state"]).launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ transformers
2
+ torch