init: app
- .gitignore +1 -0
- app.py +144 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+.python-version
app.py
ADDED
@@ -0,0 +1,144 @@
+import gradio as gr
+import torch
+from transformers import GPT2LMHeadModel, T5Tokenizer
+
+model_name = "akiFQC/japanese-dialogpt-small-aozora"
+tokenizer = T5Tokenizer.from_pretrained(model_name)
+tokenizer.do_lower_case = True  # workaround for a bug in tokenizer config loading
+model = GPT2LMHeadModel.from_pretrained(model_name)
+
+
+class DialogGPT:
+    def __init__(self, tokenizer, model, n_candidate=4, param_lambda=0.1):
+        self.tokenizer = tokenizer
+        self.model = model
+        self.model.eval()
+        self.n_candidate = n_candidate
+        self.param_lambda = param_lambda
+
+    def _calc_single_scores(self, token_ids):
+        with torch.inference_mode():
+            candidate_token_ids = token_ids[:, :-1]
+            label_token_ids = token_ids[:, 1:]
+            outputs = self.model(candidate_token_ids, labels=label_token_ids)
+            _, logits = outputs[:2]
+            logits = torch.log_softmax(logits, dim=-1)
+
+            logit_at_target = logits.gather(
+                dim=-1, index=label_token_ids.unsqueeze(-1)
+            ).squeeze(-1)
+
+            # mask out pad token positions
+            mask_at_pad = label_token_ids == self.tokenizer.pad_token_id
+            # log_likelihood (b, l)
+            log_likelihood = logit_at_target
+            log_likelihood.masked_fill_(mask_at_pad, 0.0)
+            log_likelihood_per_candidate = log_likelihood.sum(dim=1)
+            # normalize by length
+            # log_likelihood_per_candidate = log_likelihood_per_candidate / (candidate_token_ids.shape[1] - mask_at_pad.sum(dim=1))
+            return log_likelihood_per_candidate
+
+    def _calc_scores(self, sequences, scores, input_ids=None):
+        transition_scores = self.model.compute_transition_scores(
+            sequences, scores, normalize_logits=True
+        )
+        if input_ids is None:
+            input_length = 0
+        else:
+            input_length = input_ids.shape[1]
+        generated_tokens = sequences[:, input_length:]  # n x l
+        assert (
+            generated_tokens.shape[1] == transition_scores.shape[1]
+        ), f"{generated_tokens.shape[1]} != {transition_scores.shape[1]}"
+        # print(transition_scores.shape)
+        # print(generated_tokens)
+        transition_scores.masked_fill_(
+            generated_tokens == self.tokenizer.pad_token_id, 0.0
+        )
+        transition_scores = transition_scores.sum(dim=1)
+        # print(transition_scores)
+        return transition_scores
+
+    def reply(self, reply, history):
+        chat_history_ids = torch.LongTensor(history).unsqueeze(0)
+        # encode the new user input, add the eos_token, and return a PyTorch tensor
+        new_user_input_ids = self.tokenizer.encode(
+            reply + self.tokenizer.eos_token, return_tensors="pt"
+        )
+
+        # append the new user input tokens to the chat history
+        bot_input_ids = (
+            torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
+            if chat_history_ids is not None
+            else new_user_input_ids
+        )
+
+        # sample candidate responses (total length capped at 512 tokens)
+        with torch.inference_mode():
+            output = self.model.generate(
+                bot_input_ids,
+                pad_token_id=self.tokenizer.pad_token_id,
+                do_sample=True,
+                top_p=0.93,
+                temperature=0.5,
+                repetition_penalty=1.17,
+                max_time=10,
+                num_return_sequences=self.n_candidate,
+                max_length=512,
+                min_length=2,
+                forced_eos_token_id=self.tokenizer.pad_token_id,
+                return_dict_in_generate=True,
+                output_scores=True,
+                min_new_tokens=2,
+            )
+
+        # score of each candidate
+        scores_condition_s2t = self._calc_scores(
+            sequences=output.sequences, scores=output.scores, input_ids=bot_input_ids
+        )
+        new_token_ids = output.sequences[:, bot_input_ids.shape[-1] :]
+        single_scores = self._calc_single_scores(new_token_ids) * self.param_lambda
+
+        total_scores = scores_condition_s2t - single_scores  # MMI-style rerank
+        id_selected = torch.argmax(total_scores)
+
+        chat_history_ids = output.sequences[id_selected].unsqueeze(
+            0
+        )  # update chat history
+        # remove pad tokens
+        chat_history_ids = chat_history_ids[
+            :, chat_history_ids[0] != self.tokenizer.pad_token_id
+        ]
+        reply_string = self.tokenizer.decode(
+            chat_history_ids[0], skip_special_tokens=False
+        )
+        return reply_string, chat_history_ids[0].tolist()
+
+
+bot = DialogGPT(
+    tokenizer,
+    model,
+)
+
+
+def predict(user_input, history=[]):
+    reply_string, history = bot.reply(user_input, history)
+    response = reply_string.split(tokenizer.eos_token)
+    response = [
+        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
+    ]  # convert to a list of (user, bot) message pairs
+    return response, history
+
+
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot()
+    state = gr.State([])
+
+    with gr.Row():
+        txt = gr.Textbox(
+            show_label=False, placeholder="Enter text and press enter"
+        ).style(container=False)
+
+    txt.submit(predict, [txt, state], [chatbot, state])
+
+demo.launch()
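
Note on the selection step in DialogGPT.reply: each of the n_candidate sampled replies t for a context s is scored as log p(t | s) - param_lambda * log p(t), and the highest-scoring candidate is returned (an MMI-style rerank). A minimal standalone sketch of that rule, using made-up score values in place of real model outputs:

import torch

# Hypothetical per-candidate log-probability sums (n_candidate = 4):
#   cond_scores   ~ log p(candidate | context), as from compute_transition_scores
#   single_scores ~ log p(candidate) on its own, as from _calc_single_scores
cond_scores = torch.tensor([-12.3, -10.8, -11.5, -13.0])
single_scores = torch.tensor([-9.1, -7.2, -8.0, -9.5])
param_lambda = 0.1

total_scores = cond_scores - param_lambda * single_scores
best = int(torch.argmax(total_scores))  # index of the candidate kept as the reply
print(best)

Subtracting the unconditional likelihood penalizes generic replies that would be probable regardless of the context.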