|
import gradio as gr
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
|
|
|
|
model_name = "gpt2" |
|
|
|
tokenizer = GPT2Tokenizer.from_pretrained(model_name) |
|
model = GPT2LMHeadModel.from_pretrained(model_name) |
|
|
|
def chat_with_bot(input_text): |
|
|
|
inputs = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors="pt") |
|
|
|
|
|
with torch.no_grad(): |
|
outputs = model.generate(inputs, max_length=150, num_return_sequences=1, no_repeat_ngram_size=2) |
|
|
|
|
|
response = tokenizer.decode(outputs[0], skip_special_tokens=True) |
|
|
|
return response |
|
|
|
|
|
iface = gr.Interface(fn=chat_with_bot, |
|
inputs="text", |
|
outputs="text", |
|
title="Chatbot", |
|
description="A simple chatbot using GPT-2") |
|
|
|
iface.launch() |
|
|