from transformers import GPT2Tokenizer, GPT2LMHeadModel


def generate_response(input_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    output_sequences = model.generate(
        input_ids=inputs['input_ids'],
        attention_mask=inputs['attention_mask'],
        max_length=100,          # maximum total length (prompt + generated tokens)
        temperature=0.3,
        top_k=40,
        top_p=0.85,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        early_stopping=True,
        do_sample=True,
        use_cache=True,
    )
    full_generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
    # Keep only the text that follows the '[Bot]' turn marker in the decoded output.
    bot_response_start = full_generated_text.find('[Bot]') + len('[Bot]')
    bot_response = full_generated_text[bot_response_start:]
    return bot_response


model_name = 'KhantKyaw/Chat_GPT-2'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)

user_input = input("You: ")  # read a message from the user
response = generate_response(user_input)
print("Chatbot:", response)