"""Gradio app: expand a short tag prompt into detailed anime-style prompts.

Loads FredZhang7's anime prompt-generation model (a fine-tuned distilgpt2)
once at startup and serves it through a simple text-in / text-out Gradio UI.
"""
import json

import torch  # noqa: F401 -- required by transformers' PyTorch backend
import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline

tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
tokenizer.add_special_tokens({'pad_token': '[PAD]'})
# NOTE(review): adding '[PAD]' grows the tokenizer vocab, but the model's
# embedding matrix is not resized here -- presumably the pipeline never feeds
# the pad id through the model; confirm, or call
# model.resize_token_embeddings(len(tokenizer)) after loading.
model = GPT2LMHeadModel.from_pretrained('FredZhang7/anime-anything-promptgen-v2')

# Build the text-generation pipeline once and reuse it for every request.
nlp = pipeline('text-generation', model=model, tokenizer=tokenizer)


def generate(prompt):
    """Sample 3 expanded prompt variants for *prompt*.

    Parameters
    ----------
    prompt : str
        Seed prompt, e.g. ``"1girl, genshin"``.

    Returns
    -------
    list[dict]
        The raw pipeline output, a list of ``{'generated_text': str}``
        dicts (Gradio stringifies it for the output textbox).
    """
    outs = nlp(
        prompt,
        max_length=76,
        num_return_sequences=3,
        do_sample=True,
        repetition_penalty=1.2,
        temperature=0.7,
        top_k=3,
        early_stopping=True,  # no-op under sampling; only affects beam search
    )
    # BUG FIX: the pipeline returns a plain list of dicts, which has no
    # .to_json() method (that is the pandas DataFrame API) -- the original
    # line raised AttributeError on every call. Serialize with json instead.
    outs_json = json.dumps(outs, ensure_ascii=False)
    print(prompt)
    print(outs_json)
    return outs


input_component = gr.Textbox(label="Input a prompt", value="1girl, genshin")
output_component = gr.Textbox(label="detail Prompt")
examples = []
description = ""

# Launch at import time (matches the original script / Hugging Face Spaces
# behavior, where the interface must come up when the module is executed).
gr.Interface(
    generate,
    inputs=input_component,
    outputs=output_component,
    examples=examples,
    title="anything prompt",
    description=description,
).launch()