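# Gradio demo: generate a movie poster of a given celebrity with Stable Diffusion,
# then pass the result through the public GFPGAN Space for face restoration.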
import gradio as gr
from diffusers import StableDiffusionPipeline
import requests
import base64
import torch
import os
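# Seed the CUDA generator so the sampled latent noise is reproducible between runs.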
device = "cuda"
generator = torch.Generator(device=device)
seed = 496012807434005  # fixed seed (a fresh one could be drawn via generator.seed())
generator = generator.manual_seed(seed)
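# Flagged examples are written to a Hugging Face dataset, authenticated via the HF_TOKEN environment variable.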
HF_TOKEN = os.getenv('HF_TOKEN')
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "dst-movie-poster-demo")
def improve_image(img):
    # Send the generated poster to the public GFPGAN Space API to restore and upscale faces.
    img_in_base64 = gr.processing_utils.encode_pil_to_base64(img)
    scale = 3
    resp_obj = requests.post(
        'https://hf.space/embed/abidlabs/GFPGAN/+/api/predict',
        json={'data': [img_in_base64, scale]}
    )
    resp_img = gr.processing_utils.decode_base64_to_image(resp_obj.json()['data'][0])
    return resp_img
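# Load the Stable Diffusion v1.4 pipeline and move it to the GPU.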
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")
def generate(celebrity, setting):
    # Build the poster prompt, sample fixed latents, run the diffusion pipeline, then clean up faces.
    prompt = 'A movie poster of {} in the movie {}'.format(celebrity, setting)
    latent_sample = torch.randn((1, 4, 64, 64), generator=generator, device=device)
    gen_img = pipe(prompt, latents=latent_sample, num_inference_steps=100,
                   guidance_scale=7.5).images[0]  # 7.5 is the diffusers default guidance strength
    image = improve_image(gen_img)
    return image
# Build the Gradio UI: celebrity textbox and movie dropdown in, poster image out, with manual flagging.
gr.Interface(
    fn=generate,
    inputs=[gr.Textbox(label='Celebrity'),
            gr.Dropdown(['The Godfather', 'Titanic', 'Fast and Furious'], label='Movie')],
    outputs=gr.Image(type='pil'),
    allow_flagging="manual",
    flagging_options=['Incorrect movie poster', 'Incorrect Actor', 'Other Problem'],
    flagging_callback=hf_writer,
    flagging_dir='/flagged_data'
).launch()