#!/usr/bin/env python
# coding: utf-8

import os
import sys

# import numpy as np
import streamlit as st

# from PIL import Image
# import clip

# sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# import gradio as gr

# from dalle.models import Dalle
# from dalle.utils.utils import clip_score, set_seed

device = "cpu"

# model = Dalle.from_pretrained("minDALL-E/1.3B")  # This will automatically download the pretrained model.
# model.to(device=device)
# model_clip, preprocess_clip = clip.load("ViT-B/32", device=device)
# model_clip.to(device=device)


# def sample(prompt):
#     # Sampling
#     images = (
#         model.sampling(prompt=prompt, top_k=256, top_p=None, softmax_temperature=1.0, num_candidates=3, device=device)
#         .cpu()
#         .numpy()
#     )
#     images = np.transpose(images, (0, 2, 3, 1))
#
#     # CLIP re-ranking of the candidates
#     rank = clip_score(
#         prompt=prompt, images=images, model_clip=model_clip, preprocess_clip=preprocess_clip, device=device
#     )
#
#     # Reorder candidates (best first) and convert to PIL images
#     images = images[rank]
#     # print(rank, images.shape)
#     pil_images = []
#     for i in range(len(images)):
#         im = Image.fromarray((images[i] * 255).astype(np.uint8))
#         pil_images.append(im)
#     # im = Image.fromarray((images[0] * 255).astype(np.uint8))
#     return pil_images


# title = "Interactive demo: ImageGPT"
# description = "Demo for OpenAI's ImageGPT: Generative Pretraining from Pixels. To use it, simply upload an image or use the example image below and click 'submit'. Results will show up in a few seconds."
# article = "ImageGPT: Generative Pretraining from Pixels | Official blog"
" # iface = gr.Interface( # fn=sample, # inputs=[gr.inputs.Textbox(label="What would you like to see?")], # outputs=gr.outputs.Image(type="pil", label="Model input + completions"), # title=title, # description=description, # article=article, # #examples=examples, # enable_queue=True, # ) # iface.launch(debug=True) #!/usr/bin/env python # coding: utf-8 st.sidebar.markdown( """""", unsafe_allow_html=True, ) st.sidebar.markdown( """ ___
st.sidebar.markdown(
    """""",
    unsafe_allow_html=True,
)

st.sidebar.markdown(
    """
    ___
    <p style='text-align: center'>
    DALL·E mini is an AI model that generates images from any prompt you give it!

    Created by Boris Dayma et al. 2021
    <br/>
    GitHub | Project Report
    </p>
    """,
    unsafe_allow_html=True,
)
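
# Note: `unsafe_allow_html=True` is required above because Streamlit escapes
# HTML in markdown by default; without it the <p> and <br/> tags would be
# shown as literal text.

# Hypothetical sketch (kept commented out, like the inactive code above) of a
# main panel to pair with this sidebar: read a prompt and display generated
# candidates. `generate_images` is a placeholder name for the model call
# (e.g. a re-enabled `sample` above) and is not defined in this file.
# prompt = st.text_input("What would you like to see?")
# if prompt:
#     with st.spinner("Generating..."):
#         for im in generate_images(prompt):
#             st.image(im)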