#!/usr/bin/env python
"""Gradio entry point for the Shap-E text-to-3D demo.

Builds a description banner (adjusted for Hugging Face Spaces / CPU-only
hosts), instantiates the shared ``Model``, mounts the text-to-3D tab, and
launches the queued Gradio app.
"""

import os

import gradio as gr
import torch

# from app_image_to_3d import create_demo as create_demo_image_to_3d
from app_text_to_3d import create_demo as create_demo_text_to_3d
from model import Model

DESCRIPTION = '# [Shap-E](https://github.com/openai/shap-e)'

# Running inside a Hugging Face Space: suggest duplicating it for a private
# GPU queue. NOTE(review): the original source was an f-string, but no
# placeholder survives in this copy — the HTML "Duplicate Space" link that
# presumably interpolated {SPACE_ID} appears to have been stripped by the
# paste; restore that markup if the badge is wanted. The exact whitespace
# around the surviving text is also uncertain (TODO confirm against the
# upstream Space).
if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
    DESCRIPTION += '\nFor faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.'

# Warn users on CPU-only hosts; the Shap-E pipeline is impractical without CUDA.
if not torch.cuda.is_available():
    DESCRIPTION += '\nRunning on CPU 🥶 This demo does not work on CPU.'

# Single shared model instance reused across all requests.
model = Model()

with gr.Blocks(css='style.css') as demo:
    gr.Markdown(DESCRIPTION)
    create_demo_text_to_3d(model)

# Bound the request queue so a burst of submissions cannot pile up unbounded.
demo.queue(max_size=10).launch()