gokaygokay committed on
Commit
521472f
1 Parent(s): 565c7be

Update app.py

Files changed (1)
  1. app.py +285 -283
app.py CHANGED
@@ -1,284 +1,286 @@
-import os
-import warnings
-from huggingface_hub import hf_hub_download
-import gradio as gr
-from glob import glob
-import shutil
-import torch
-import numpy as np
-from PIL import Image
-from einops import rearrange
-import argparse
-
-# Suppress warnings
-warnings.simplefilter('ignore', category=UserWarning)
-warnings.simplefilter('ignore', category=FutureWarning)
-warnings.simplefilter('ignore', category=DeprecationWarning)
-
-def download_models():
-    # Create weights directory if it doesn't exist
-    os.makedirs("weights", exist_ok=True)
-    os.makedirs("weights/hunyuanDiT", exist_ok=True)
-
-    # Download Hunyuan3D-1 model
-    try:
-        hf_hub_download(
-            repo_id="tencent/Hunyuan3D-1",
-            local_dir="./weights",
-            resume_download=True
-        )
-        print("Successfully downloaded Hunyuan3D-1 model")
-    except Exception as e:
-        print(f"Error downloading Hunyuan3D-1: {e}")
-
-    # Download HunyuanDiT model
-    try:
-        hf_hub_download(
-            repo_id="Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled",
-            local_dir="./weights/hunyuanDiT",
-            resume_download=True
-        )
-        print("Successfully downloaded HunyuanDiT model")
-    except Exception as e:
-        print(f"Error downloading HunyuanDiT: {e}")
-
-# Download models before starting the app
-download_models()
-
-# Parse arguments
-parser = argparse.ArgumentParser()
-parser.add_argument("--use_lite", default=False, action="store_true")
-parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
-parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
-parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
-parser.add_argument("--save_memory", default=False, action="store_true")
-parser.add_argument("--device", default="cuda:0", type=str)
-args = parser.parse_args()
-
-# Constants
-CONST_PORT = 8080
-CONST_MAX_QUEUE = 1
-CONST_SERVER = '0.0.0.0'
-
-CONST_HEADER = '''
-<h2><b>Official 🤗 Gradio Demo</b></h2>
-<h2><a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'>
-<b>Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation</b></a></h2>
-'''
-
-# Helper functions
-def get_example_img_list():
-    print('Loading example img list ...')
-    return sorted(glob('./demos/example_*.png'))
-
-def get_example_txt_list():
-    print('Loading example txt list ...')
-    txt_list = []
-    for line in open('./demos/example_list.txt'):
-        txt_list.append(line.strip())
-    return txt_list
-
-example_is = get_example_img_list()
-example_ts = get_example_txt_list()
-
-# Import required workers
-from infer import seed_everything, save_gif
-from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
-
-# Initialize workers
-worker_xbg = Removebg()
-print(f"loading {args.text2image_path}")
-worker_t2i = Text2Image(
-    pretrain=args.text2image_path,
-    device=args.device,
-    save_memory=args.save_memory
-)
-worker_i2v = Image2Views(
-    use_lite=args.use_lite,
-    device=args.device
-)
-worker_v23 = Views2Mesh(
-    args.mv23d_cfg_path,
-    args.mv23d_ckt_path,
-    use_lite=args.use_lite,
-    device=args.device
-)
-worker_gif = GifRenderer(args.device)
-
-# Pipeline stages
-def stage_0_t2i(text, image, seed, step):
-    os.makedirs('./outputs/app_output', exist_ok=True)
-    exists = set(int(_) for _ in os.listdir('./outputs/app_output') if not _.startswith("."))
-    cur_id = min(set(range(30)) - exists) if len(exists) < 30 else 0
-
-    if os.path.exists(f"./outputs/app_output/{(cur_id + 1) % 30}"):
-        shutil.rmtree(f"./outputs/app_output/{(cur_id + 1) % 30}")
-    save_folder = f'./outputs/app_output/{cur_id}'
-    os.makedirs(save_folder, exist_ok=True)
-
-    dst = save_folder + '/img.png'
-
-    if not text:
-        if image is None:
-            return dst, save_folder
-        image.save(dst)
-        return dst, save_folder
-
-    image = worker_t2i(text, seed, step)
-    image.save(dst)
-    dst = worker_xbg(image, save_folder)
-    return dst, save_folder
-
-def stage_1_xbg(image, save_folder):
-    if isinstance(image, str):
-        image = Image.open(image)
-    dst = save_folder + '/img_nobg.png'
-    rgba = worker_xbg(image)
-    rgba.save(dst)
-    return dst
-
-def stage_2_i2v(image, seed, step, save_folder):
-    if isinstance(image, str):
-        image = Image.open(image)
-    gif_dst = save_folder + '/views.gif'
-    res_img, pils = worker_i2v(image, seed, step)
-    save_gif(pils, gif_dst)
-    views_img, cond_img = res_img[0], res_img[1]
-    img_array = np.asarray(views_img, dtype=np.uint8)
-    show_img = rearrange(img_array, '(n h) (m w) c -> (n m) h w c', n=3, m=2)
-    show_img = show_img[worker_i2v.order, ...]
-    show_img = rearrange(show_img, '(n m) h w c -> (n h) (m w) c', n=2, m=3)
-    show_img = Image.fromarray(show_img)
-    return views_img, cond_img, show_img
-
-def stage_3_v23(views_pil, cond_pil, seed, save_folder, target_face_count=30000,
-                do_texture_mapping=True, do_render=True):
-    do_texture_mapping = do_texture_mapping or do_render
-    obj_dst = save_folder + '/mesh_with_colors.obj'
-    glb_dst = save_folder + '/mesh.glb'
-    worker_v23(
-        views_pil,
-        cond_pil,
-        seed=seed,
-        save_folder=save_folder,
-        target_face_count=target_face_count,
-        do_texture_mapping=do_texture_mapping
-    )
-    return obj_dst, glb_dst
-
-def stage_4_gif(obj_dst, save_folder, do_render_gif=True):
-    if not do_render_gif:
-        return None
-    gif_dst = save_folder + '/output.gif'
-    worker_gif(
-        save_folder + '/mesh.obj',
-        gif_dst_path=gif_dst
-    )
-    return gif_dst
-
-# Gradio Interface
-with gr.Blocks() as demo:
-    gr.Markdown(CONST_HEADER)
-
-    with gr.Row(variant="panel"):
-        with gr.Column(scale=2):
-            with gr.Tab("Text to 3D"):
-                with gr.Column():
-                    text = gr.TextArea('一只黑白相间的熊猫在白色背景上居中坐着,呈现出卡通风格和可爱氛围。',
-                                       lines=1, max_lines=10, label='Input text')
-                    with gr.Row():
-                        textgen_seed = gr.Number(value=0, label="T2I seed", precision=0)
-                        textgen_step = gr.Number(value=25, label="T2I step", precision=0)
-                        textgen_SEED = gr.Number(value=0, label="Gen seed", precision=0)
-                        textgen_STEP = gr.Number(value=50, label="Gen step", precision=0)
-                        textgen_max_faces = gr.Number(value=90000, label="max number of faces", precision=0)
-
-                    with gr.Row():
-                        textgen_do_texture_mapping = gr.Checkbox(label="texture mapping", value=False)
-                        textgen_do_render_gif = gr.Checkbox(label="Render gif", value=False)
-                        textgen_submit = gr.Button("Generate", variant="primary")
-
-                    gr.Examples(examples=example_ts, inputs=[text], label="Txt examples")
-
-            with gr.Tab("Image to 3D"):
-                with gr.Column():
-                    input_image = gr.Image(label="Input image", width=256, height=256,
-                                           type="pil", image_mode="RGBA", sources="upload")
-                    with gr.Row():
-                        imggen_SEED = gr.Number(value=0, label="Gen seed", precision=0)
-                        imggen_STEP = gr.Number(value=50, label="Gen step", precision=0)
-                        imggen_max_faces = gr.Number(value=90000, label="max number of faces", precision=0)
-
-                    with gr.Row():
-                        imggen_do_texture_mapping = gr.Checkbox(label="texture mapping", value=False)
-                        imggen_do_render_gif = gr.Checkbox(label="Render gif", value=False)
-                        imggen_submit = gr.Button("Generate", variant="primary")
-
-                    gr.Examples(examples=example_is, inputs=[input_image], label="Img examples")
-
-        with gr.Column(scale=3):
-            with gr.Tab("rembg image"):
-                rem_bg_image = gr.Image(label="No background image", width=256, height=256,
-                                        type="pil", image_mode="RGBA")
-
-            with gr.Tab("Multi views"):
-                result_image = gr.Image(label="Multi views", type="pil")
-            with gr.Tab("Obj"):
-                result_3dobj = gr.Model3D(label="Output obj")
-            with gr.Tab("Glb"):
-                result_3dglb = gr.Model3D(label="Output glb")
-            with gr.Tab("GIF"):
-                result_gif = gr.Image(label="Rendered GIF")
-
-    # States
-    none = gr.State(None)
-    save_folder = gr.State()
-    cond_image = gr.State()
-    views_image = gr.State()
-    text_image = gr.State()
-
-    # Event handlers
-    textgen_submit.click(
-        fn=stage_0_t2i,
-        inputs=[text, none, textgen_seed, textgen_step],
-        outputs=[rem_bg_image, save_folder],
-    ).success(
-        fn=stage_2_i2v,
-        inputs=[rem_bg_image, textgen_SEED, textgen_STEP, save_folder],
-        outputs=[views_image, cond_image, result_image],
-    ).success(
-        fn=stage_3_v23,
-        inputs=[views_image, cond_image, textgen_SEED, save_folder, textgen_max_faces,
-                textgen_do_texture_mapping, textgen_do_render_gif],
-        outputs=[result_3dobj, result_3dglb],
-    ).success(
-        fn=stage_4_gif,
-        inputs=[result_3dglb, save_folder, textgen_do_render_gif],
-        outputs=[result_gif],
-    )
-
-    imggen_submit.click(
-        fn=stage_0_t2i,
-        inputs=[none, input_image, textgen_seed, textgen_step],
-        outputs=[text_image, save_folder],
-    ).success(
-        fn=stage_1_xbg,
-        inputs=[text_image, save_folder],
-        outputs=[rem_bg_image],
-    ).success(
-        fn=stage_2_i2v,
-        inputs=[rem_bg_image, imggen_SEED, imggen_STEP, save_folder],
-        outputs=[views_image, cond_image, result_image],
-    ).success(
-        fn=stage_3_v23,
-        inputs=[views_image, cond_image, imggen_SEED, save_folder, imggen_max_faces,
-                imggen_do_texture_mapping, imggen_do_render_gif],
-        outputs=[result_3dobj, result_3dglb],
-    ).success(
-        fn=stage_4_gif,
-        inputs=[result_3dglb, save_folder, imggen_do_render_gif],
-        outputs=[result_gif],
-    )
-
-demo.queue(max_size=CONST_MAX_QUEUE)
+import spaces
+import os
+import warnings
+from huggingface_hub import snapshot_download
+import gradio as gr
+from glob import glob
+import shutil
+import torch
+import numpy as np
+from PIL import Image
+from einops import rearrange
+import argparse
+
+# Suppress warnings
+warnings.simplefilter('ignore', category=UserWarning)
+warnings.simplefilter('ignore', category=FutureWarning)
+warnings.simplefilter('ignore', category=DeprecationWarning)
+
+def download_models():
+    # Create weights directory if it doesn't exist
+    os.makedirs("weights", exist_ok=True)
+    os.makedirs("weights/hunyuanDiT", exist_ok=True)
+
+    # Download Hunyuan3D-1 model
+    try:
+        snapshot_download(
+            repo_id="tencent/Hunyuan3D-1",
+            local_dir="./weights",
+            resume_download=True
+        )
+        print("Successfully downloaded Hunyuan3D-1 model")
+    except Exception as e:
+        print(f"Error downloading Hunyuan3D-1: {e}")
+
+    # Download HunyuanDiT model
+    try:
+        snapshot_download(
+            repo_id="Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers-Distilled",
+            local_dir="./weights/hunyuanDiT",
+            resume_download=True
+        )
+        print("Successfully downloaded HunyuanDiT model")
+    except Exception as e:
+        print(f"Error downloading HunyuanDiT: {e}")
+
+# Download models before starting the app
+download_models()
+
+# Parse arguments
+parser = argparse.ArgumentParser()
+parser.add_argument("--use_lite", default=False, action="store_true")
+parser.add_argument("--mv23d_cfg_path", default="./svrm/configs/svrm.yaml", type=str)
+parser.add_argument("--mv23d_ckt_path", default="weights/svrm/svrm.safetensors", type=str)
+parser.add_argument("--text2image_path", default="weights/hunyuanDiT", type=str)
+parser.add_argument("--save_memory", default=False, action="store_true")
+parser.add_argument("--device", default="cuda:0", type=str)
+args = parser.parse_args()
+
+# Constants
+CONST_PORT = 8080
+CONST_MAX_QUEUE = 1
+CONST_SERVER = '0.0.0.0'
+
+CONST_HEADER = '''
+<h2><b>Official 🤗 Gradio Demo</b></h2>
+<h2><a href='https://github.com/tencent/Hunyuan3D-1' target='_blank'>
+<b>Hunyuan3D-1.0: A Unified Framework for Text-to-3D and Image-to-3D Generation</b></a></h2>
+'''
+
+# Helper functions
+def get_example_img_list():
+    print('Loading example img list ...')
+    return sorted(glob('./demos/example_*.png'))
+
+def get_example_txt_list():
+    print('Loading example txt list ...')
+    txt_list = []
+    for line in open('./demos/example_list.txt'):
+        txt_list.append(line.strip())
+    return txt_list
+
+example_is = get_example_img_list()
+example_ts = get_example_txt_list()
+
+# Import required workers
+from infer import seed_everything, save_gif
+from infer import Text2Image, Removebg, Image2Views, Views2Mesh, GifRenderer
+
+# Initialize workers
+worker_xbg = Removebg()
+print(f"loading {args.text2image_path}")
+worker_t2i = Text2Image(
+    pretrain=args.text2image_path,
+    device=args.device,
+    save_memory=args.save_memory
+)
+worker_i2v = Image2Views(
+    use_lite=args.use_lite,
+    device=args.device
+)
+worker_v23 = Views2Mesh(
+    args.mv23d_cfg_path,
+    args.mv23d_ckt_path,
+    use_lite=args.use_lite,
+    device=args.device
+)
+worker_gif = GifRenderer(args.device)
+
+# Pipeline stages
+@spaces.GPU
+def stage_0_t2i(text, image, seed, step):
+    os.makedirs('./outputs/app_output', exist_ok=True)
+    exists = set(int(_) for _ in os.listdir('./outputs/app_output') if not _.startswith("."))
+    cur_id = min(set(range(30)) - exists) if len(exists) < 30 else 0
+
+    if os.path.exists(f"./outputs/app_output/{(cur_id + 1) % 30}"):
+        shutil.rmtree(f"./outputs/app_output/{(cur_id + 1) % 30}")
+    save_folder = f'./outputs/app_output/{cur_id}'
+    os.makedirs(save_folder, exist_ok=True)
+
+    dst = save_folder + '/img.png'
+
+    if not text:
+        if image is None:
+            return dst, save_folder
+        image.save(dst)
+        return dst, save_folder
+
+    image = worker_t2i(text, seed, step)
+    image.save(dst)
+    dst = worker_xbg(image, save_folder)
+    return dst, save_folder
+@spaces.GPU
+def stage_1_xbg(image, save_folder):
+    if isinstance(image, str):
+        image = Image.open(image)
+    dst = save_folder + '/img_nobg.png'
+    rgba = worker_xbg(image)
+    rgba.save(dst)
+    return dst
+@spaces.GPU
+def stage_2_i2v(image, seed, step, save_folder):
+    if isinstance(image, str):
+        image = Image.open(image)
+    gif_dst = save_folder + '/views.gif'
+    res_img, pils = worker_i2v(image, seed, step)
+    save_gif(pils, gif_dst)
+    views_img, cond_img = res_img[0], res_img[1]
+    img_array = np.asarray(views_img, dtype=np.uint8)
+    show_img = rearrange(img_array, '(n h) (m w) c -> (n m) h w c', n=3, m=2)
+    show_img = show_img[worker_i2v.order, ...]
+    show_img = rearrange(show_img, '(n m) h w c -> (n h) (m w) c', n=2, m=3)
+    show_img = Image.fromarray(show_img)
+    return views_img, cond_img, show_img
+@spaces.GPU
+def stage_3_v23(views_pil, cond_pil, seed, save_folder, target_face_count=30000,
+                do_texture_mapping=True, do_render=True):
+    do_texture_mapping = do_texture_mapping or do_render
+    obj_dst = save_folder + '/mesh_with_colors.obj'
+    glb_dst = save_folder + '/mesh.glb'
+    worker_v23(
+        views_pil,
+        cond_pil,
+        seed=seed,
+        save_folder=save_folder,
+        target_face_count=target_face_count,
+        do_texture_mapping=do_texture_mapping
+    )
+    return obj_dst, glb_dst
+@spaces.GPU
+def stage_4_gif(obj_dst, save_folder, do_render_gif=True):
+    if not do_render_gif:
+        return None
+    gif_dst = save_folder + '/output.gif'
+    worker_gif(
+        save_folder + '/mesh.obj',
+        gif_dst_path=gif_dst
+    )
+    return gif_dst
+
+# Gradio Interface
+with gr.Blocks() as demo:
+    gr.Markdown(CONST_HEADER)
+
+    with gr.Row(variant="panel"):
+        with gr.Column(scale=2):
+            with gr.Tab("Text to 3D"):
+                with gr.Column():
+                    text = gr.TextArea('一只黑白相间的熊猫在白色背景上居中坐着,呈现出卡通风格和可爱氛围。',
+                                       lines=1, max_lines=10, label='Input text')
+                    with gr.Row():
+                        textgen_seed = gr.Number(value=0, label="T2I seed", precision=0)
+                        textgen_step = gr.Number(value=25, label="T2I step", precision=0)
+                        textgen_SEED = gr.Number(value=0, label="Gen seed", precision=0)
+                        textgen_STEP = gr.Number(value=50, label="Gen step", precision=0)
+                        textgen_max_faces = gr.Number(value=90000, label="max number of faces", precision=0)
+
+                    with gr.Row():
+                        textgen_do_texture_mapping = gr.Checkbox(label="texture mapping", value=False)
+                        textgen_do_render_gif = gr.Checkbox(label="Render gif", value=False)
+                        textgen_submit = gr.Button("Generate", variant="primary")
+
+                    gr.Examples(examples=example_ts, inputs=[text], label="Txt examples")
+
+            with gr.Tab("Image to 3D"):
+                with gr.Column():
+                    input_image = gr.Image(label="Input image", width=256, height=256,
+                                           type="pil", image_mode="RGBA", sources="upload")
+                    with gr.Row():
+                        imggen_SEED = gr.Number(value=0, label="Gen seed", precision=0)
+                        imggen_STEP = gr.Number(value=50, label="Gen step", precision=0)
+                        imggen_max_faces = gr.Number(value=90000, label="max number of faces", precision=0)
+
+                    with gr.Row():
+                        imggen_do_texture_mapping = gr.Checkbox(label="texture mapping", value=False)
+                        imggen_do_render_gif = gr.Checkbox(label="Render gif", value=False)
+                        imggen_submit = gr.Button("Generate", variant="primary")
+
+                    gr.Examples(examples=example_is, inputs=[input_image], label="Img examples")
+
+        with gr.Column(scale=3):
+            with gr.Tab("rembg image"):
+                rem_bg_image = gr.Image(label="No background image", width=256, height=256,
+                                        type="pil", image_mode="RGBA")
+
+            with gr.Tab("Multi views"):
+                result_image = gr.Image(label="Multi views", type="pil")
+            with gr.Tab("Obj"):
+                result_3dobj = gr.Model3D(label="Output obj")
+            with gr.Tab("Glb"):
+                result_3dglb = gr.Model3D(label="Output glb")
+            with gr.Tab("GIF"):
+                result_gif = gr.Image(label="Rendered GIF")
+
+    # States
+    none = gr.State(None)
+    save_folder = gr.State()
+    cond_image = gr.State()
+    views_image = gr.State()
+    text_image = gr.State()
+
+    # Event handlers
+    textgen_submit.click(
+        fn=stage_0_t2i,
+        inputs=[text, none, textgen_seed, textgen_step],
+        outputs=[rem_bg_image, save_folder],
+    ).success(
+        fn=stage_2_i2v,
+        inputs=[rem_bg_image, textgen_SEED, textgen_STEP, save_folder],
+        outputs=[views_image, cond_image, result_image],
+    ).success(
+        fn=stage_3_v23,
+        inputs=[views_image, cond_image, textgen_SEED, save_folder, textgen_max_faces,
+                textgen_do_texture_mapping, textgen_do_render_gif],
+        outputs=[result_3dobj, result_3dglb],
+    ).success(
+        fn=stage_4_gif,
+        inputs=[result_3dglb, save_folder, textgen_do_render_gif],
+        outputs=[result_gif],
+    )
+
+    imggen_submit.click(
+        fn=stage_0_t2i,
+        inputs=[none, input_image, textgen_seed, textgen_step],
+        outputs=[text_image, save_folder],
+    ).success(
+        fn=stage_1_xbg,
+        inputs=[text_image, save_folder],
+        outputs=[rem_bg_image],
+    ).success(
+        fn=stage_2_i2v,
+        inputs=[rem_bg_image, imggen_SEED, imggen_STEP, save_folder],
+        outputs=[views_image, cond_image, result_image],
+    ).success(
+        fn=stage_3_v23,
+        inputs=[views_image, cond_image, imggen_SEED, save_folder, imggen_max_faces,
+                imggen_do_texture_mapping, imggen_do_render_gif],
+        outputs=[result_3dobj, result_3dglb],
+    ).success(
+        fn=stage_4_gif,
+        inputs=[result_3dglb, save_folder, imggen_do_render_gif],
+        outputs=[result_gif],
+    )
+
+demo.queue(max_size=CONST_MAX_QUEUE)
 demo.launch(server_name=CONST_SERVER, server_port=CONST_PORT)