update
__pycache__/viewcrafter.cpython-39.pyc
CHANGED
Binary files a/__pycache__/viewcrafter.cpython-39.pyc and b/__pycache__/viewcrafter.cpython-39.pyc differ
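The first change in the commit is a tracked bytecode file. Compiled __pycache__ artifacts are generated per interpreter version and rarely belong in version control; the conventional fix (not part of this commit, just standard practice) is a .gitignore entry:

__pycache__/
*.py[cod]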
app.py
CHANGED
@@ -3,7 +3,7 @@ import torch
 import sys
 import spaces #fixme

-
+import random
 import gradio as gr
 import random
 from configs.infer_config import get_parser
@@ -38,10 +38,13 @@ def download_model():
 download_model() #fixme
 parser = get_parser() # infer_config.py
 opts = parser.parse_args() # default device: 'cuda:0'
-
+tmp = str(random.randint(10**(5-1), 10**5 - 1))
+opts.save_dir = f'./{tmp}'
 os.makedirs(opts.save_dir,exist_ok=True)
 test_tensor = torch.Tensor([0]).cuda()
 opts.device = str(test_tensor.device)
+opts.config = './configs/inference_pvd_1024_gradio.yaml' #fixme
+# opts.config = './configs/inference_pvd_1024_local.yaml' #fixme

 # install pytorch3d # fixme
 pyt_version_str=torch.__version__.split("+")[0].replace(".", "")
@@ -63,7 +66,7 @@ def viewcrafter_demo(opts):
 css = """#input_img {max-width: 1024px !important} #output_vid {max-width: 1024px; max-height:576px} #random_button {max-width: 100px !important}"""
 image2video = ViewCrafter(opts, gradio = True)
 image2video.run_traj = spaces.GPU(image2video.run_traj, duration=50) # fixme
-image2video.run_gen = spaces.GPU(image2video.run_gen, duration=
+image2video.run_gen = spaces.GPU(image2video.run_gen, duration=260) # fixme
 with gr.Blocks(analytics_enabled=False, css=css) as viewcrafter_iface:
 gr.Markdown("<div align='center'> <h1> ViewCrafter: Taming Video Diffusion Models for High-fidelity Novel View Synthesis </span> </h1> \
 <h2 style='font-weight: 450; font-size: 1rem; margin: 0rem'>\
@@ -123,7 +126,7 @@ def viewcrafter_demo(opts):
 with gr.Row():
 with gr.Column():
 i2v_steps = gr.Slider(minimum=1, maximum=50, step=1, elem_id="i2v_steps", label="Sampling steps", value=50)
-i2v_seed = gr.Slider(label='Random seed', minimum=0, maximum=max_seed, step=1, value=
+i2v_seed = gr.Slider(label='Random seed', minimum=0, maximum=max_seed, step=1, value=0)
 i2v_end_btn = gr.Button("Generate video")
 # with gr.Tab(label='Result'):
 with gr.Column():
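Taken together, the app.py changes give every app launch its own output directory named by a random five-digit number, point opts.config at the renamed Gradio config, raise the ZeroGPU time budget for run_gen to 260 seconds, and pin the seed slider's default to 0. A minimal sketch of the same scratch-directory idea using only the standard library; tempfile.mkdtemp is a hypothetical alternative here, not what the Space uses, but it creates the directory atomically and cannot collide the way a random five-digit name can:

import tempfile

# Same intent as tmp = str(random.randint(10**(5-1), 10**5 - 1)) plus os.makedirs:
# a unique scratch directory per launch, created atomically with no collision risk.
save_dir = tempfile.mkdtemp(prefix="viewcrafter_", dir=".")
print(save_dir)  # e.g. ./viewcrafter_3f9xk2ab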
configs/{inference_pvd_1024.yaml → inference_pvd_1024_gradio.yaml}
RENAMED
File without changes
viewcrafter.py
CHANGED
@@ -26,6 +26,7 @@ from pytorch_lightning import seed_everything
 from utils.diffusion_utils import instantiate_from_config,load_model_checkpoint,image_guided_synthesis
 from pathlib import Path
 from torchvision.utils import save_image
+render_results = None

 class ViewCrafter:
 def __init__(self, opts, gradio = False):
@@ -371,23 +372,24 @@ class ViewCrafter:
 # self.img_ori: torch.Size([576, 1024, 3]), [0,1]
 # self.images, self.img_ori = self.load_initial_images(image_dir=i2v_input_image)
 self.run_dust3r(input_images=self.images)
+global render_results
 render_results = self.nvs_single_view(gradio=True)
-save_video(render_results, os.path.join(self.opts.save_dir, 'render0.mp4'))
+# save_video(render_results, os.path.join(self.opts.save_dir, 'render0.mp4'))
 traj_dir = os.path.join(self.opts.save_dir, "viz_traj.mp4")
 return traj_dir

 def run_gen(self,i2v_steps, i2v_seed):
 self.opts.ddim_steps = i2v_steps
 seed_everything(i2v_seed)
-render_dir = os.path.join(self.opts.save_dir, 'render0.mp4')
-video = imageio.get_reader(render_dir, 'ffmpeg')
-frames = []
-for frame in video:
-frame = frame / 255.0
-frames.append(frame)
-frames = np.array(frames)
-##torch.Size([25, 576, 1024, 3])
-render_results = torch.from_numpy(frames).to(self.device).half()
+# render_dir = os.path.join(self.opts.save_dir, 'render0.mp4')
+# video = imageio.get_reader(render_dir, 'ffmpeg')
+# frames = []
+# for frame in video:
+#     frame = frame / 255.0
+#     frames.append(frame)
+# frames = np.array(frames)
+# ##torch.Size([25, 576, 1024, 3])
+# render_results = torch.from_numpy(frames).to(self.device).half()

 gen_dir = os.path.join(self.opts.save_dir, "diffusion0.mp4")
 diffusion_results = self.run_diffusion(render_results)
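The viewcrafter.py change removes the render0.mp4 round-trip (run_traj encoded the rendered frames to disk, run_gen decoded them again) and instead passes render_results from run_traj to run_gen through a module-level global. Two caveats are worth noting: a module global is shared by every concurrent visitor to the Space, and on ZeroGPU a spaces.GPU-wrapped call may run in its own forked process, so writes to a global are not guaranteed to survive into the next call. Gradio's own mechanism for per-session values is gr.State; below is a minimal sketch of that pattern, with placeholder functions standing in for the real rendering and diffusion steps (none of these names come from the repo):

import gradio as gr

def run_traj(state):
    # stand-in for nvs_single_view(): render frames and stash them in session state
    state = {"render_results": "rendered-frames"}
    return "viz_traj.mp4", state

def run_gen(steps, seed, state):
    # read back what run_traj stored for this visitor's session
    frames = state["render_results"]
    return f"diffusion: steps={steps}, seed={seed}, frames={frames}"

with gr.Blocks() as demo:
    render_state = gr.State()  # per-session slot instead of a module-level global
    steps = gr.Slider(minimum=1, maximum=50, step=1, value=50, label="Sampling steps")
    seed = gr.Slider(minimum=0, maximum=2**31 - 1, step=1, value=0, label="Random seed")
    traj_btn = gr.Button("Trajectory")
    gen_btn = gr.Button("Generate video")
    out = gr.Textbox()
    traj_btn.click(run_traj, inputs=render_state, outputs=[out, render_state])
    gen_btn.click(run_gen, inputs=[steps, seed, render_state], outputs=out)

demo.launch()

Unlike the global, gr.State keeps the frames scoped to one session, so two users generating at the same time cannot overwrite each other's render_results.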