Spaces:
Running
on
Zero
Running
on
Zero
File size: 3,940 Bytes
fc9d64f 8d3ae99 da2f55c fc9d64f 6b3779f da2f55c 6b3779f fc9d64f 6b3779f fc9d64f 6b3779f fc9d64f 8d3ae99 6b3779f fc9d64f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 |
import spaces
import gradio as gr
from omegaconf import OmegaConf
from app.demo import *
def prepare_cfg(is_static: bool, video_path: str, demo_id: str):
    """Compose the Hydra config for one demo run and stage the input video.

    Args:
        is_static: Whether the video was shot with a static camera.
        video_path: Path to the user-uploaded video file.
        demo_id: Unique run name, passed to Hydra as ``video_name``.

    Returns:
        The composed config. Side effects: creates ``cfg.output_dir`` and
        ``cfg.preprocess_dir``, and re-encodes the input into ``cfg.video_path``.
    """
    # Keep all run outputs next to the uploaded video.
    output_root = Path(video_path).parent / 'output'
    output_root = str(output_root.absolute())
    # Cfg
    with initialize_config_module(version_base="1.3", config_module="hmr4d.configs"):
        overrides = [
            f"video_name={demo_id}",
            f"static_cam={is_static}",
            f"verbose={False}",
            # Allow to change output root
            f"output_root={output_root}",
        ]
        register_store_gvhmr()
        cfg = compose(config_name="demo", overrides=overrides)

    # Output
    Log.info(f"[Output Dir]: {cfg.output_dir}")
    Path(cfg.output_dir).mkdir(parents=True, exist_ok=True)
    Path(cfg.preprocess_dir).mkdir(parents=True, exist_ok=True)

    # Copy raw-input-video to video_path. Skip when a copy with the same
    # frame count already exists (get_video_lwh(...)[0] is the frame count).
    Log.info(f"[Copy Video] {video_path} -> {cfg.video_path}")
    if not Path(cfg.video_path).exists() or get_video_lwh(video_path)[0] != get_video_lwh(cfg.video_path)[0]:
        reader = get_video_reader(video_path)
        writer = get_writer(cfg.video_path, fps=30, crf=CRF)
        try:
            for img in tqdm(reader, total=get_video_lwh(video_path)[0], desc="Copy"):
                writer.write_frame(img)
        finally:
            # Always release encoder/decoder, even if a frame write fails.
            writer.close()
            reader.close()
    return cfg
def run_demo(cfg, progress, GPU_quota):
    """Run the GVHMR pipeline, reserving the ZeroGPU for a user-chosen quota.

    The heavy work lives in an inner function decorated with ``spaces.GPU``
    so the GPU is only held for ``GPU_quota`` seconds.
    """

    @spaces.GPU(duration=int(GPU_quota))
    def _pipeline():
        paths = cfg.paths
        Log.info(f"[GPU]: {torch.cuda.get_device_name()}")
        Log.info(f'[GPU]: {torch.cuda.get_device_properties("cuda")}')

        # ===== Preprocess and save to disk ===== #
        run_preprocess(cfg)
        data = load_data_dict(cfg)

        # ===== HMR4D ===== #
        # Cached on disk: skip prediction when results already exist.
        if not Path(paths.hmr4d_results).exists():
            Log.info("[HMR4D] Predicting")
            model: DemoPL = hydra.utils.instantiate(cfg.model, _recursive_=False)
            model.load_pretrained_model(cfg.ckpt_path)
            model = model.eval().cuda()
            tic = Log.sync_time()
            pred = detach_to_cpu(model.predict(data, static_cam=cfg.static_cam))
            data_time = data["length"] / 30
            Log.info(f"[HMR4D] Elapsed: {Log.sync_time() - tic:.2f}s for data-length={data_time:.1f}s")
            torch.save(pred, paths.hmr4d_results)

        # ===== Render ===== #
        render_incam(cfg)
        render_global(cfg)
        if not Path(paths.incam_global_horiz_video).exists():
            Log.info("[Merge Videos]")
            merge_videos_horizontal([paths.incam_video, paths.global_video], paths.incam_global_horiz_video)

    _pipeline()
def handler(video_path, cam_status, GPU_quota, progress=gr.Progress()):
    """Gradio entry point: validate the request, build the config, run GVHMR.

    Returns the in-camera and global rendered video paths for display.
    """
    # 0. Check validity of inputs.
    if cam_status not in ('Static Camera', 'Dynamic Camera'):
        raise gr.Error('Please define the camera status!', duration=5)
    if video_path is None or not Path(video_path).exists():
        raise gr.Error('Can not find the video!', duration=5)

    # 1. Deal with APP inputs.
    is_static = (cam_status == 'Static Camera')
    Log.info(f"[Input Args] is_static: {is_static}")
    Log.info(f"[Input Args] video_path: {video_path}")
    if not is_static:
        # DPVO (dynamic-camera tracking) cannot run in this Space.
        Log.info("[Warning] Dynamic Camera is not supported yet.")
        raise gr.Error('DPVO is not supported in spaces yet. Try to run videos with static camera instead!', duration=20)

    # 2. Prepare cfg with a randomized suffix so repeated runs don't collide.
    Log.info(f"[Video]: {video_path}")
    run_suffix = f"{np.random.randint(0, 1024):04d}"
    demo_id = f"{Path(video_path).stem}_{run_suffix}"
    cfg = prepare_cfg(is_static, video_path, demo_id)

    # 3. Run demo. Round-trip through a plain container to resolve all
    # interpolations before handing the cfg to the GPU task.
    cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True))
    run_demo(cfg, progress, GPU_quota)

    # 4. Prepare the output.
    return cfg.paths.incam_video, cfg.paths.global_video
|