import os
import sys
import gradio as gr
import subprocess
from fastai.vision.all import *
import torch

# run on CPU; change to torch.device('cuda') if a GPU is available
device = torch.device('cpu')


os.system('git clone https://github.com/iPERDance/iPERCore.git')
os.system('curl -o ./iPERCore/assets/checkpoints.zip http://101.32.75.151:10086/checkpoints.zip')
os.system('curl -o ./iPERCore/assets/samples.zip http://101.32.75.151:12345/iper_plus_plus_latest_samples.zip')
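
# NOTE (assumption): the archives above are only downloaded, never extracted; iPERCore
# expects the checkpoints and sample data unpacked under iPERCore/assets/. A minimal
# sketch of that step, assuming a standard zip layout, would be:
# os.system('unzip -o ./iPERCore/assets/checkpoints.zip -d ./iPERCore/assets/')
# os.system('unzip -o ./iPERCore/assets/samples.zip -d ./iPERCore/assets/')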

title = "Full-Body Animation"
description = "Gradio demo for iPERCore full-body motion imitation: animate the person in a source image using a driving video."
article = "Official Repo: https://github.com/iPERDance/iPERCore"

def inference(img, video):
    # create a working directory for the imitation results
    os.makedirs('temp', exist_ok=True)

    # run the iPERCore imitator as a module; --output_dir takes a directory, not a file
    os.system(f"python -m iPERCore.services.run_imitator --gpu_ids 0 --num_source 2 --src_path {img} --ref_path {video} --output_dir temp")
    # NOTE: the exact location of the synthesized video inside the output directory
    # depends on iPERCore's result layout; the original file name is kept here
    return "temp/result.mp4"


gr.Interface(
    inference,
    [
        gr.inputs.Image(type="filepath", label="Source Image"),
        gr.inputs.Video(type="mp4", label="Driving Video")
    ],
    outputs=gr.outputs.Video(type="mp4", label="Output Video"),
    title=title,
    description=description,
    article=article,
    theme="huggingface",
    allow_flagging=False).launch(debug=True, enable_queue=True)