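"""Gradio demo app for MimicMotion.

Given a reference image and a reference video, the app writes an inference
config YAML to a temporary directory, runs inference.py as a subprocess, and
returns the generated .mp4 from the ./outputs folder.
"""
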
import gradio as gr
import os
import shutil
import yaml
import tempfile
import huggingface_hub
import subprocess
import threading

def stream_output(pipe):
    """Forward each line from a subprocess pipe to this process's stdout."""
    for line in iter(pipe.readline, ''):
        print(line, end='')
    pipe.close()
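
# Log in with the token from the GATED_HF_TOKEN environment variable, then
# fetch the DWPose ONNX models and the MimicMotion checkpoint into ./models.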
HF_TKN = os.environ.get("GATED_HF_TOKEN")
huggingface_hub.login(token=HF_TKN)
huggingface_hub.hf_hub_download(
    repo_id='yzd-v/DWPose',
    filename='yolox_l.onnx',
    local_dir='./models/DWPose',
    local_dir_use_symlinks=False,
)
huggingface_hub.hf_hub_download(
    repo_id='yzd-v/DWPose',
    filename='dw-ll_ucoco_384.onnx',
    local_dir='./models/DWPose',
    local_dir_use_symlinks=False,
)
huggingface_hub.hf_hub_download(
    repo_id='ixaac/MimicMotion',
    filename='MimicMotion_1-1.pth',
    local_dir='./models',
    local_dir_use_symlinks=False,
)

def print_directory_contents(path):
    """Print an indented tree view of the files under `path`."""
    for root, dirs, files in os.walk(path):
        level = root.replace(path, '').count(os.sep)
        indent = ' ' * 4 * level
        print(f"{indent}{os.path.basename(root)}/")
        subindent = ' ' * 4 * (level + 1)
        for f in files:
            print(f"{subindent}{f}")

def check_outputs_folder(folder_path):
    # Check if the folder exists
    if os.path.exists(folder_path) and os.path.isdir(folder_path):
        # Delete all contents inside the folder
        for filename in os.listdir(folder_path):
            file_path = os.path.join(folder_path, filename)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)  # Remove file or link
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)  # Remove directory
            except Exception as e:
                print(f'Failed to delete {file_path}. Reason: {e}')
    else:
        print(f'The folder {folder_path} does not exist.')

def check_for_mp4_in_outputs():
    # Define the path to the outputs folder
    outputs_folder = './outputs'
    # Check if the outputs folder exists
    if not os.path.exists(outputs_folder):
        return None
    # Check if there is a .mp4 file in the outputs folder
    mp4_files = [f for f in os.listdir(outputs_folder) if f.endswith('.mp4')]
    # Return the path to the mp4 file if it exists
    if mp4_files:
        return os.path.join(outputs_folder, mp4_files[0])
    else:
        return None
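
# Run MimicMotion end-to-end: empty ./outputs, build a config YAML in a
# temporary directory, launch inference.py as a subprocess while streaming
# its output, and return the path to the resulting .mp4.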
def infer(ref_image_in, ref_video_in):
    # check if 'outputs' dir exists and empty it if necessary
    check_outputs_folder('./outputs')

    # Create a temporary directory
    with tempfile.TemporaryDirectory() as temp_dir:
        print("Temporary directory created:", temp_dir)

        # Define the values for the variables
        ref_video_path = ref_video_in
        ref_image_path = ref_image_in
        num_frames = 72
        resolution = 576
        frames_overlap = 6
        num_inference_steps = 25
        noise_aug_strength = 0
        guidance_scale = 2.0
        sample_stride = 2
        fps = 15
        seed = 42

        # Create the data structure
        data = {
            'base_model_path': 'stabilityai/stable-video-diffusion-img2vid-xt-1-1',
            'ckpt_path': 'models/MimicMotion_1-1.pth',
            'test_case': [
                {
                    'ref_video_path': ref_video_path,
                    'ref_image_path': ref_image_path,
                    'num_frames': num_frames,
                    'resolution': resolution,
                    'frames_overlap': frames_overlap,
                    'num_inference_steps': num_inference_steps,
                    'noise_aug_strength': noise_aug_strength,
                    'guidance_scale': guidance_scale,
                    'sample_stride': sample_stride,
                    'fps': fps,
                    'seed': seed
                }
            ]
        }

        # Define the file path
        file_path = os.path.join(temp_dir, 'config.yaml')

        # Write the data to a YAML file
        with open(file_path, 'w') as file:
            yaml.dump(data, file, default_flow_style=False)
        print("YAML file 'config.yaml' created successfully in", file_path)

        # Execute the inference command
        command = ['python', 'inference.py', '--inference_config', file_path]
        process = subprocess.Popen(
            command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            text=True, bufsize=1
        )

        # Create threads to handle stdout and stderr
        stdout_thread = threading.Thread(target=stream_output, args=(process.stdout,))
        stderr_thread = threading.Thread(target=stream_output, args=(process.stderr,))

        # Start the threads
        stdout_thread.start()
        stderr_thread.start()

        # Wait for the process to complete and the threads to finish
        process.wait()
        stdout_thread.join()
        stderr_thread.join()

        print("Inference script finished with return code:", process.returncode)

        # Print the outputs directory contents
        print_directory_contents('./outputs')

        # Call the function and print the result
        mp4_file_path = check_for_mp4_in_outputs()
        print(mp4_file_path)

        return mp4_file_path

with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    ref_image_in = gr.Image(type="filepath")
                    ref_video_in = gr.Video()
                submit_btn = gr.Button("Submit")
            output_video = gr.Video()
    submit_btn.click(
        fn=infer,
        inputs=[ref_image_in, ref_video_in],
        outputs=[output_video]
    )

demo.launch()