fffiloni committed on
Commit ec4ad71
1 Parent(s): 22fa11a

Update app.py

Files changed (1)
  1. app.py +61 -3
app.py CHANGED
@@ -1,7 +1,9 @@
 import gradio as gr
 import os
 import yaml
+import tempfile
 import huggingface_hub
+import subprocess

 huggingface_hub.hf_hub_download(
     repo_id='yzd-v/DWPose',
@@ -39,12 +41,68 @@ directory_path = './models'
 # Print the directory contents
 print_directory_contents(directory_path)

-def infer(text):
-    return text
+def infer(ref_video_in, ref_image_in):
+    # Create a temporary directory
+    with tempfile.TemporaryDirectory() as temp_dir:
+        print("Temporary directory created:", temp_dir)
+
+        # Define the values for the variables
+        ref_video_path = ref_video_in
+        ref_image_path = ref_image_in
+        num_frames = 72
+        resolution = 576
+        frames_overlap = 6
+        num_inference_steps = 25
+        noise_aug_strength = 0
+        guidance_scale = 2.0
+        sample_stride = 2
+        fps = 15
+        seed = 42
+
+        # Create the data structure
+        data = {
+            'base_model_path': 'stabilityai/stable-video-diffusion-img2vid-xt-1-1',
+            'ckpt_path': 'models/MimicMotion_1-1.pth',
+            'test_case': [
+                {
+                    'ref_video_path': ref_video_path,
+                    'ref_image_path': ref_image_path,
+                    'num_frames': num_frames,
+                    'resolution': resolution,
+                    'frames_overlap': frames_overlap,
+                    'num_inference_steps': num_inference_steps,
+                    'noise_aug_strength': noise_aug_strength,
+                    'guidance_scale': guidance_scale,
+                    'sample_stride': sample_stride,
+                    'fps': fps,
+                    'seed': seed
+                }
+            ]
+        }
+
+        # Define the file path
+        file_path = os.path.join(temp_dir, 'config.yaml')
+
+        # Write the data to a YAML file
+        with open(file_path, 'w') as file:
+            yaml.dump(data, file, default_flow_style=False)
+
+        print("YAML file 'config.yaml' created successfully in", file_path)
+
+        # Execute the inference command
+        command = ['python', 'inference.py', '--inference_config', file_path]
+        result = subprocess.run(command, capture_output=True, text=True)
+
+        # Print the command output
+        print("Command output:", result.stdout)
+        print("Command errors:", result.stderr)
+
+
+    return "done"

 demo = gr.Interface(
     fn = infer,
-    inputs = [gr.Textbox()],
+    inputs = [gr.Video(type="filepath"), gr.Image(type="filepath")],
     outputs = [gr.Textbox()]
 )
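For reference, here is a minimal standalone sketch (not part of the commit) of the config-writing step, showing the block-style YAML that infer() hands to inference.py. The two file paths are hypothetical placeholders standing in for the temp files Gradio passes into the function; the expected output shown in the trailing comment follows PyYAML's defaults, which sort keys alphabetically.

import yaml

data = {
    'base_model_path': 'stabilityai/stable-video-diffusion-img2vid-xt-1-1',
    'ckpt_path': 'models/MimicMotion_1-1.pth',
    'test_case': [{
        'ref_video_path': '/tmp/ref_video.mp4',   # placeholder
        'ref_image_path': '/tmp/ref_image.png',   # placeholder
        'num_frames': 72,
        'resolution': 576,
        'frames_overlap': 6,
        'num_inference_steps': 25,
        'noise_aug_strength': 0,
        'guidance_scale': 2.0,
        'sample_stride': 2,
        'fps': 15,
        'seed': 42,
    }],
}

# default_flow_style=False produces block-style YAML; PyYAML also sorts keys
# alphabetically by default, so the file's key order differs from the dict above:
#
#   base_model_path: stabilityai/stable-video-diffusion-img2vid-xt-1-1
#   ckpt_path: models/MimicMotion_1-1.pth
#   test_case:
#   - fps: 15
#     frames_overlap: 6
#     guidance_scale: 2.0
#     noise_aug_strength: 0
#     num_frames: 72
#     num_inference_steps: 25
#     ref_image_path: /tmp/ref_image.png
#     ref_video_path: /tmp/ref_video.mp4
#     resolution: 576
#     sample_stride: 2
#     seed: 42
print(yaml.dump(data, default_flow_style=False))

Note that in this commit infer() always returns the string "done" and the interface output stays a Textbox; checking result.returncode before returning would be one way to surface inference failures in the UI.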