yitianlian committed · Commit d69f7a8 · Parent: f95c474

update requirement

Browse files:
- .history/app_20240515130102.py (+188, -0)
- .history/app_20240515134629.py (+188, -0)
- .history/app_20240515134637.py (+188, -0)
- .history/app_20240515134639.py (+188, -0)
- app.py (+8, -8)
.history/app_20240515130102.py
ADDED
@@ -0,0 +1,188 @@
#!/usr/bin/env python

from __future__ import annotations

import argparse
import os
import pathlib
import subprocess

import gradio as gr

if os.getenv("SYSTEM") == "spaces":
    import mim

    mim.uninstall("mmcv-full", confirm_yes=True)
    mim.install("mmcv-full==1.5.2", is_yes=True)

    with open("patch") as f:
        subprocess.run("patch -p1".split(), cwd="Text2Human", stdin=f)

from model import Model

DESCRIPTION = """# Text2Human

- The algorithm is originally from <a href="https://github.com/yumingj/Text2Human">https://github.com/yumingj/Text2Human</a>, and this demo was made by <a href="https://huggingface.co/spaces/hysts/Text2Human">@hysts</a>. Thanks for their awesome work.

- By varying seeds, you can sample different human images under the same pose, shape description, and texture description. The larger the number of sample steps, the better the quality of the generated images. (The default value of sample steps is 256 in the original repo.)

- The label image generation step can be skipped. However, in that case, the input label image must be 512x256 in size and must contain only the specified colors.
"""


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", type=str, default="cpu")
    parser.add_argument("--theme", type=str)
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--port", type=int)
    parser.add_argument("--disable-queue", dest="enable_queue", action="store_false")
    return parser.parse_args()


# def set_example_image(example: list) -> dict:
#     return gr.Image.update(value=example[0])


def set_example_image(example: list) -> dict:
    return gr.update(value=example[0]["path"])


# def set_example_text(example: list) -> dict:
#     return gr.Textbox.change(value=example[0])


def set_example_text(example: list) -> dict:
    # Update the Textbox with the example text
    return gr.update(value=example[0])


def main():
    args = parse_args()
    print(args.device)
    model = Model(args.device)

    with gr.Blocks(theme=args.theme, css="style.css") as demo:
        gr.Markdown(DESCRIPTION)

        with gr.Row():
            with gr.Column():
                with gr.Row():
                    input_image = gr.Image(
                        label="Input Pose Image", type="pil", elem_id="input-image"
                    )
                    pose_data = gr.State()
                with gr.Row():
                    paths = sorted(pathlib.Path("pose_images").glob("*.png"))
                    example_images = gr.Dataset(
                        components=[input_image],
                        samples=[[path.as_posix()] for path in paths],
                    )

                with gr.Row():
                    shape_text = gr.Textbox(
                        label="Shape Description",
                        placeholder="""<gender>, <sleeve length>, <length of lower clothing>, <outer clothing type>, <other accessories1>, ...
Note: The outer clothing type and accessories can be omitted.""",
                    )
                with gr.Row():
                    shape_example_texts = gr.Dataset(
                        components=[shape_text],
                        samples=[
                            ["man, sleeveless T-shirt, long pants"],
                            ["woman, short-sleeve T-shirt, short jeans"],
                        ],
                    )
                with gr.Row():
                    generate_label_button = gr.Button("Generate Label Image")

            with gr.Column():
                with gr.Row():
                    label_image = gr.Image(
                        label="Label Image", type="numpy", elem_id="label-image"
                    )

                with gr.Row():
                    texture_text = gr.Textbox(
                        label="Texture Description",
                        placeholder="""<upper clothing texture>, <lower clothing texture>, <outer clothing texture>
Note: Currently, only 5 types of textures are supported, i.e., pure color, stripe/spline, plaid/lattice, floral, denim.""",
                    )
                with gr.Row():
                    texture_example_texts = gr.Dataset(
                        components=[texture_text],
                        samples=[["pure color, denim"], ["floral, stripe"]],
                    )
                with gr.Row():
                    sample_steps = gr.Slider(
                        10, 300, value=10, step=10, label="Sample Steps"
                    )
                with gr.Row():
                    seed = gr.Slider(0, 1000000, value=0, step=1, label="Seed")
                with gr.Row():
                    generate_human_button = gr.Button("Generate Human")

            with gr.Column():
                with gr.Row():
                    result = gr.Image(
                        label="Result", type="numpy", elem_id="result-image"
                    )


        input_image.change(
            fn=model.process_pose_image, inputs=input_image, outputs=pose_data
        )
        generate_label_button.click(
            fn=model.generate_label_image,
            inputs=[
                pose_data,
                shape_text,
            ],
            outputs=label_image,
        )
        # generate_human_button.click(
        #     fn=model.generate_human,
        #     inputs=[
        #         label_image,
        #         texture_text,
        #         sample_steps,
        #         seed,
        #     ],
        #     outputs=result,
        # )
        generate_human_button.click(
            fn=model.generate_human,
            inputs=[
                pose_data,
                shape_text,
                texture_text,
                sample_steps,
                seed,
            ],
            outputs=result,
        )
        example_images.click(
            fn=set_example_image,
            inputs=example_images,
            outputs=example_images._components,
        )
        shape_example_texts.click(
            fn=set_example_text,
            inputs=shape_example_texts,
            outputs=shape_example_texts._components,
        )
        texture_example_texts.click(
            fn=set_example_text,
            inputs=texture_example_texts,
            outputs=texture_example_texts._components,
        )

    demo.launch(
        # enable_queue=args.enable_queue,
        server_port=args.port,
        share=args.share,
    )


if __name__ == "__main__":
    main()
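The `set_example_*` callbacks above wire a `gr.Dataset` click back into its target component via `gr.update`. A minimal, self-contained sketch of that pattern (assuming Gradio 4.x; the component names here are illustrative, not from this commit):

import gradio as gr


def pick_example(example: list) -> dict:
    # A Dataset click passes the selected sample: a list with one value per
    # component. gr.update pushes that value back into the target component.
    return gr.update(value=example[0])


with gr.Blocks() as demo:
    text = gr.Textbox(label="Description")
    examples = gr.Dataset(
        components=[text],
        samples=[["man, long pants"], ["woman, short jeans"]],
    )
    # The app above targets the private Dataset._components list; passing the
    # target component directly, as here, is the public equivalent.
    examples.click(fn=pick_example, inputs=examples, outputs=[text])

if __name__ == "__main__":
    demo.launch()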
.history/app_20240515134629.py
ADDED
@@ -0,0 +1,188 @@
#!/usr/bin/env python

from __future__ import annotations

import argparse
import os
import pathlib
import subprocess

import gradio as gr

# if os.getenv("SYSTEM") == "spaces":
#     import mim

#     mim.uninstall("mmcv-full", confirm_yes=True)
#     mim.install("mmcv-full==1.5.2", is_yes=True)

# with open("patch") as f:
#     subprocess.run("patch -p1".split(), cwd="Text2Human", stdin=f)

from model import Model

DESCRIPTION = """# Text2Human

- The algorithm is originally from <a href="https://github.com/yumingj/Text2Human">https://github.com/yumingj/Text2Human</a>, and this demo was made by <a href="https://huggingface.co/spaces/hysts/Text2Human">@hysts</a>. Thanks for their awesome work.

- By varying seeds, you can sample different human images under the same pose, shape description, and texture description. The larger the number of sample steps, the better the quality of the generated images. (The default value of sample steps is 256 in the original repo.)

- The label image generation step can be skipped. However, in that case, the input label image must be 512x256 in size and must contain only the specified colors.
"""


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", type=str, default="cpu")
    parser.add_argument("--theme", type=str)
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--port", type=int)
    parser.add_argument("--disable-queue", dest="enable_queue", action="store_false")
    return parser.parse_args()


# def set_example_image(example: list) -> dict:
#     return gr.Image.update(value=example[0])


def set_example_image(example: list) -> dict:
    return gr.update(value=example[0]["path"])


# def set_example_text(example: list) -> dict:
#     return gr.Textbox.change(value=example[0])


def set_example_text(example: list) -> dict:
    # Update the Textbox with the example text
    return gr.update(value=example[0])


def main():
    args = parse_args()
    print(args.device)
    model = Model(args.device)

    with gr.Blocks(theme=args.theme, css="style.css") as demo:
        gr.Markdown(DESCRIPTION)

        with gr.Row():
            with gr.Column():
                with gr.Row():
                    input_image = gr.Image(
                        label="Input Pose Image", type="pil", elem_id="input-image"
                    )
                    pose_data = gr.State()
                with gr.Row():
                    paths = sorted(pathlib.Path("pose_images").glob("*.png"))
                    example_images = gr.Dataset(
                        components=[input_image],
                        samples=[[path.as_posix()] for path in paths],
                    )

                with gr.Row():
                    shape_text = gr.Textbox(
                        label="Shape Description",
                        placeholder="""<gender>, <sleeve length>, <length of lower clothing>, <outer clothing type>, <other accessories1>, ...
Note: The outer clothing type and accessories can be omitted.""",
                    )
                with gr.Row():
                    shape_example_texts = gr.Dataset(
                        components=[shape_text],
                        samples=[
                            ["man, sleeveless T-shirt, long pants"],
                            ["woman, short-sleeve T-shirt, short jeans"],
                        ],
                    )
                with gr.Row():
                    generate_label_button = gr.Button("Generate Label Image")

            with gr.Column():
                with gr.Row():
                    label_image = gr.Image(
                        label="Label Image", type="numpy", elem_id="label-image"
                    )

                with gr.Row():
                    texture_text = gr.Textbox(
                        label="Texture Description",
                        placeholder="""<upper clothing texture>, <lower clothing texture>, <outer clothing texture>
Note: Currently, only 5 types of textures are supported, i.e., pure color, stripe/spline, plaid/lattice, floral, denim.""",
                    )
                with gr.Row():
                    texture_example_texts = gr.Dataset(
                        components=[texture_text],
                        samples=[["pure color, denim"], ["floral, stripe"]],
                    )
                with gr.Row():
                    sample_steps = gr.Slider(
                        10, 300, value=10, step=10, label="Sample Steps"
                    )
                with gr.Row():
                    seed = gr.Slider(0, 1000000, value=0, step=1, label="Seed")
                with gr.Row():
                    generate_human_button = gr.Button("Generate Human")

            with gr.Column():
                with gr.Row():
                    result = gr.Image(
                        label="Result", type="numpy", elem_id="result-image"
                    )


        input_image.change(
            fn=model.process_pose_image, inputs=input_image, outputs=pose_data
        )
        generate_label_button.click(
            fn=model.generate_label_image,
            inputs=[
                pose_data,
                shape_text,
            ],
            outputs=label_image,
        )
        # generate_human_button.click(
        #     fn=model.generate_human,
        #     inputs=[
        #         label_image,
        #         texture_text,
        #         sample_steps,
        #         seed,
        #     ],
        #     outputs=result,
        # )
        generate_human_button.click(
            fn=model.generate_human,
            inputs=[
                pose_data,
                shape_text,
                texture_text,
                sample_steps,
                seed,
            ],
            outputs=result,
        )
        example_images.click(
            fn=set_example_image,
            inputs=example_images,
            outputs=example_images._components,
        )
        shape_example_texts.click(
            fn=set_example_text,
            inputs=shape_example_texts,
            outputs=shape_example_texts._components,
        )
        texture_example_texts.click(
            fn=set_example_text,
            inputs=texture_example_texts,
            outputs=texture_example_texts._components,
        )

    demo.launch(
        # enable_queue=args.enable_queue,
        server_port=args.port,
        share=args.share,
    )


if __name__ == "__main__":
    main()
.history/app_20240515134637.py
ADDED
@@ -0,0 +1,188 @@
#!/usr/bin/env python

from __future__ import annotations

import argparse
import os
import pathlib
import subprocess

import gradio as gr

from model import Model

# if os.getenv("SYSTEM") == "spaces":
#     import mim

#     mim.uninstall("mmcv-full", confirm_yes=True)
#     mim.install("mmcv-full==1.5.2", is_yes=True)

# with open("patch") as f:
#     subprocess.run("patch -p1".split(), cwd="Text2Human", stdin=f)


DESCRIPTION = """# Text2Human

- The algorithm is originally from <a href="https://github.com/yumingj/Text2Human">https://github.com/yumingj/Text2Human</a>, and this demo was made by <a href="https://huggingface.co/spaces/hysts/Text2Human">@hysts</a>. Thanks for their awesome work.

- By varying seeds, you can sample different human images under the same pose, shape description, and texture description. The larger the number of sample steps, the better the quality of the generated images. (The default value of sample steps is 256 in the original repo.)

- The label image generation step can be skipped. However, in that case, the input label image must be 512x256 in size and must contain only the specified colors.
"""


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", type=str, default="cpu")
    parser.add_argument("--theme", type=str)
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--port", type=int)
    parser.add_argument("--disable-queue", dest="enable_queue", action="store_false")
    return parser.parse_args()


# def set_example_image(example: list) -> dict:
#     return gr.Image.update(value=example[0])


def set_example_image(example: list) -> dict:
    return gr.update(value=example[0]["path"])


# def set_example_text(example: list) -> dict:
#     return gr.Textbox.change(value=example[0])


def set_example_text(example: list) -> dict:
    # Update the Textbox with the example text
    return gr.update(value=example[0])


def main():
    args = parse_args()
    print(args.device)
    model = Model(args.device)

    with gr.Blocks(theme=args.theme, css="style.css") as demo:
        gr.Markdown(DESCRIPTION)

        with gr.Row():
            with gr.Column():
                with gr.Row():
                    input_image = gr.Image(
                        label="Input Pose Image", type="pil", elem_id="input-image"
                    )
                    pose_data = gr.State()
                with gr.Row():
                    paths = sorted(pathlib.Path("pose_images").glob("*.png"))
                    example_images = gr.Dataset(
                        components=[input_image],
                        samples=[[path.as_posix()] for path in paths],
                    )

                with gr.Row():
                    shape_text = gr.Textbox(
                        label="Shape Description",
                        placeholder="""<gender>, <sleeve length>, <length of lower clothing>, <outer clothing type>, <other accessories1>, ...
Note: The outer clothing type and accessories can be omitted.""",
                    )
                with gr.Row():
                    shape_example_texts = gr.Dataset(
                        components=[shape_text],
                        samples=[
                            ["man, sleeveless T-shirt, long pants"],
                            ["woman, short-sleeve T-shirt, short jeans"],
                        ],
                    )
                with gr.Row():
                    generate_label_button = gr.Button("Generate Label Image")

            with gr.Column():
                with gr.Row():
                    label_image = gr.Image(
                        label="Label Image", type="numpy", elem_id="label-image"
                    )

                with gr.Row():
                    texture_text = gr.Textbox(
                        label="Texture Description",
                        placeholder="""<upper clothing texture>, <lower clothing texture>, <outer clothing texture>
Note: Currently, only 5 types of textures are supported, i.e., pure color, stripe/spline, plaid/lattice, floral, denim.""",
                    )
                with gr.Row():
                    texture_example_texts = gr.Dataset(
                        components=[texture_text],
                        samples=[["pure color, denim"], ["floral, stripe"]],
                    )
                with gr.Row():
                    sample_steps = gr.Slider(
                        10, 300, value=10, step=10, label="Sample Steps"
                    )
                with gr.Row():
                    seed = gr.Slider(0, 1000000, value=0, step=1, label="Seed")
                with gr.Row():
                    generate_human_button = gr.Button("Generate Human")

            with gr.Column():
                with gr.Row():
                    result = gr.Image(
                        label="Result", type="numpy", elem_id="result-image"
                    )

        input_image.change(
            fn=model.process_pose_image, inputs=input_image, outputs=pose_data
        )
        generate_label_button.click(
            fn=model.generate_label_image,
            inputs=[
                pose_data,
                shape_text,
            ],
            outputs=label_image,
        )
        # generate_human_button.click(
        #     fn=model.generate_human,
        #     inputs=[
        #         label_image,
        #         texture_text,
        #         sample_steps,
        #         seed,
        #     ],
        #     outputs=result,
        # )
        generate_human_button.click(
            fn=model.generate_human,
            inputs=[
                pose_data,
                shape_text,
                texture_text,
                sample_steps,
                seed,
            ],
            outputs=result,
        )
        example_images.click(
            fn=set_example_image,
            inputs=example_images,
            outputs=example_images._components,
        )
        shape_example_texts.click(
            fn=set_example_text,
            inputs=shape_example_texts,
            outputs=shape_example_texts._components,
        )
        texture_example_texts.click(
            fn=set_example_text,
            inputs=texture_example_texts,
            outputs=texture_example_texts._components,
        )

    demo.launch(
        # enable_queue=args.enable_queue,
        server_port=args.port,
        share=args.share,
    )


if __name__ == "__main__":
    main()
.history/app_20240515134639.py
ADDED
@@ -0,0 +1,188 @@
#!/usr/bin/env python

from __future__ import annotations

import argparse
import os
import pathlib
import subprocess

import gradio as gr

from model import Model

# if os.getenv("SYSTEM") == "spaces":
#     import mim

#     mim.uninstall("mmcv-full", confirm_yes=True)
#     mim.install("mmcv-full==1.5.2", is_yes=True)

# with open("patch") as f:
#     subprocess.run("patch -p1".split(), cwd="Text2Human", stdin=f)


DESCRIPTION = """# Text2Human

- The algorithm is originally from <a href="https://github.com/yumingj/Text2Human">https://github.com/yumingj/Text2Human</a>, and this demo was made by <a href="https://huggingface.co/spaces/hysts/Text2Human">@hysts</a>. Thanks for their awesome work.

- By varying seeds, you can sample different human images under the same pose, shape description, and texture description. The larger the number of sample steps, the better the quality of the generated images. (The default value of sample steps is 256 in the original repo.)

- The label image generation step can be skipped. However, in that case, the input label image must be 512x256 in size and must contain only the specified colors.
"""


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", type=str, default="cpu")
    parser.add_argument("--theme", type=str)
    parser.add_argument("--share", action="store_true")
    parser.add_argument("--port", type=int)
    parser.add_argument("--disable-queue", dest="enable_queue", action="store_false")
    return parser.parse_args()


# def set_example_image(example: list) -> dict:
#     return gr.Image.update(value=example[0])


def set_example_image(example: list) -> dict:
    return gr.update(value=example[0]["path"])


# def set_example_text(example: list) -> dict:
#     return gr.Textbox.change(value=example[0])


def set_example_text(example: list) -> dict:
    # Update the Textbox with the example text
    return gr.update(value=example[0])


def main():
    args = parse_args()
    print(args.device)
    model = Model(args.device)

    with gr.Blocks(theme=args.theme, css="style.css") as demo:
        gr.Markdown(DESCRIPTION)

        with gr.Row():
            with gr.Column():
                with gr.Row():
                    input_image = gr.Image(
                        label="Input Pose Image", type="pil", elem_id="input-image"
                    )
                    pose_data = gr.State()
                with gr.Row():
                    paths = sorted(pathlib.Path("pose_images").glob("*.png"))
                    example_images = gr.Dataset(
                        components=[input_image],
                        samples=[[path.as_posix()] for path in paths],
                    )

                with gr.Row():
                    shape_text = gr.Textbox(
                        label="Shape Description",
                        placeholder="""<gender>, <sleeve length>, <length of lower clothing>, <outer clothing type>, <other accessories1>, ...
Note: The outer clothing type and accessories can be omitted.""",
                    )
                with gr.Row():
                    shape_example_texts = gr.Dataset(
                        components=[shape_text],
                        samples=[
                            ["man, sleeveless T-shirt, long pants"],
                            ["woman, short-sleeve T-shirt, short jeans"],
                        ],
                    )
                with gr.Row():
                    generate_label_button = gr.Button("Generate Label Image")

            with gr.Column():
                with gr.Row():
                    label_image = gr.Image(
                        label="Label Image", type="numpy", elem_id="label-image"
                    )

                with gr.Row():
                    texture_text = gr.Textbox(
                        label="Texture Description",
                        placeholder="""<upper clothing texture>, <lower clothing texture>, <outer clothing texture>
Note: Currently, only 5 types of textures are supported, i.e., pure color, stripe/spline, plaid/lattice, floral, denim.""",
                    )
                with gr.Row():
                    texture_example_texts = gr.Dataset(
                        components=[texture_text],
                        samples=[["pure color, denim"], ["floral, stripe"]],
                    )
                with gr.Row():
                    sample_steps = gr.Slider(
                        10, 300, value=10, step=10, label="Sample Steps"
                    )
                with gr.Row():
                    seed = gr.Slider(0, 1000000, value=0, step=1, label="Seed")
                with gr.Row():
                    generate_human_button = gr.Button("Generate Human")

            with gr.Column():
                with gr.Row():
                    result = gr.Image(
                        label="Result", type="numpy", elem_id="result-image"
                    )

        input_image.change(
            fn=model.process_pose_image, inputs=input_image, outputs=pose_data
        )
        generate_label_button.click(
            fn=model.generate_label_image,
            inputs=[
                pose_data,
                shape_text,
            ],
            outputs=label_image,
        )
        # generate_human_button.click(
        #     fn=model.generate_human,
        #     inputs=[
        #         label_image,
        #         texture_text,
        #         sample_steps,
        #         seed,
        #     ],
        #     outputs=result,
        # )
        generate_human_button.click(
            fn=model.generate_human,
            inputs=[
                pose_data,
                shape_text,
                texture_text,
                sample_steps,
                seed,
            ],
            outputs=result,
        )
        example_images.click(
            fn=set_example_image,
            inputs=example_images,
            outputs=example_images._components,
        )
        shape_example_texts.click(
            fn=set_example_text,
            inputs=shape_example_texts,
            outputs=shape_example_texts._components,
        )
        texture_example_texts.click(
            fn=set_example_text,
            inputs=texture_example_texts,
            outputs=texture_example_texts._components,
        )

    demo.launch(
        # enable_queue=args.enable_queue,
        server_port=args.port,
        share=args.share,
    )


if __name__ == "__main__":
    main()
app.py
CHANGED
@@ -9,16 +9,17 @@ import subprocess
 
 import gradio as gr
 
-if os.getenv("SYSTEM") == "spaces":
-    import mim
+from model import Model
 
-    mim.uninstall("mmcv-full", confirm_yes=True)
-    mim.install("mmcv-full==1.5.2", is_yes=True)
+# if os.getenv("SYSTEM") == "spaces":
+#     import mim
 
-    with open("patch") as f:
-        subprocess.run("patch -p1".split(), cwd="Text2Human", stdin=f)
+#     mim.uninstall("mmcv-full", confirm_yes=True)
+#     mim.install("mmcv-full==1.5.2", is_yes=True)
+
+# with open("patch") as f:
+#     subprocess.run("patch -p1".split(), cwd="Text2Human", stdin=f)
 
-from model import Model
 
 DESCRIPTION = """# Text2Human
 
@@ -128,7 +129,6 @@ Note: Currently, only 5 types of textures are supported, i.e., pure color, strip
                         label="Result", type="numpy", elem_id="result-image"
                     )
 
-
         input_image.change(
            fn=model.process_pose_image, inputs=input_image, outputs=pose_data
        )
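Since the commit comments out the in-app `mim` setup, the `mmcv-full` dependency now has to be satisfied before the app starts, presumably through the Space's requirements file (hence the commit message "update requirement"). A sketch of the one-time setup this replaces, reusing the exact calls from the disabled block:

# One-time environment setup mirroring the logic app.py no longer runs itself.
import mim

mim.uninstall("mmcv-full", confirm_yes=True)  # remove any conflicting build first
mim.install("mmcv-full==1.5.2", is_yes=True)  # pin the version the model expects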