Update
- README.md +1 -1
- app.py +8 -7
- requirements.txt +7 -7
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: ⚡
 colorFrom: indigo
 colorTo: red
 sdk: gradio
-sdk_version:
+sdk_version: 4.36.0
 app_file: app.py
 pinned: false
 suggested_hardware: t4-small
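The bump to `sdk_version: 4.36.0` moves the Space from the Gradio 3.x line (the old pin is cut off above) onto 4.x, which is what drives the API changes in app.py below.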
app.py CHANGED
@@ -22,13 +22,13 @@ def load_hairstyle_list() -> list[str]:
 
 
 def set_example_image(example: list) -> dict:
-    return gr.Image
+    return gr.Image(value=example[0])
 
 
 def update_step2_components(choice: str) -> tuple[dict, dict]:
     return (
-        gr.Dropdown
-        gr.Textbox
+        gr.Dropdown(visible=choice in ["hairstyle", "both"]),
+        gr.Textbox(visible=choice in ["color", "both"]),
     )
 
 
@@ -36,7 +36,7 @@ model = Model()
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    with gr.
+    with gr.Group():
         gr.Markdown("## Step 1")
         with gr.Row():
             with gr.Column():
@@ -48,13 +48,13 @@ with gr.Blocks(css="style.css") as demo:
                 aligned_face = gr.Image(label="Aligned Face", type="pil", interactive=False)
             with gr.Column():
                 reconstructed_face = gr.Image(label="Reconstructed Face", type="numpy")
-                latent = gr.
+                latent = gr.State()
 
         with gr.Row():
             paths = sorted(pathlib.Path("images").glob("*.jpg"))
             gr.Examples(examples=[[path.as_posix()] for path in paths], inputs=input_image)
 
-    with gr.
+    with gr.Group():
         gr.Markdown("## Step 2")
         with gr.Row():
             with gr.Column():
@@ -87,4 +87,5 @@ with gr.Blocks(css="style.css") as demo:
         outputs=result,
     )
 
-demo.queue(max_size=10).launch()
+if __name__ == "__main__":
+    demo.queue(max_size=10).launch()
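Taken together, these edits are the standard Gradio 3 → 4 migration: handlers return configured component instances instead of the old `gr.Component.update(...)` dicts, `gr.Group` replaces the container the truncated `with gr.` lines pointed at (likely `gr.Box`, which was removed in 4.x, though the old name is cut off here), `gr.State` holds the per-session latent, and launch moves behind a main guard. A minimal runnable sketch of the same pattern, with illustrative component names and choices not taken from the Space:

```python
import gradio as gr


def update_step2_components(choice: str) -> tuple[gr.Dropdown, gr.Textbox]:
    # Gradio 4.x: return configured component instances to update properties
    # in place; the 3.x API used gr.Dropdown.update(...) / gr.Textbox.update(...).
    return (
        gr.Dropdown(visible=choice in ["hairstyle", "both"]),
        gr.Textbox(visible=choice in ["color", "both"]),
    )


with gr.Blocks() as demo:
    with gr.Group():  # gr.Group is the 4.x container (gr.Box was removed)
        choice = gr.Radio(["hairstyle", "color", "both"], value="both", label="What to edit")
        hairstyle = gr.Dropdown(["afro", "bob cut"], label="Hairstyle")  # illustrative choices
        color = gr.Textbox(label="Color description")
    latent = gr.State()  # per-session value carried between steps; replaces 3.x gr.Variable
    choice.change(fn=update_step2_components, inputs=choice, outputs=[hairstyle, color])

if __name__ == "__main__":
    demo.queue(max_size=10).launch()
```

Returning a component instance from a handler only overrides the properties you explicitly set (here `visible`); it does not rebuild the component or discard its current value.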
requirements.txt CHANGED
@@ -1,8 +1,8 @@
-dlib==19.
+dlib==19.24.4
 git+https://github.com/openai/CLIP.git
-numpy==1.
-opencv-python-headless==4.
-Pillow==9.
-scipy==1.
-torch==
-torchvision==0.
+numpy==1.26.4
+opencv-python-headless==4.10.0.82
+Pillow==9.5.0
+scipy==1.13.1
+torch==2.0.1
+torchvision==0.15.2
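The update completes the previously truncated pins with exact versions. A small sketch for checking a local environment against them (not part of the Space; it uses only the standard library, and the package names are the pip distribution names from requirements.txt):

```python
from importlib.metadata import PackageNotFoundError, version

PINS = {
    "dlib": "19.24.4",
    "numpy": "1.26.4",
    "opencv-python-headless": "4.10.0.82",
    "Pillow": "9.5.0",
    "scipy": "1.13.1",
    "torch": "2.0.1",
    "torchvision": "0.15.2",
}

# Compare installed distributions against the pins above; missing
# packages are reported rather than raising.
for pkg, pinned in PINS.items():
    try:
        installed = version(pkg)
    except PackageNotFoundError:
        print(f"{pkg}: not installed (pinned {pinned})")
        continue
    status = "OK" if installed == pinned else f"MISMATCH (pinned {pinned})"
    print(f"{pkg}: {installed} {status}")
```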