Spaces:
Running
on
L4
Running
on
L4
Update
Browse files- .pre-commit-config.yaml +9 -17
- .vscode/extensions.json +8 -0
- .vscode/settings.json +5 -14
- README.md +1 -1
- app.py +0 -2
- model.py +18 -22
- preprocessor.py +10 -7
- pyproject.toml +41 -5
- requirements.txt +44 -46
- utils.py +1 -1
- uv.lock +0 -0
.pre-commit-config.yaml
CHANGED
@@ -18,13 +18,15 @@ repos:
|
|
18 |
hooks:
|
19 |
- id: docformatter
|
20 |
args: ["--in-place"]
|
21 |
-
- repo: https://github.com/
|
22 |
-
rev:
|
23 |
hooks:
|
24 |
-
- id:
|
25 |
-
args: ["--
|
|
|
|
|
26 |
- repo: https://github.com/pre-commit/mirrors-mypy
|
27 |
-
rev: v1.
|
28 |
hooks:
|
29 |
- id: mypy
|
30 |
args: ["--ignore-missing-imports"]
|
@@ -35,18 +37,8 @@ repos:
|
|
35 |
"types-PyYAML",
|
36 |
"types-pytz",
|
37 |
]
|
38 |
-
- repo: https://github.com/psf/black
|
39 |
-
rev: 24.10.0
|
40 |
-
hooks:
|
41 |
-
- id: black
|
42 |
-
language_version: python3.10
|
43 |
-
args: ["--line-length", "119"]
|
44 |
-
- repo: https://github.com/charliermarsh/ruff-pre-commit
|
45 |
-
rev: v0.7.0
|
46 |
-
hooks:
|
47 |
-
- id: ruff
|
48 |
- repo: https://github.com/kynan/nbstripout
|
49 |
-
rev: 0.
|
50 |
hooks:
|
51 |
- id: nbstripout
|
52 |
args:
|
@@ -55,7 +47,7 @@ repos:
|
|
55 |
"metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
|
56 |
]
|
57 |
- repo: https://github.com/nbQA-dev/nbQA
|
58 |
-
rev: 1.
|
59 |
hooks:
|
60 |
- id: nbqa-black
|
61 |
- id: nbqa-pyupgrade
|
|
|
18 |
hooks:
|
19 |
- id: docformatter
|
20 |
args: ["--in-place"]
|
21 |
+
- repo: https://github.com/astral-sh/ruff-pre-commit
|
22 |
+
rev: v0.8.4
|
23 |
hooks:
|
24 |
+
- id: ruff
|
25 |
+
args: ["--fix"]
|
26 |
+
- id: ruff-format
|
27 |
+
args: ["--line-length", "119"]
|
28 |
- repo: https://github.com/pre-commit/mirrors-mypy
|
29 |
+
rev: v1.14.0
|
30 |
hooks:
|
31 |
- id: mypy
|
32 |
args: ["--ignore-missing-imports"]
|
|
|
37 |
"types-PyYAML",
|
38 |
"types-pytz",
|
39 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
- repo: https://github.com/kynan/nbstripout
|
41 |
+
rev: 0.8.1
|
42 |
hooks:
|
43 |
- id: nbstripout
|
44 |
args:
|
|
|
47 |
"metadata.interpreter metadata.kernelspec cell.metadata.pycharm",
|
48 |
]
|
49 |
- repo: https://github.com/nbQA-dev/nbQA
|
50 |
+
rev: 1.9.1
|
51 |
hooks:
|
52 |
- id: nbqa-black
|
53 |
- id: nbqa-pyupgrade
|
.vscode/extensions.json
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"recommendations": [
|
3 |
+
"ms-python.python",
|
4 |
+
"charliermarsh.ruff",
|
5 |
+
"streetsidesoftware.code-spell-checker",
|
6 |
+
"tamasfe.even-better-toml"
|
7 |
+
]
|
8 |
+
}
|
.vscode/settings.json
CHANGED
@@ -2,29 +2,20 @@
|
|
2 |
"editor.formatOnSave": true,
|
3 |
"files.insertFinalNewline": false,
|
4 |
"[python]": {
|
5 |
-
"editor.defaultFormatter": "
|
6 |
"editor.formatOnType": true,
|
7 |
"editor.codeActionsOnSave": {
|
|
|
8 |
"source.organizeImports": "explicit"
|
9 |
}
|
10 |
},
|
11 |
"[jupyter]": {
|
12 |
"files.insertFinalNewline": false
|
13 |
},
|
14 |
-
"black-formatter.args": [
|
15 |
-
"--line-length=119"
|
16 |
-
],
|
17 |
-
"isort.args": ["--profile", "black"],
|
18 |
-
"flake8.args": [
|
19 |
-
"--max-line-length=119"
|
20 |
-
],
|
21 |
-
"ruff.lint.args": [
|
22 |
-
"--line-length=119"
|
23 |
-
],
|
24 |
"notebook.output.scrolling": true,
|
25 |
"notebook.formatOnCellExecution": true,
|
26 |
"notebook.formatOnSave.enabled": true,
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
}
|
|
|
2 |
"editor.formatOnSave": true,
|
3 |
"files.insertFinalNewline": false,
|
4 |
"[python]": {
|
5 |
+
"editor.defaultFormatter": "charliermarsh.ruff",
|
6 |
"editor.formatOnType": true,
|
7 |
"editor.codeActionsOnSave": {
|
8 |
+
"source.fixAll.ruff": "explicit",
|
9 |
"source.organizeImports": "explicit"
|
10 |
}
|
11 |
},
|
12 |
"[jupyter]": {
|
13 |
"files.insertFinalNewline": false
|
14 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
"notebook.output.scrolling": true,
|
16 |
"notebook.formatOnCellExecution": true,
|
17 |
"notebook.formatOnSave.enabled": true,
|
18 |
+
"notebook.codeActionsOnSave": {
|
19 |
+
"source.organizeImports": "explicit"
|
20 |
+
}
|
21 |
}
|
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 📉
|
|
4 |
colorFrom: yellow
|
5 |
colorTo: green
|
6 |
sdk: gradio
|
7 |
-
sdk_version: 5.
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: mit
|
|
|
4 |
colorFrom: yellow
|
5 |
colorTo: green
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 5.9.1
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: mit
|
app.py
CHANGED
@@ -1,7 +1,5 @@
|
|
1 |
#!/usr/bin/env python
|
2 |
|
3 |
-
from __future__ import annotations
|
4 |
-
|
5 |
import gradio as gr
|
6 |
import torch
|
7 |
|
|
|
1 |
#!/usr/bin/env python
|
2 |
|
|
|
|
|
3 |
import gradio as gr
|
4 |
import torch
|
5 |
|
model.py
CHANGED
@@ -1,5 +1,3 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
import gc
|
4 |
|
5 |
import numpy as np
|
@@ -40,14 +38,16 @@ def download_all_controlnet_weights() -> None:
|
|
40 |
|
41 |
|
42 |
class Model:
|
43 |
-
def __init__(
|
|
|
|
|
44 |
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
45 |
self.base_model_id = ""
|
46 |
self.task_name = ""
|
47 |
self.pipe = self.load_pipe(base_model_id, task_name)
|
48 |
self.preprocessor = Preprocessor()
|
49 |
|
50 |
-
def load_pipe(self, base_model_id: str, task_name) -> DiffusionPipeline:
|
51 |
if (
|
52 |
base_model_id == self.base_model_id
|
53 |
and task_name == self.task_name
|
@@ -78,7 +78,7 @@ class Model:
|
|
78 |
gc.collect()
|
79 |
try:
|
80 |
self.pipe = self.load_pipe(base_model_id, self.task_name)
|
81 |
-
except Exception:
|
82 |
self.pipe = self.load_pipe(self.base_model_id, self.task_name)
|
83 |
return self.base_model_id
|
84 |
|
@@ -98,11 +98,7 @@ class Model:
|
|
98 |
self.task_name = task_name
|
99 |
|
100 |
def get_prompt(self, prompt: str, additional_prompt: str) -> str:
|
101 |
-
if not prompt:
|
102 |
-
prompt = additional_prompt
|
103 |
-
else:
|
104 |
-
prompt = f"{prompt}, {additional_prompt}"
|
105 |
-
return prompt
|
106 |
|
107 |
@torch.autocast("cuda")
|
108 |
def run_pipe(
|
@@ -163,7 +159,7 @@ class Model:
|
|
163 |
guidance_scale=guidance_scale,
|
164 |
seed=seed,
|
165 |
)
|
166 |
-
return [control_image] + results
|
167 |
|
168 |
@torch.inference_mode()
|
169 |
def process_mlsd(
|
@@ -206,7 +202,7 @@ class Model:
|
|
206 |
guidance_scale=guidance_scale,
|
207 |
seed=seed,
|
208 |
)
|
209 |
-
return [control_image] + results
|
210 |
|
211 |
@torch.inference_mode()
|
212 |
def process_scribble(
|
@@ -260,7 +256,7 @@ class Model:
|
|
260 |
guidance_scale=guidance_scale,
|
261 |
seed=seed,
|
262 |
)
|
263 |
-
return [control_image] + results
|
264 |
|
265 |
@torch.inference_mode()
|
266 |
def process_scribble_interactive(
|
@@ -297,7 +293,7 @@ class Model:
|
|
297 |
guidance_scale=guidance_scale,
|
298 |
seed=seed,
|
299 |
)
|
300 |
-
return [control_image] + results
|
301 |
|
302 |
@torch.inference_mode()
|
303 |
def process_softedge(
|
@@ -355,7 +351,7 @@ class Model:
|
|
355 |
guidance_scale=guidance_scale,
|
356 |
seed=seed,
|
357 |
)
|
358 |
-
return [control_image] + results
|
359 |
|
360 |
@torch.inference_mode()
|
361 |
def process_openpose(
|
@@ -401,7 +397,7 @@ class Model:
|
|
401 |
guidance_scale=guidance_scale,
|
402 |
seed=seed,
|
403 |
)
|
404 |
-
return [control_image] + results
|
405 |
|
406 |
@torch.inference_mode()
|
407 |
def process_segmentation(
|
@@ -446,7 +442,7 @@ class Model:
|
|
446 |
guidance_scale=guidance_scale,
|
447 |
seed=seed,
|
448 |
)
|
449 |
-
return [control_image] + results
|
450 |
|
451 |
@torch.inference_mode()
|
452 |
def process_depth(
|
@@ -491,7 +487,7 @@ class Model:
|
|
491 |
guidance_scale=guidance_scale,
|
492 |
seed=seed,
|
493 |
)
|
494 |
-
return [control_image] + results
|
495 |
|
496 |
@torch.inference_mode()
|
497 |
def process_normal(
|
@@ -536,7 +532,7 @@ class Model:
|
|
536 |
guidance_scale=guidance_scale,
|
537 |
seed=seed,
|
538 |
)
|
539 |
-
return [control_image] + results
|
540 |
|
541 |
@torch.inference_mode()
|
542 |
def process_lineart(
|
@@ -593,7 +589,7 @@ class Model:
|
|
593 |
guidance_scale=guidance_scale,
|
594 |
seed=seed,
|
595 |
)
|
596 |
-
return [control_image] + results
|
597 |
|
598 |
@torch.inference_mode()
|
599 |
def process_shuffle(
|
@@ -636,7 +632,7 @@ class Model:
|
|
636 |
guidance_scale=guidance_scale,
|
637 |
seed=seed,
|
638 |
)
|
639 |
-
return [control_image] + results
|
640 |
|
641 |
@torch.inference_mode()
|
642 |
def process_ip2p(
|
@@ -671,4 +667,4 @@ class Model:
|
|
671 |
guidance_scale=guidance_scale,
|
672 |
seed=seed,
|
673 |
)
|
674 |
-
return [control_image] + results
|
|
|
|
|
|
|
1 |
import gc
|
2 |
|
3 |
import numpy as np
|
|
|
38 |
|
39 |
|
40 |
class Model:
|
41 |
+
def __init__(
|
42 |
+
self, base_model_id: str = "stable-diffusion-v1-5/stable-diffusion-v1-5", task_name: str = "Canny"
|
43 |
+
) -> None:
|
44 |
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
45 |
self.base_model_id = ""
|
46 |
self.task_name = ""
|
47 |
self.pipe = self.load_pipe(base_model_id, task_name)
|
48 |
self.preprocessor = Preprocessor()
|
49 |
|
50 |
+
def load_pipe(self, base_model_id: str, task_name: str) -> DiffusionPipeline:
|
51 |
if (
|
52 |
base_model_id == self.base_model_id
|
53 |
and task_name == self.task_name
|
|
|
78 |
gc.collect()
|
79 |
try:
|
80 |
self.pipe = self.load_pipe(base_model_id, self.task_name)
|
81 |
+
except Exception: # noqa: BLE001
|
82 |
self.pipe = self.load_pipe(self.base_model_id, self.task_name)
|
83 |
return self.base_model_id
|
84 |
|
|
|
98 |
self.task_name = task_name
|
99 |
|
100 |
def get_prompt(self, prompt: str, additional_prompt: str) -> str:
|
101 |
+
return additional_prompt if not prompt else f"{prompt}, {additional_prompt}"
|
|
|
|
|
|
|
|
|
102 |
|
103 |
@torch.autocast("cuda")
|
104 |
def run_pipe(
|
|
|
159 |
guidance_scale=guidance_scale,
|
160 |
seed=seed,
|
161 |
)
|
162 |
+
return [control_image, *results]
|
163 |
|
164 |
@torch.inference_mode()
|
165 |
def process_mlsd(
|
|
|
202 |
guidance_scale=guidance_scale,
|
203 |
seed=seed,
|
204 |
)
|
205 |
+
return [control_image, *results]
|
206 |
|
207 |
@torch.inference_mode()
|
208 |
def process_scribble(
|
|
|
256 |
guidance_scale=guidance_scale,
|
257 |
seed=seed,
|
258 |
)
|
259 |
+
return [control_image, *results]
|
260 |
|
261 |
@torch.inference_mode()
|
262 |
def process_scribble_interactive(
|
|
|
293 |
guidance_scale=guidance_scale,
|
294 |
seed=seed,
|
295 |
)
|
296 |
+
return [control_image, *results]
|
297 |
|
298 |
@torch.inference_mode()
|
299 |
def process_softedge(
|
|
|
351 |
guidance_scale=guidance_scale,
|
352 |
seed=seed,
|
353 |
)
|
354 |
+
return [control_image, *results]
|
355 |
|
356 |
@torch.inference_mode()
|
357 |
def process_openpose(
|
|
|
397 |
guidance_scale=guidance_scale,
|
398 |
seed=seed,
|
399 |
)
|
400 |
+
return [control_image, *results]
|
401 |
|
402 |
@torch.inference_mode()
|
403 |
def process_segmentation(
|
|
|
442 |
guidance_scale=guidance_scale,
|
443 |
seed=seed,
|
444 |
)
|
445 |
+
return [control_image, *results]
|
446 |
|
447 |
@torch.inference_mode()
|
448 |
def process_depth(
|
|
|
487 |
guidance_scale=guidance_scale,
|
488 |
seed=seed,
|
489 |
)
|
490 |
+
return [control_image, *results]
|
491 |
|
492 |
@torch.inference_mode()
|
493 |
def process_normal(
|
|
|
532 |
guidance_scale=guidance_scale,
|
533 |
seed=seed,
|
534 |
)
|
535 |
+
return [control_image, *results]
|
536 |
|
537 |
@torch.inference_mode()
|
538 |
def process_lineart(
|
|
|
589 |
guidance_scale=guidance_scale,
|
590 |
seed=seed,
|
591 |
)
|
592 |
+
return [control_image, *results]
|
593 |
|
594 |
@torch.inference_mode()
|
595 |
def process_shuffle(
|
|
|
632 |
guidance_scale=guidance_scale,
|
633 |
seed=seed,
|
634 |
)
|
635 |
+
return [control_image, *results]
|
636 |
|
637 |
@torch.inference_mode()
|
638 |
def process_ip2p(
|
|
|
667 |
guidance_scale=guidance_scale,
|
668 |
seed=seed,
|
669 |
)
|
670 |
+
return [control_image, *results]
|
preprocessor.py
CHANGED
@@ -1,4 +1,8 @@
|
|
1 |
import gc
|
|
|
|
|
|
|
|
|
2 |
|
3 |
import numpy as np
|
4 |
import PIL.Image
|
@@ -25,11 +29,11 @@ from image_segmentor import ImageSegmentor
|
|
25 |
class Preprocessor:
|
26 |
MODEL_ID = "lllyasviel/Annotators"
|
27 |
|
28 |
-
def __init__(self):
|
29 |
-
self.model = None
|
30 |
self.name = ""
|
31 |
|
32 |
-
def load(self, name: str) -> None:
|
33 |
if name == self.name:
|
34 |
return
|
35 |
if name == "HED":
|
@@ -62,7 +66,7 @@ class Preprocessor:
|
|
62 |
gc.collect()
|
63 |
self.name = name
|
64 |
|
65 |
-
def __call__(self, image: PIL.Image.Image, **kwargs) -> PIL.Image.Image:
|
66 |
if self.name == "Canny":
|
67 |
if "detect_resolution" in kwargs:
|
68 |
detect_resolution = kwargs.pop("detect_resolution")
|
@@ -71,7 +75,7 @@ class Preprocessor:
|
|
71 |
image = resize_image(image, resolution=detect_resolution)
|
72 |
image = self.model(image, **kwargs)
|
73 |
return PIL.Image.fromarray(image)
|
74 |
-
|
75 |
detect_resolution = kwargs.pop("detect_resolution", 512)
|
76 |
image_resolution = kwargs.pop("image_resolution", 512)
|
77 |
image = np.array(image)
|
@@ -81,5 +85,4 @@ class Preprocessor:
|
|
81 |
image = HWC3(image)
|
82 |
image = resize_image(image, resolution=image_resolution)
|
83 |
return PIL.Image.fromarray(image)
|
84 |
-
|
85 |
-
return self.model(image, **kwargs)
|
|
|
1 |
import gc
|
2 |
+
from typing import TYPE_CHECKING
|
3 |
+
|
4 |
+
if TYPE_CHECKING:
|
5 |
+
from collections.abc import Callable
|
6 |
|
7 |
import numpy as np
|
8 |
import PIL.Image
|
|
|
29 |
class Preprocessor:
|
30 |
MODEL_ID = "lllyasviel/Annotators"
|
31 |
|
32 |
+
def __init__(self) -> None:
|
33 |
+
self.model: Callable = None # type: ignore
|
34 |
self.name = ""
|
35 |
|
36 |
+
def load(self, name: str) -> None: # noqa: C901, PLR0912
|
37 |
if name == self.name:
|
38 |
return
|
39 |
if name == "HED":
|
|
|
66 |
gc.collect()
|
67 |
self.name = name
|
68 |
|
69 |
+
def __call__(self, image: PIL.Image.Image, **kwargs) -> PIL.Image.Image: # noqa: ANN003
|
70 |
if self.name == "Canny":
|
71 |
if "detect_resolution" in kwargs:
|
72 |
detect_resolution = kwargs.pop("detect_resolution")
|
|
|
75 |
image = resize_image(image, resolution=detect_resolution)
|
76 |
image = self.model(image, **kwargs)
|
77 |
return PIL.Image.fromarray(image)
|
78 |
+
if self.name == "Midas":
|
79 |
detect_resolution = kwargs.pop("detect_resolution", 512)
|
80 |
image_resolution = kwargs.pop("image_resolution", 512)
|
81 |
image = np.array(image)
|
|
|
85 |
image = HWC3(image)
|
86 |
image = resize_image(image, resolution=image_resolution)
|
87 |
return PIL.Image.fromarray(image)
|
88 |
+
return self.model(image, **kwargs)
|
|
pyproject.toml
CHANGED
@@ -5,18 +5,54 @@ description = ""
|
|
5 |
readme = "README.md"
|
6 |
requires-python = ">=3.10"
|
7 |
dependencies = [
|
8 |
-
"accelerate>=1.
|
9 |
"controlnet-aux>=0.0.9",
|
10 |
-
"diffusers>=0.
|
11 |
"einops>=0.8.0",
|
12 |
-
"gradio>=5.
|
13 |
"hf-transfer>=0.1.8",
|
14 |
"mediapipe>=0.10.18",
|
15 |
"opencv-python-headless>=4.10.0.84",
|
16 |
"safetensors>=0.4.5",
|
17 |
-
"spaces>=0.
|
18 |
"torch==2.4.0",
|
19 |
"torchvision>=0.19.0",
|
20 |
-
"transformers>=4.
|
21 |
"xformers>=0.0.27.post2",
|
22 |
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
readme = "README.md"
|
6 |
requires-python = ">=3.10"
|
7 |
dependencies = [
|
8 |
+
"accelerate>=1.2.1",
|
9 |
"controlnet-aux>=0.0.9",
|
10 |
+
"diffusers>=0.32.1",
|
11 |
"einops>=0.8.0",
|
12 |
+
"gradio>=5.9.1",
|
13 |
"hf-transfer>=0.1.8",
|
14 |
"mediapipe>=0.10.18",
|
15 |
"opencv-python-headless>=4.10.0.84",
|
16 |
"safetensors>=0.4.5",
|
17 |
+
"spaces>=0.31.1",
|
18 |
"torch==2.4.0",
|
19 |
"torchvision>=0.19.0",
|
20 |
+
"transformers>=4.47.1",
|
21 |
"xformers>=0.0.27.post2",
|
22 |
]
|
23 |
+
|
24 |
+
[tool.ruff]
|
25 |
+
line-length = 119
|
26 |
+
|
27 |
+
[tool.ruff.lint]
|
28 |
+
select = ["ALL"]
|
29 |
+
ignore = [
|
30 |
+
"COM812", # missing-trailing-comma
|
31 |
+
"D203", # one-blank-line-before-class
|
32 |
+
"D213", # multi-line-summary-second-line
|
33 |
+
"E501", # line-too-long
|
34 |
+
"SIM117", # multiple-with-statements
|
35 |
+
]
|
36 |
+
extend-ignore = [
|
37 |
+
"D100", # undocumented-public-module
|
38 |
+
"D101", # undocumented-public-class
|
39 |
+
"D102", # undocumented-public-method
|
40 |
+
"D103", # undocumented-public-function
|
41 |
+
"D104", # undocumented-public-package
|
42 |
+
"D105", # undocumented-magic-method
|
43 |
+
"D107", # undocumented-public-init
|
44 |
+
"EM101", # raw-string-in-exception
|
45 |
+
"FBT001", # boolean-type-hint-positional-argument
|
46 |
+
"FBT002", # boolean-default-value-positional-argument
|
47 |
+
"PD901", # pandas-df-variable-name
|
48 |
+
"PGH003", # blanket-type-ignore
|
49 |
+
"PLR0913", # too-many-arguments
|
50 |
+
"PLR0915", # too-many-statements
|
51 |
+
"TRY003", # raise-vanilla-args
|
52 |
+
]
|
53 |
+
unfixable = [
|
54 |
+
"F401", # unused-import
|
55 |
+
]
|
56 |
+
|
57 |
+
[tool.ruff.format]
|
58 |
+
docstring-code-format = true
|
requirements.txt
CHANGED
@@ -2,29 +2,29 @@
|
|
2 |
# uv pip compile pyproject.toml -o requirements.txt
|
3 |
absl-py==2.1.0
|
4 |
# via mediapipe
|
5 |
-
accelerate==1.
|
6 |
# via controlnet-v1-1 (pyproject.toml)
|
7 |
aiofiles==23.2.1
|
8 |
# via gradio
|
9 |
annotated-types==0.7.0
|
10 |
# via pydantic
|
11 |
-
anyio==4.
|
12 |
# via
|
13 |
# gradio
|
14 |
# httpx
|
15 |
# starlette
|
16 |
-
attrs==24.
|
17 |
# via mediapipe
|
18 |
-
certifi==2024.
|
19 |
# via
|
20 |
# httpcore
|
21 |
# httpx
|
22 |
# requests
|
23 |
cffi==1.17.1
|
24 |
# via sounddevice
|
25 |
-
charset-normalizer==3.4.
|
26 |
# via requests
|
27 |
-
click==8.1.
|
28 |
# via
|
29 |
# typer
|
30 |
# uvicorn
|
@@ -34,7 +34,7 @@ controlnet-aux==0.0.9
|
|
34 |
# via controlnet-v1-1 (pyproject.toml)
|
35 |
cycler==0.12.1
|
36 |
# via matplotlib
|
37 |
-
diffusers==0.
|
38 |
# via controlnet-v1-1 (pyproject.toml)
|
39 |
einops==0.8.0
|
40 |
# via
|
@@ -42,9 +42,9 @@ einops==0.8.0
|
|
42 |
# controlnet-aux
|
43 |
exceptiongroup==1.2.2
|
44 |
# via anyio
|
45 |
-
fastapi==0.115.
|
46 |
# via gradio
|
47 |
-
ffmpy==0.
|
48 |
# via gradio
|
49 |
filelock==3.16.1
|
50 |
# via
|
@@ -54,20 +54,20 @@ filelock==3.16.1
|
|
54 |
# torch
|
55 |
# transformers
|
56 |
# triton
|
57 |
-
flatbuffers==24.
|
58 |
# via mediapipe
|
59 |
-
fonttools==4.55.
|
60 |
# via matplotlib
|
61 |
-
fsspec==2024.
|
62 |
# via
|
63 |
# gradio-client
|
64 |
# huggingface-hub
|
65 |
# torch
|
66 |
-
gradio==5.
|
67 |
# via
|
68 |
# controlnet-v1-1 (pyproject.toml)
|
69 |
# spaces
|
70 |
-
gradio-client==1.
|
71 |
# via gradio
|
72 |
h11==0.14.0
|
73 |
# via
|
@@ -77,13 +77,13 @@ hf-transfer==0.1.8
|
|
77 |
# via controlnet-v1-1 (pyproject.toml)
|
78 |
httpcore==1.0.7
|
79 |
# via httpx
|
80 |
-
httpx==0.
|
81 |
# via
|
82 |
# gradio
|
83 |
# gradio-client
|
84 |
# safehttpx
|
85 |
# spaces
|
86 |
-
huggingface-hub==0.
|
87 |
# via
|
88 |
# accelerate
|
89 |
# controlnet-aux
|
@@ -97,23 +97,23 @@ idna==3.10
|
|
97 |
# anyio
|
98 |
# httpx
|
99 |
# requests
|
100 |
-
imageio==2.36.
|
101 |
# via scikit-image
|
102 |
importlib-metadata==8.5.0
|
103 |
# via
|
104 |
# controlnet-aux
|
105 |
# diffusers
|
106 |
-
jax==0.4.
|
107 |
# via mediapipe
|
108 |
-
jaxlib==0.4.
|
109 |
# via
|
110 |
# jax
|
111 |
# mediapipe
|
112 |
-
jinja2==3.1.
|
113 |
# via
|
114 |
# gradio
|
115 |
# torch
|
116 |
-
kiwisolver==1.4.
|
117 |
# via matplotlib
|
118 |
lazy-loader==0.4
|
119 |
# via scikit-image
|
@@ -123,11 +123,11 @@ markupsafe==2.1.5
|
|
123 |
# via
|
124 |
# gradio
|
125 |
# jinja2
|
126 |
-
matplotlib==3.
|
127 |
# via mediapipe
|
128 |
mdurl==0.1.2
|
129 |
# via markdown-it-py
|
130 |
-
mediapipe==0.10.
|
131 |
# via controlnet-v1-1 (pyproject.toml)
|
132 |
ml-dtypes==0.5.0
|
133 |
# via
|
@@ -186,7 +186,7 @@ nvidia-cusparse-cu12==12.1.0.106
|
|
186 |
# torch
|
187 |
nvidia-nccl-cu12==2.20.5
|
188 |
# via torch
|
189 |
-
nvidia-nvjitlink-cu12==12.6.
|
190 |
# via
|
191 |
# nvidia-cusolver-cu12
|
192 |
# nvidia-cusparse-cu12
|
@@ -200,7 +200,7 @@ opencv-python-headless==4.10.0.84
|
|
200 |
# controlnet-aux
|
201 |
opt-einsum==3.4.0
|
202 |
# via jax
|
203 |
-
orjson==3.10.
|
204 |
# via gradio
|
205 |
packaging==24.2
|
206 |
# via
|
@@ -232,24 +232,24 @@ psutil==5.9.8
|
|
232 |
# spaces
|
233 |
pycparser==2.22
|
234 |
# via cffi
|
235 |
-
pydantic==2.
|
236 |
# via
|
237 |
# fastapi
|
238 |
# gradio
|
239 |
# spaces
|
240 |
-
pydantic-core==2.
|
241 |
# via pydantic
|
242 |
pydub==0.25.1
|
243 |
# via gradio
|
244 |
pygments==2.18.0
|
245 |
# via rich
|
246 |
-
pyparsing==3.2.
|
247 |
# via matplotlib
|
248 |
python-dateutil==2.9.0.post0
|
249 |
# via
|
250 |
# matplotlib
|
251 |
# pandas
|
252 |
-
python-multipart==0.0.
|
253 |
# via gradio
|
254 |
pytz==2024.2
|
255 |
# via pandas
|
@@ -271,9 +271,9 @@ requests==2.32.3
|
|
271 |
# transformers
|
272 |
rich==13.9.4
|
273 |
# via typer
|
274 |
-
ruff==0.
|
275 |
# via gradio
|
276 |
-
safehttpx==0.1.
|
277 |
# via gradio
|
278 |
safetensors==0.4.5
|
279 |
# via
|
@@ -281,7 +281,7 @@ safetensors==0.4.5
|
|
281 |
# accelerate
|
282 |
# diffusers
|
283 |
# transformers
|
284 |
-
scikit-image==0.
|
285 |
# via controlnet-aux
|
286 |
scipy==1.14.1
|
287 |
# via
|
@@ -295,15 +295,13 @@ sentencepiece==0.2.0
|
|
295 |
# via mediapipe
|
296 |
shellingham==1.5.4
|
297 |
# via typer
|
298 |
-
six==1.
|
299 |
# via python-dateutil
|
300 |
sniffio==1.3.1
|
301 |
-
# via
|
302 |
-
# anyio
|
303 |
-
# httpx
|
304 |
sounddevice==0.5.1
|
305 |
# via mediapipe
|
306 |
-
spaces==0.
|
307 |
# via controlnet-v1-1 (pyproject.toml)
|
308 |
starlette==0.41.3
|
309 |
# via
|
@@ -311,13 +309,13 @@ starlette==0.41.3
|
|
311 |
# gradio
|
312 |
sympy==1.13.3
|
313 |
# via torch
|
314 |
-
tifffile==2024.
|
315 |
# via scikit-image
|
316 |
timm==0.6.7
|
317 |
# via controlnet-aux
|
318 |
-
tokenizers==0.
|
319 |
# via transformers
|
320 |
-
tomlkit==0.
|
321 |
# via gradio
|
322 |
torch==2.4.0
|
323 |
# via
|
@@ -332,15 +330,15 @@ torchvision==0.19.0
|
|
332 |
# controlnet-v1-1 (pyproject.toml)
|
333 |
# controlnet-aux
|
334 |
# timm
|
335 |
-
tqdm==4.67.
|
336 |
# via
|
337 |
# huggingface-hub
|
338 |
# transformers
|
339 |
-
transformers==4.
|
340 |
# via controlnet-v1-1 (pyproject.toml)
|
341 |
triton==3.0.0
|
342 |
# via torch
|
343 |
-
typer==0.
|
344 |
# via gradio
|
345 |
typing-extensions==4.12.2
|
346 |
# via
|
@@ -358,11 +356,11 @@ typing-extensions==4.12.2
|
|
358 |
# uvicorn
|
359 |
tzdata==2024.2
|
360 |
# via pandas
|
361 |
-
urllib3==2.
|
362 |
# via requests
|
363 |
-
uvicorn==0.
|
364 |
# via gradio
|
365 |
-
websockets==
|
366 |
# via gradio-client
|
367 |
xformers==0.0.27.post2
|
368 |
# via controlnet-v1-1 (pyproject.toml)
|
|
|
2 |
# uv pip compile pyproject.toml -o requirements.txt
|
3 |
absl-py==2.1.0
|
4 |
# via mediapipe
|
5 |
+
accelerate==1.2.1
|
6 |
# via controlnet-v1-1 (pyproject.toml)
|
7 |
aiofiles==23.2.1
|
8 |
# via gradio
|
9 |
annotated-types==0.7.0
|
10 |
# via pydantic
|
11 |
+
anyio==4.7.0
|
12 |
# via
|
13 |
# gradio
|
14 |
# httpx
|
15 |
# starlette
|
16 |
+
attrs==24.3.0
|
17 |
# via mediapipe
|
18 |
+
certifi==2024.12.14
|
19 |
# via
|
20 |
# httpcore
|
21 |
# httpx
|
22 |
# requests
|
23 |
cffi==1.17.1
|
24 |
# via sounddevice
|
25 |
+
charset-normalizer==3.4.1
|
26 |
# via requests
|
27 |
+
click==8.1.8
|
28 |
# via
|
29 |
# typer
|
30 |
# uvicorn
|
|
|
34 |
# via controlnet-v1-1 (pyproject.toml)
|
35 |
cycler==0.12.1
|
36 |
# via matplotlib
|
37 |
+
diffusers==0.32.1
|
38 |
# via controlnet-v1-1 (pyproject.toml)
|
39 |
einops==0.8.0
|
40 |
# via
|
|
|
42 |
# controlnet-aux
|
43 |
exceptiongroup==1.2.2
|
44 |
# via anyio
|
45 |
+
fastapi==0.115.6
|
46 |
# via gradio
|
47 |
+
ffmpy==0.5.0
|
48 |
# via gradio
|
49 |
filelock==3.16.1
|
50 |
# via
|
|
|
54 |
# torch
|
55 |
# transformers
|
56 |
# triton
|
57 |
+
flatbuffers==24.12.23
|
58 |
# via mediapipe
|
59 |
+
fonttools==4.55.3
|
60 |
# via matplotlib
|
61 |
+
fsspec==2024.12.0
|
62 |
# via
|
63 |
# gradio-client
|
64 |
# huggingface-hub
|
65 |
# torch
|
66 |
+
gradio==5.9.1
|
67 |
# via
|
68 |
# controlnet-v1-1 (pyproject.toml)
|
69 |
# spaces
|
70 |
+
gradio-client==1.5.2
|
71 |
# via gradio
|
72 |
h11==0.14.0
|
73 |
# via
|
|
|
77 |
# via controlnet-v1-1 (pyproject.toml)
|
78 |
httpcore==1.0.7
|
79 |
# via httpx
|
80 |
+
httpx==0.28.1
|
81 |
# via
|
82 |
# gradio
|
83 |
# gradio-client
|
84 |
# safehttpx
|
85 |
# spaces
|
86 |
+
huggingface-hub==0.27.0
|
87 |
# via
|
88 |
# accelerate
|
89 |
# controlnet-aux
|
|
|
97 |
# anyio
|
98 |
# httpx
|
99 |
# requests
|
100 |
+
imageio==2.36.1
|
101 |
# via scikit-image
|
102 |
importlib-metadata==8.5.0
|
103 |
# via
|
104 |
# controlnet-aux
|
105 |
# diffusers
|
106 |
+
jax==0.4.38
|
107 |
# via mediapipe
|
108 |
+
jaxlib==0.4.38
|
109 |
# via
|
110 |
# jax
|
111 |
# mediapipe
|
112 |
+
jinja2==3.1.5
|
113 |
# via
|
114 |
# gradio
|
115 |
# torch
|
116 |
+
kiwisolver==1.4.8
|
117 |
# via matplotlib
|
118 |
lazy-loader==0.4
|
119 |
# via scikit-image
|
|
|
123 |
# via
|
124 |
# gradio
|
125 |
# jinja2
|
126 |
+
matplotlib==3.10.0
|
127 |
# via mediapipe
|
128 |
mdurl==0.1.2
|
129 |
# via markdown-it-py
|
130 |
+
mediapipe==0.10.20
|
131 |
# via controlnet-v1-1 (pyproject.toml)
|
132 |
ml-dtypes==0.5.0
|
133 |
# via
|
|
|
186 |
# torch
|
187 |
nvidia-nccl-cu12==2.20.5
|
188 |
# via torch
|
189 |
+
nvidia-nvjitlink-cu12==12.6.85
|
190 |
# via
|
191 |
# nvidia-cusolver-cu12
|
192 |
# nvidia-cusparse-cu12
|
|
|
200 |
# controlnet-aux
|
201 |
opt-einsum==3.4.0
|
202 |
# via jax
|
203 |
+
orjson==3.10.13
|
204 |
# via gradio
|
205 |
packaging==24.2
|
206 |
# via
|
|
|
232 |
# spaces
|
233 |
pycparser==2.22
|
234 |
# via cffi
|
235 |
+
pydantic==2.10.4
|
236 |
# via
|
237 |
# fastapi
|
238 |
# gradio
|
239 |
# spaces
|
240 |
+
pydantic-core==2.27.2
|
241 |
# via pydantic
|
242 |
pydub==0.25.1
|
243 |
# via gradio
|
244 |
pygments==2.18.0
|
245 |
# via rich
|
246 |
+
pyparsing==3.2.1
|
247 |
# via matplotlib
|
248 |
python-dateutil==2.9.0.post0
|
249 |
# via
|
250 |
# matplotlib
|
251 |
# pandas
|
252 |
+
python-multipart==0.0.20
|
253 |
# via gradio
|
254 |
pytz==2024.2
|
255 |
# via pandas
|
|
|
271 |
# transformers
|
272 |
rich==13.9.4
|
273 |
# via typer
|
274 |
+
ruff==0.8.4
|
275 |
# via gradio
|
276 |
+
safehttpx==0.1.6
|
277 |
# via gradio
|
278 |
safetensors==0.4.5
|
279 |
# via
|
|
|
281 |
# accelerate
|
282 |
# diffusers
|
283 |
# transformers
|
284 |
+
scikit-image==0.25.0
|
285 |
# via controlnet-aux
|
286 |
scipy==1.14.1
|
287 |
# via
|
|
|
295 |
# via mediapipe
|
296 |
shellingham==1.5.4
|
297 |
# via typer
|
298 |
+
six==1.17.0
|
299 |
# via python-dateutil
|
300 |
sniffio==1.3.1
|
301 |
+
# via anyio
|
|
|
|
|
302 |
sounddevice==0.5.1
|
303 |
# via mediapipe
|
304 |
+
spaces==0.31.1
|
305 |
# via controlnet-v1-1 (pyproject.toml)
|
306 |
starlette==0.41.3
|
307 |
# via
|
|
|
309 |
# gradio
|
310 |
sympy==1.13.3
|
311 |
# via torch
|
312 |
+
tifffile==2024.12.12
|
313 |
# via scikit-image
|
314 |
timm==0.6.7
|
315 |
# via controlnet-aux
|
316 |
+
tokenizers==0.21.0
|
317 |
# via transformers
|
318 |
+
tomlkit==0.13.2
|
319 |
# via gradio
|
320 |
torch==2.4.0
|
321 |
# via
|
|
|
330 |
# controlnet-v1-1 (pyproject.toml)
|
331 |
# controlnet-aux
|
332 |
# timm
|
333 |
+
tqdm==4.67.1
|
334 |
# via
|
335 |
# huggingface-hub
|
336 |
# transformers
|
337 |
+
transformers==4.47.1
|
338 |
# via controlnet-v1-1 (pyproject.toml)
|
339 |
triton==3.0.0
|
340 |
# via torch
|
341 |
+
typer==0.15.1
|
342 |
# via gradio
|
343 |
typing-extensions==4.12.2
|
344 |
# via
|
|
|
356 |
# uvicorn
|
357 |
tzdata==2024.2
|
358 |
# via pandas
|
359 |
+
urllib3==2.3.0
|
360 |
# via requests
|
361 |
+
uvicorn==0.34.0
|
362 |
# via gradio
|
363 |
+
websockets==14.1
|
364 |
# via gradio-client
|
365 |
xformers==0.0.27.post2
|
366 |
# via controlnet-v1-1 (pyproject.toml)
|
utils.py
CHANGED
@@ -5,5 +5,5 @@ from settings import MAX_SEED
|
|
5 |
|
6 |
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
|
7 |
if randomize_seed:
|
8 |
-
seed = random.randint(0, MAX_SEED)
|
9 |
return seed
|
|
|
5 |
|
6 |
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
|
7 |
if randomize_seed:
|
8 |
+
seed = random.randint(0, MAX_SEED) # noqa: S311
|
9 |
return seed
|
uv.lock
CHANGED
The diff for this file is too large to render.
See raw diff
|
|