Spaces:
Runtime error
Runtime error
okaris
committed on
Commit
•
89a49d8
1
Parent(s):
0bcd2e0
Replicate demo
Browse files- cog.yaml +29 -0
- predict.py +60 -0
cog.yaml
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Configuration for Cog ⚙️
# Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md

build:
  # This model requires a GPU at runtime.
  gpu: true

  # Python version in the form '3.8' or '3.8.12'.
  python_version: "3.8"

  # Python dependencies are pinned in requirements.txt.
  python_requirements: "requirements.txt"

# predict.py defines how predictions are run on your model.
predict: "predict.py:Predictor"
|
predict.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md

from typing import List

from cog import BasePredictor, Input, Path

from omni_zero import OmniZeroSingle

class Predictor(BasePredictor):
    """Cog predictor wrapping OmniZeroSingle.

    Generates images conditioned on a prompt plus four reference images
    (base, composition, style, identity), each with its own strength knob,
    and an optional depth image. Produces one JPEG per requested image.
    """

    def setup(self):
        """Load the model into memory to make running multiple predictions efficient."""
        self.omni_zero = OmniZeroSingle(
            base_model="frankjoshua/albedobaseXL_v13",
        )

    def predict(
        self,
        seed: int = Input(description="Random seed for the model", default=42),
        prompt: str = Input(description="Prompt for the model", default="A person"),
        negative_prompt: str = Input(description="Negative prompt for the model", default="blurry, out of focus"),
        guidance_scale: float = Input(description="Guidance scale for the model", default=3.0, ge=0.0, le=14.0),
        number_of_images: int = Input(description="Number of images to generate", default=1, ge=1, le=4),
        number_of_steps: int = Input(description="Number of steps for the model", default=10, ge=1, le=50),
        base_image: Path = Input(description="Base image for the model"),
        base_image_strength: float = Input(description="Base image strength for the model", default=0.15, ge=0.0, le=1.0),
        composition_image: Path = Input(description="Composition image for the model"),
        composition_image_strength: float = Input(description="Composition image strength for the model", default=1.0, ge=0.0, le=1.0),
        style_image: Path = Input(description="Style image for the model"),
        style_image_strength: float = Input(description="Style image strength for the model", default=1.0, ge=0.0, le=1.0),
        identity_image: Path = Input(description="Identity image for the model"),
        identity_image_strength: float = Input(description="Identity image strength for the model", default=1.0, ge=0.0, le=1.0),
        depth_image: Path = Input(description="Depth image for the model"),
        depth_image_strength: float = Input(description="Depth image strength for the model, if not supplied the composition image will be used for depth", default=0.5, ge=0.0, le=1.0),
    ) -> List[Path]:
        # BUG FIX: was annotated `-> Path` while returning a list of Paths.
        # Cog builds the output schema from this annotation, so the wrong
        # type breaks multi-image (number_of_images > 1) output.
        """Run a single prediction on the model and return the generated image files."""
        images = self.omni_zero.generate(
            seed=seed,
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            number_of_images=number_of_images,
            number_of_steps=number_of_steps,
            base_image=base_image,
            base_image_strength=base_image_strength,
            composition_image=composition_image,
            composition_image_strength=composition_image_strength,
            style_image=style_image,
            style_image_strength=style_image_strength,
            identity_image=identity_image,
            identity_image_strength=identity_image_strength,
            depth_image=depth_image,
            depth_image_strength=depth_image_strength,
        )

        # Save each generated image to a numbered JPEG and collect the paths.
        outputs = []
        for i, image in enumerate(images):
            output_path = f"oz_output_{i}.jpg"
            image.save(output_path)
            outputs.append(Path(output_path))

        return outputs
|