Spaces: Running on Zero
import torch
from diffusers import DiffusionPipeline, LCMScheduler, UNet2DConditionModel
import logging

# Configure module-level logging so generation progress and errors are visible.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Prefer the GPU when available; the pipeline below is moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the LCM-distilled UNet separately; it replaces the pipeline's default
# UNet so generation can run in very few inference steps.
unet = UNet2DConditionModel.from_pretrained(
    "gvecchio/StableMaterials",
    subfolder="unet_lcm",
    torch_dtype=torch.float16,
)

# NOTE(review): trust_remote_code=True executes Python shipped inside the
# model repo — confirm "gvecchio/StableMaterials" is a trusted source.
pipe = DiffusionPipeline.from_pretrained(
    "gvecchio/StableMaterials",
    trust_remote_code=True,
    unet=unet,
    torch_dtype=torch.float16,
).to(device)

# Swap in the LCM scheduler to match the distilled UNet (few-step sampling).
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
def generate_material(prompt, seed=-1, resolution=512, refinement=False):
    """Generate a tileable material image from a text prompt.

    Args:
        prompt: Text description of the material to generate.
        seed: RNG seed; -1 selects a random seed in [0, 10000).
        resolution: Side length the output image is resized to (square).
        refinement: Placeholder flag — the refinement pass is not implemented.

    Returns:
        A PIL image of the generated material, resized to
        (resolution, resolution).

    Raises:
        Exception: Any failure from the diffusion pipeline is logged with its
            traceback and re-raised to the caller.
    """
    try:
        # Resolve -1 to a concrete random seed so every run is reproducible
        # from the logged value.
        if seed == -1:
            seed = torch.randint(0, 10000, (1,)).item()
        # Lazy %-style args avoid formatting cost when INFO is disabled.
        logger.info("Generating images for prompt: %s with seed: %s", prompt, seed)

        generator = torch.Generator(device=pipe.device).manual_seed(seed)
        image = pipe(
            prompt=[prompt],
            tileable=True,
            num_images_per_prompt=1,
            num_inference_steps=4,  # LCM needs only a handful of steps
            generator=generator,
        ).images[0]
        image = image.resize((resolution, resolution))

        if refinement:
            # TODO: refinement pass is not implemented yet.
            pass
        return image
    except Exception:
        # logger.exception records the full traceback; propagate so the
        # caller can handle the failure.
        logger.exception("Exception occurred while generating images")
        raise