Diffusion DPO LoRA Collection
How to train: https://github.com/huggingface/diffusers/tree/main/examples/research_projects/diffusion_dpo
Model trained with a LoRA implementation of Diffusion DPO. Read more here.
Base Model: https://huggingface.co./stabilityai/stable-diffusion-xl-base-1.0
import torch
from diffusers import AutoPipelineForText2Image, DPMSolverMultistepScheduler
from diffusers.utils import make_image_grid

# Load the SDXL base pipeline in fp16
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)

# Use a DPM-Solver++ SDE scheduler with Karras sigmas
pipe.scheduler = DPMSolverMultistepScheduler.from_config(
    pipe.scheduler.config,
    use_karras_sigmas=True,
    algorithm_type="sde-dpmsolver++",
)
pipe.to("cuda")
seed = 12341234123
prompt = "professional portrait photo of a girl, photograph, highly detailed face, depth of field, moody light, golden hour, style by Dan Winters, Russell James, Steve McCurry, centered, extremely detailed, Nikon D850, award winning photography"
negative_prompt = "3d render, cartoon, drawing, art, low light, blur, pixelated, low resolution, black and white"
num_inference_steps = 40
height = 1024
width = height
guidance_scale = 7.5
# Clear any previously loaded LoRA, then attach the DPO LoRA adapter
pipe.unload_lora_weights()
pipe.load_lora_weights(
    "radames/sdxl-DPO-LoRA",
    adapter_name="sdxl-dpo-lora",
)
# Scale the adapter's influence (1.0 = full strength)
pipe.set_adapters(["sdxl-dpo-lora"], adapter_weights=[0.9])
generator = torch.Generator().manual_seed(seed)  # fixed seed for reproducibility
with_dpo = pipe(
prompt=prompt,
guidance_scale=guidance_scale,
negative_prompt=negative_prompt,
num_inference_steps=num_inference_steps,
width=width,
height=height,
generator=generator,
).images[0]
with_dpo  # display the generated image (e.g., in a notebook)
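make_image_grid is imported above but unused in the snippet, and the with_dpo name suggests a comparison against the base model without the LoRA. A minimal sketch, reusing the settings above, that regenerates the same prompt and seed with the LoRA unloaded and tiles both results:

# Sketch: same prompt/seed without the DPO LoRA, for a side-by-side comparison
pipe.unload_lora_weights()
generator = torch.Generator().manual_seed(seed)
without_dpo = pipe(
    prompt=prompt,
    guidance_scale=guidance_scale,
    negative_prompt=negative_prompt,
    num_inference_steps=num_inference_steps,
    width=width,
    height=height,
    generator=generator,
).images[0]
make_image_grid([without_dpo, with_dpo], rows=1, cols=2)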
adapter_weights controls how strongly the DPO LoRA is applied: 1.0 uses it at full strength, while lower values blend it more subtly with the base model.
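To see the effect of different weights, here is a minimal sketch (the weight values 0.5, 0.9, and 1.2 are arbitrary examples) that re-runs the same settings at several scales and tiles the results:

# Sketch: sweep the LoRA scale and compare results side by side
pipe.unload_lora_weights()
pipe.load_lora_weights("radames/sdxl-DPO-LoRA", adapter_name="sdxl-dpo-lora")
images = []
for weight in [0.5, 0.9, 1.2]:  # example values, adjust to taste
    pipe.set_adapters(["sdxl-dpo-lora"], adapter_weights=[weight])
    generator = torch.Generator().manual_seed(seed)
    images.append(
        pipe(
            prompt=prompt,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=generator,
        ).images[0]
    )
make_image_grid(images, rows=1, cols=3)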
ComfyUI workflow JSON: https://huggingface.co./radames/sdxl-DPO-LoRA/raw/main/workflow-sdxl-dpo-lora.json
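To fetch the workflow file programmatically instead of via the raw URL, a minimal sketch using huggingface_hub (assuming the file lives at the repo root, as the URL suggests):

from huggingface_hub import hf_hub_download

# Sketch: download the workflow JSON from the model repo
workflow_path = hf_hub_download(
    repo_id="radames/sdxl-DPO-LoRA",
    filename="workflow-sdxl-dpo-lora.json",
)
print(workflow_path)  # local path to the cached file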