bayndrysf committed on
Commit
ba22b2a
1 Parent(s): a551af7

Update app.py

Files changed (1): app.py (+42 -20)
app.py CHANGED
@@ -3,38 +3,60 @@ import numpy as np
 import random
 from diffusers import DiffusionPipeline
 import torch
+from PIL import Image
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
+base_model = 'stabilityai/stable-diffusion-2'
+prj_path = "bayndrysf/dreambooth-project-styl"
+huggingface_user = "bayndrysf"
+
 if torch.cuda.is_available():
     torch.cuda.max_memory_allocated(device=device)
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+    pipe = DiffusionPipeline.from_pretrained(base_model)
     pipe.enable_xformers_memory_efficient_attention()
-    pipe = pipe.to(device)
+    pipe.to(device);
+    pipe.load_lora_weights(prj_path, weight_name="pytorch_lora_weights.safetensors")
 else:
-    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
-    pipe = pipe.to(device)
+    pipe = DiffusionPipeline.from_pretrained(base_model)
+    pipe.to(device);
+    pipe.load_lora_weights(prj_path, weight_name="pytorch_lora_weights.safetensors")
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
 
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt = prompt,
-        negative_prompt = negative_prompt,
-        guidance_scale = guidance_scale,
-        num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        generator = generator
-    ).images[0]
-
+
+def image_grid(imgs, rows, cols, resize=256):
+    assert len(imgs) == rows * cols
+
+    if resize is not None:
+        imgs = [img.resize((resize, resize)) for img in imgs]
+
+    w, h = imgs[0].size
+    grid_w, grid_h = cols * w, rows * h
+    grid = Image.new("RGB", size=(grid_w, grid_h))
+
+    for i, img in enumerate(imgs):
+        x = i % cols * w
+        y = i // cols * h
+        grid.paste(img, box=(x, y))
+
+    return grid
+
+
+MAX_SEED = np.iinfo(np.int32).max
+MAX_IMAGE_SIZE = 1024
+
+
+def generate_image(prompt):
+    image = pipe(prompt=prompt, num_inference_steps=25, num_images_per_prompt = 1)
+    return image_grid(image.images, 1, 1, 512)
+
+
+
+def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
+    image = generate_image(prompt)
     return image
 
 examples = [
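
For reference, the generation path introduced by this commit can be exercised outside the app with a minimal standalone sketch along the following lines. This is not part of the commit; it assumes the diffusers and torch packages are installed, that the bayndrysf/dreambooth-project-styl LoRA repository is reachable, and the prompt string is only a placeholder:

import torch
from diffusers import DiffusionPipeline

base_model = "stabilityai/stable-diffusion-2"   # base checkpoint named in the commit
prj_path = "bayndrysf/dreambooth-project-styl"  # DreamBooth LoRA weights repository

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base pipeline, move it to the available device, then apply the
# LoRA weights, mirroring the branch taken in the updated app.py.
pipe = DiffusionPipeline.from_pretrained(base_model)
pipe.to(device)
pipe.load_lora_weights(prj_path, weight_name="pytorch_lora_weights.safetensors")

# Placeholder prompt; the effective trigger token depends on how the LoRA was trained.
result = pipe(prompt="a photo in the trained style", num_inference_steps=25, num_images_per_prompt=1)
result.images[0].save("output.png")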