Matthew Trentacoste committed on
Commit
df26d21
1 Parent(s): e70b7fc

updating to use StableDiffusionImageVariationEmbedsPipeline from my diffusers repo

Browse files
Files changed (2) hide show
  1. app.py +25 -9
  2. requirements.txt +4 -2
app.py CHANGED
@@ -2,26 +2,35 @@ import gradio as gr
2
  import torch
3
  from PIL import Image
4
 
5
- from lambda_diffusers import StableDiffusionImageEmbedPipeline
6
 
7
  def main(
8
  input_im,
 
 
 
9
  scale=3.0,
10
- n_samples=4,
11
  steps=25,
12
  seed=0,
13
  ):
 
14
  generator = torch.Generator(device=device).manual_seed(int(seed))
15
 
 
16
  images_list = pipe(
17
  n_samples*[input_im],
 
 
 
18
  guidance_scale=scale,
19
  num_inference_steps=steps,
20
  generator=generator,
21
  )
22
 
 
 
23
  images = []
24
- for i, image in enumerate(images_list["sample"]):
25
  if(images_list["nsfw_content_detected"][i]):
26
  safe_image = Image.open(r"unsafe.png")
27
  images.append(safe_image)
@@ -57,16 +66,22 @@ More details on the method and training will come in a future blog post.
57
  """
58
 
59
  device = "cuda" if torch.cuda.is_available() else "cpu"
60
- pipe = StableDiffusionImageEmbedPipeline.from_pretrained(
61
- "lambdalabs/sd-image-variations-diffusers",
62
- revision="273115e88df42350019ef4d628265b8c29ef4af5",
63
  )
64
  pipe = pipe.to(device)
65
 
 
 
 
 
 
66
  inputs = [
67
  gr.Image(),
 
 
 
68
  gr.Slider(0, 25, value=3, step=1, label="Guidance scale"),
69
- gr.Slider(1, 4, value=1, step=1, label="Number images"),
70
  gr.Slider(5, 50, value=25, step=5, label="Steps"),
71
  gr.Number(0, labal="Seed", precision=0)
72
  ]
@@ -74,8 +89,9 @@ output = gr.Gallery(label="Generated variations")
74
  output.style(grid=2)
75
 
76
  examples = [
77
- ["examples/vermeer.jpg", 3, 1, 25, 0],
78
- ["examples/matisse.jpg", 3, 1, 25, 0],
 
79
  ]
80
 
81
  demo = gr.Interface(
 
2
  import torch
3
  from PIL import Image
4
 
5
+ from diffusers import StableDiffusionImageVariationEmbedsPipeline
6
 
7
  def main(
8
  input_im,
9
+ base_prompt=None,
10
+ edit_prompt=None,
11
+ edit_prompt_weight=1.0,
12
  scale=3.0,
 
13
  steps=25,
14
  seed=0,
15
  ):
16
+
17
  generator = torch.Generator(device=device).manual_seed(int(seed))
18
 
19
+ n_samples = 1
20
  images_list = pipe(
21
  n_samples*[input_im],
22
+ base_prompt=base_prompt,
23
+ edit_prompt=edit_prompt,
24
+ edit_prompt_weight=edit_prompt_weight,
25
  guidance_scale=scale,
26
  num_inference_steps=steps,
27
  generator=generator,
28
  )
29
 
30
+ return images_list.images
31
+
32
  images = []
33
+ for i, image in enumerate(images_list.images):
34
  if(images_list["nsfw_content_detected"][i]):
35
  safe_image = Image.open(r"unsafe.png")
36
  images.append(safe_image)
 
66
  """
67
 
68
  device = "cuda" if torch.cuda.is_available() else "cpu"
69
+ pipe = StableDiffusionImageVariationEmbedsPipeline.from_pretrained(
70
+ "matttrent/sd-image-variations-diffusers",
 
71
  )
72
  pipe = pipe.to(device)
73
 
74
+ def dummy(images, **kwargs):
75
+ return images, False * len(images)
76
+
77
+ pipe.safety_checker = dummy
78
+
79
  inputs = [
80
  gr.Image(),
81
+ gr.Textbox(label="Base prompt"),
82
+ gr.Textbox(label="Edit prompt"),
83
+ gr.Slider(0.0, 2.0, value=1.0, step=0.1, label="Edit prompt weight"),
84
  gr.Slider(0, 25, value=3, step=1, label="Guidance scale"),
 
85
  gr.Slider(5, 50, value=25, step=5, label="Steps"),
86
  gr.Number(0, labal="Seed", precision=0)
87
  ]
 
89
  output.style(grid=2)
90
 
91
  examples = [
92
+ ["examples/painted ladies.png", None, None, 1.0, 3, 25, 0],
93
+ ["examples/painted ladies.png", "a color photograph", "a black and white photograph", 1.0, 3, 25, 0],
94
+ ["examples/painted ladies.png", "a color photograph", "a brightly colored oil painting", 1.0, 3, 25, 0],
95
  ]
96
 
97
  demo = gr.Interface(
requirements.txt CHANGED
@@ -1,3 +1,5 @@
1
- git+https://github.com/LambdaLabsML/lambda-diffusers.git#egg=lambda-diffusers
2
  --extra-index-url https://download.pytorch.org/whl/cu113
3
- torch
 
 
 
1
+ git+https://github.com/matttrent/diffusers.git#egg=diffusers
2
  --extra-index-url https://download.pytorch.org/whl/cu113
3
+ torch
4
+ transformers
5
+ accelerate