blanchon committed on
Commit
045d323
·
1 Parent(s): 4788158

Add smooth transition

Browse files
Files changed (2) hide show
  1. README.md +2 -2
  2. app.py +37 -39
README.md CHANGED
@@ -8,8 +8,8 @@ python_version: 3.12
8
  sdk_version: 5.12.0
9
  suggested_hardware: a100-large
10
  app_file: app.py
11
- fullWidth: true
12
- header: mini
13
  # models: blanchon/anyfurnish
14
  # datasets: blanchon/anyfurnish-dataset
15
  tags:
 
8
  sdk_version: 5.12.0
9
  suggested_hardware: a100-large
10
  app_file: app.py
11
+ # fullWidth: true
12
+ # header: mini
13
  # models: blanchon/anyfurnish
14
  # datasets: blanchon/anyfurnish-dataset
15
  tags:
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import secrets
2
  from pathlib import Path
3
  from typing import cast
@@ -25,7 +26,9 @@ MASK_CONTEXT_PADDING = 16 * 8
25
  if not torch.cuda.is_available():
26
 
27
  def _dummy_pipe(image: Image.Image, *args, **kwargs): # noqa: ARG001
28
- return {"images": [image]}
 
 
29
 
30
  pipe = _dummy_pipe
31
  else:
@@ -98,10 +101,10 @@ def pad(
98
  if palette is not None:
99
  out.putpalette(palette)
100
  if resized.width != size[0]:
101
- x = round((size[0] - resized.width) * max(0, max(centering[0], 1)))
102
  out.paste(resized, (x, 0))
103
  else:
104
- y = round((size[1] - resized.height) * max(0, max(centering[1], 1)))
105
  out.paste(resized, (0, y))
106
  return out, resized_size
107
 
@@ -113,18 +116,6 @@ def unpad(
113
  centering: tuple[float, float] = (1, 1),
114
  method: int = Image.Resampling.BICUBIC,
115
  ) -> Image.Image:
116
- """
117
- Remove the padding added by the `pad` function to recover the original resized image.
118
-
119
- Args:
120
- padded_image (Image.Image): The padded image.
121
- padded_size (tuple[int, int]): The original size of the resized image before padding.
122
- centering (tuple[float, float]): The centering used during padding (x, y), defaults to (1, 1).
123
-
124
- Returns:
125
- Image.Image: The cropped image matching the original resized dimensions.
126
-
127
- """
128
  width, height = padded_image.size
129
  padded_width, padded_height = padded_size
130
 
@@ -210,50 +201,54 @@ def infer(
210
  raise ValueError(msg)
211
  room_mask = cast("Image.Image", room_mask)
212
 
213
- mask_bbox_x_min, mask_bbox_y_min, mask_bbox_x_max, mask_bbox_y_max = (
214
- room_mask.getbbox(alpha_only=False)
215
- )
216
  mask_bbox_x_min, mask_bbox_y_min, mask_bbox_x_max, mask_bbox_y_max = (
217
  adjust_bbox_to_divisible_16(
218
- mask_bbox_x_min,
219
- mask_bbox_y_min,
220
- mask_bbox_x_max,
221
- mask_bbox_y_max,
222
- room_mask.width,
223
- room_mask.height,
224
  padding=MASK_CONTEXT_PADDING,
225
  )
226
  )
227
 
228
- bbox_longest_side = max(
229
- mask_bbox_x_max - mask_bbox_x_min,
230
- mask_bbox_y_max - mask_bbox_y_min,
231
- )
232
-
233
  room_image_cropped = room_image.crop((
234
  mask_bbox_x_min,
235
  mask_bbox_y_min,
236
  mask_bbox_x_max,
237
  mask_bbox_y_max,
238
  ))
239
- room_image_cropped, room_image_cropped_size = pad(
240
  room_image_cropped,
241
  (max_dimension, max_dimension),
242
  )
243
 
244
- room_mask_cropped = room_mask.crop((
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
245
  mask_bbox_x_min,
246
  mask_bbox_y_min,
247
  mask_bbox_x_max,
248
  mask_bbox_y_max,
249
  ))
250
- room_mask_cropped, _ = pad(
251
  room_mask_cropped,
252
  (max_dimension, max_dimension),
253
  )
254
 
255
- room_image_cropped.save("room_image_cropped.png")
256
- room_mask_cropped.save("room_mask_cropped.png")
257
 
258
  furniture_image, _ = pad(
259
  furniture_image_input,
@@ -269,7 +264,7 @@ def infer(
269
  )
270
  # Paste on the center of the image
271
  image.paste(furniture_image, (0, 0))
272
- image.paste(room_image_cropped, (max_dimension, 0))
273
 
274
  mask = Image.new(
275
  "RGB",
@@ -277,7 +272,7 @@ def infer(
277
  (255, 255, 255),
278
  )
279
  mask.paste(furniture_mask, (0, 0))
280
- mask.paste(room_mask_cropped, (max_dimension, 0), room_mask_cropped)
281
  # Invert the mask
282
  mask = ImageOps.invert(mask)
283
  # Blur the mask
@@ -311,7 +306,7 @@ def infer(
311
 
312
  image_generated = unpad(
313
  image,
314
- room_image_cropped_size,
315
  (
316
  mask_bbox_x_max - mask_bbox_x_min,
317
  mask_bbox_y_max - mask_bbox_y_min,
@@ -319,7 +314,11 @@ def infer(
319
  )
320
  # Paste the image on the room image as the crop was done
321
  # on the room image
322
- final_image.paste(image_generated, (mask_bbox_x_min, mask_bbox_y_min))
 
 
 
 
323
  final_images.append(final_image)
324
 
325
  return final_images, seed
@@ -439,7 +438,6 @@ with gr.Blocks(css=css) as demo:
439
  )
440
  results = gr.Gallery(
441
  label="Results",
442
- format="png",
443
  show_label=False,
444
  columns=2,
445
  height=500,
 
1
+ import math
2
  import secrets
3
  from pathlib import Path
4
  from typing import cast
 
26
  if not torch.cuda.is_available():
27
 
28
  def _dummy_pipe(image: Image.Image, *args, **kwargs): # noqa: ARG001
29
+ # return {"images": [image]} # noqa: ERA001
30
+ blue_image = Image.new("RGB", image.size, (0, 0, 255))
31
+ return {"images": [blue_image]}
32
 
33
  pipe = _dummy_pipe
34
  else:
 
101
  if palette is not None:
102
  out.putpalette(palette)
103
  if resized.width != size[0]:
104
+ x = round((size[0] - resized.width) * max(0, min(centering[0], 1)))
105
  out.paste(resized, (x, 0))
106
  else:
107
+ y = round((size[1] - resized.height) * max(0, min(centering[1], 1)))
108
  out.paste(resized, (0, y))
109
  return out, resized_size
110
 
 
116
  centering: tuple[float, float] = (1, 1),
117
  method: int = Image.Resampling.BICUBIC,
118
  ) -> Image.Image:
 
 
 
 
 
 
 
 
 
 
 
 
119
  width, height = padded_image.size
120
  padded_width, padded_height = padded_size
121
 
 
201
  raise ValueError(msg)
202
  room_mask = cast("Image.Image", room_mask)
203
 
 
 
 
204
  mask_bbox_x_min, mask_bbox_y_min, mask_bbox_x_max, mask_bbox_y_max = (
205
  adjust_bbox_to_divisible_16(
206
+ *room_mask.getbbox(alpha_only=False),
207
+ width=room_mask.width,
208
+ height=room_mask.height,
 
 
 
209
  padding=MASK_CONTEXT_PADDING,
210
  )
211
  )
212
 
 
 
 
 
 
213
  room_image_cropped = room_image.crop((
214
  mask_bbox_x_min,
215
  mask_bbox_y_min,
216
  mask_bbox_x_max,
217
  mask_bbox_y_max,
218
  ))
219
+ room_image_padded, room_image_padded_size = pad(
220
  room_image_cropped,
221
  (max_dimension, max_dimension),
222
  )
223
 
224
+ # grow_and_blur_mask
225
+ grow_pixels = 10
226
+ sigma_grow = grow_pixels / 4
227
+ kernel_size_grow = math.ceil(sigma_grow * 1.5 + 1)
228
+ room_mask_grow = room_mask.filter(
229
+ ImageFilter.MaxFilter(size=2 * kernel_size_grow + 1)
230
+ )
231
+
232
+ blur_pixels = 33
233
+ sigma_blur = blur_pixels / 4
234
+ kernel_size_blur = math.ceil(sigma_blur * 1.5 + 1)
235
+ room_mask_blurred = room_mask_grow.filter(
236
+ ImageFilter.GaussianBlur(radius=kernel_size_blur)
237
+ )
238
+
239
+ room_mask_cropped = room_mask_blurred.crop((
240
  mask_bbox_x_min,
241
  mask_bbox_y_min,
242
  mask_bbox_x_max,
243
  mask_bbox_y_max,
244
  ))
245
+ room_mask_padded, _ = pad(
246
  room_mask_cropped,
247
  (max_dimension, max_dimension),
248
  )
249
 
250
+ room_image_padded.save("room_image_padded.png")
251
+ room_mask_padded.save("room_mask_padded.png")
252
 
253
  furniture_image, _ = pad(
254
  furniture_image_input,
 
264
  )
265
  # Paste on the center of the image
266
  image.paste(furniture_image, (0, 0))
267
+ image.paste(room_image_padded, (max_dimension, 0))
268
 
269
  mask = Image.new(
270
  "RGB",
 
272
  (255, 255, 255),
273
  )
274
  mask.paste(furniture_mask, (0, 0))
275
+ mask.paste(room_mask_padded, (max_dimension, 0), room_mask_padded)
276
  # Invert the mask
277
  mask = ImageOps.invert(mask)
278
  # Blur the mask
 
306
 
307
  image_generated = unpad(
308
  image,
309
+ room_image_padded_size,
310
  (
311
  mask_bbox_x_max - mask_bbox_x_min,
312
  mask_bbox_y_max - mask_bbox_y_min,
 
314
  )
315
  # Paste the image on the room image as the crop was done
316
  # on the room image
317
+ final_image.paste(
318
+ image_generated,
319
+ (mask_bbox_x_min, mask_bbox_y_min),
320
+ room_mask_cropped,
321
+ )
322
  final_images.append(final_image)
323
 
324
  return final_images, seed
 
438
  )
439
  results = gr.Gallery(
440
  label="Results",
 
441
  show_label=False,
442
  columns=2,
443
  height=500,