Commit 0feaae9
Parent(s): 75c898e
call zero to 100
handler.py CHANGED (+148 -150)
@@ -165,160 +165,158 @@ class EndpointHandler():
         self.app.prepare(ctx_id=0, det_size=(640, 640))

     def __call__(self, param):
-        # self.pipe.scheduler = diffusers.LCMScheduler.from_config(self.pipe.scheduler.config)
-        # self.pipe.enable_lora()
-        [removed lines 172-324: content not preserved in this capture]
+        self.pipe.scheduler = diffusers.LCMScheduler.from_config(self.pipe.scheduler.config)
+        self.pipe.enable_lora()
+
+        # ControlNet / IP-Adapter strengths
+        adapter_strength_ratio = 0.8
+        identitynet_strength_ratio = 0.8
+        pose_strength = 0.4
+        canny_strength = 0.3
+        depth_strength = 0.5
+        controlnet_selection = ["pose", "canny", "depth"]
+
+        face_image_path = "./kaifu_resize.png"
+        pose_image_path = "./pose.jpg"
+
+        # helpers (defined before first use)
+        def convert_from_cv2_to_image(img: np.ndarray) -> Image:
+            return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+
+        def convert_from_image_to_cv2(img: Image) -> np.ndarray:
+            return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
+
+        def resize_img(
+            input_image,
+            max_side=1280,
+            min_side=1024,
+            size=None,
+            pad_to_max_side=False,
+            mode=PIL.Image.BILINEAR,
+            base_pixel_number=64,
+        ):
+            w, h = input_image.size
+            if size is not None:
+                w_resize_new, h_resize_new = size
+            else:
+                ratio = min_side / min(h, w)
+                w, h = round(ratio * w), round(ratio * h)
+                ratio = max_side / max(h, w)
+                input_image = input_image.resize([round(ratio * w), round(ratio * h)], mode)
+                w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
+                h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
+            input_image = input_image.resize([w_resize_new, h_resize_new], mode)
+
+            if pad_to_max_side:
+                res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
+                offset_x = (max_side - w_resize_new) // 2
+                offset_y = (max_side - h_resize_new) // 2
+                res[
+                    offset_y : offset_y + h_resize_new, offset_x : offset_x + w_resize_new
+                ] = np.array(input_image)
+                input_image = Image.fromarray(res)
+            return input_image
+
+        # check if the input is valid
+        # if face_image_path is None:
+        #     raise gr.Error(
+        #         f"Cannot find any input face image! Please upload the face image"
+        #     )
+        # check the prompt
+        # if prompt is None:
+        prompt = "a person"
+        negative_prompt = ""
+
+        # apply the style template
+        # prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
+
+        face_image = load_image(face_image_path)
+        face_image = resize_img(face_image, max_side=1024)
+        face_image_cv2 = convert_from_image_to_cv2(face_image)
+        height, width, _ = face_image_cv2.shape
+
+        # Extract face features
+        face_info = self.app.get(face_image_cv2)
+
+        print("error if no face is found")  # placeholder for the disabled check below
+        # if len(face_info) == 0:
+        #     raise gr.Error(
+        #         f"Unable to detect a face in the image. Please upload a different photo with a clear face."
+        #     )
+
+        face_info = sorted(
+            face_info,
+            key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]),
+        )[-1]  # only use the largest face
+
+        face_emb = face_info["embedding"]
+        face_kps = draw_kps(convert_from_cv2_to_image(face_image_cv2), face_info["kps"])
+        img_controlnet = face_image
+        if pose_image_path is not None:
+            pose_image = load_image(pose_image_path)
+            pose_image = resize_img(pose_image, max_side=1024)
+            img_controlnet = pose_image
+            pose_image_cv2 = convert_from_image_to_cv2(pose_image)
+
+            face_info = self.app.get(pose_image_cv2)
+
+            # raise an error if no face is detected
+            # if len(face_info) == 0:
+            #     raise gr.Error(
+            #         f"Cannot find any face in the reference image! Please upload another person image"
+            #     )
+
+            face_info = face_info[-1]
+            face_kps = draw_kps(pose_image, face_info["kps"])
+
+            width, height = face_kps.size
+
+        # restrict the ControlNet effect to the face region
+        control_mask = np.zeros([height, width, 3])
+        x1, y1, x2, y2 = face_info["bbox"]
+        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+        control_mask[y1:y2, x1:x2] = 255
+        control_mask = Image.fromarray(control_mask.astype(np.uint8))
+
+        if len(controlnet_selection) > 0:
+            controlnet_scales = {
+                "pose": pose_strength,
+                "canny": canny_strength,
+                "depth": depth_strength,
+            }
+            self.pipe.controlnet = MultiControlNetModel(
+                [self.controlnet_identitynet]
+                + [self.controlnet_map[s] for s in controlnet_selection]
+            )
+            control_scales = [float(identitynet_strength_ratio)] + [
+                controlnet_scales[s] for s in controlnet_selection
+            ]
+            control_images = [face_kps] + [
+                self.controlnet_map_fn[s](img_controlnet).resize((width, height))
+                for s in controlnet_selection
+            ]
+        else:
+            self.pipe.controlnet = self.controlnet_identitynet
+            control_scales = float(identitynet_strength_ratio)
+            control_images = face_kps
+
+        generator = torch.Generator(device=device.type).manual_seed(3)
+
+        print("Start inference...")
+
+        self.pipe.set_ip_adapter_scale(adapter_strength_ratio)
+        images = self.pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image_embeds=face_emb,
+            image=control_images,
+            control_mask=control_mask,
+            controlnet_conditioning_scale=control_scales,
+            num_inference_steps=30,
+            guidance_scale=7.5,
+            height=height,
+            width=width,
+            generator=generator,
+        ).images
+
+        return images[0]
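The committed `__call__` ignores its `param` argument and reads its inputs from the hard-coded paths `./kaifu_resize.png` and `./pose.jpg`, so it can be smoke-tested locally. A minimal sketch, assuming the usual Hugging Face Inference Endpoints custom-handler convention of an `EndpointHandler(path)` constructor; the model-directory argument and the output filename below are illustrative, not part of this commit:

    from handler import EndpointHandler

    if __name__ == "__main__":
        # "." is a hypothetical model directory; the real constructor argument
        # depends on how EndpointHandler.__init__ is defined elsewhere in handler.py.
        handler = EndpointHandler(".")
        image = handler({})  # `param` is unused by this version of __call__
        image.save("output.png")  # __call__ returns the first generated PIL image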