yamildiego committed on
Commit
edb4ffd
·
1 Parent(s): e3e308f
Files changed (1) hide show
  1. handler.py +34 -33
handler.py CHANGED
@@ -101,13 +101,13 @@ class EndpointHandler():
101
 
102
 
103
  # controlnet-pose/canny/depth
104
- # controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
105
  # controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
106
  # controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"
107
 
108
- # controlnet_pose = ControlNetModel.from_pretrained(
109
- # controlnet_pose_model, torch_dtype=dtype
110
- # ).to(device)
111
  # controlnet_canny = ControlNetModel.from_pretrained(
112
  # controlnet_canny_model, torch_dtype=dtype
113
  # ).to(device)
@@ -141,21 +141,21 @@ class EndpointHandler():
141
 
142
  # return depth_image
143
 
144
- # self.controlnet_map = {
145
- # "pose": controlnet_pose,
146
  # "canny": controlnet_canny,
147
  # "depth": controlnet_depth,
148
- # }
149
 
150
- # openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
151
  # depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(device).eval()
152
 
153
 
154
- # self.controlnet_map_fn = {
155
- # "pose": openpose,
156
  # "canny": get_canny_image,
157
  # "depth": get_depth_map,
158
- # }
159
 
160
  self.app = FaceAnalysis(name="buffalo_l", root="./", providers=["CPUExecutionProvider"])
161
  self.app.prepare(ctx_id=0, det_size=(640, 640))
@@ -166,9 +166,10 @@ class EndpointHandler():
166
 
167
  adapter_strength_ratio = 0.8
168
  identitynet_strength_ratio = 0.8
169
- # pose_strength = 0.4
170
  # canny_strength = 0.3
171
  # depth_strength = 0.5
 
172
  # controlnet_selection = ["pose", "canny", "depth"]
173
 
174
  face_image_path = "https://i.ibb.co/SKg69dD/kaifu-resize.png"
@@ -276,27 +277,27 @@ class EndpointHandler():
276
  control_mask[y1:y2, x1:x2] = 255
277
  control_mask = Image.fromarray(control_mask.astype(np.uint8))
278
 
279
- # if len(controlnet_selection) > 0:
280
- # controlnet_scales = {
281
- # "pose": pose_strength,
282
- # "canny": canny_strength,
283
- # "depth": depth_strength,
284
- # }
285
- # self.pipe.controlnet = MultiControlNetModel(
286
- # [self.controlnet_identitynet]
287
- # + [self.controlnet_map[s] for s in controlnet_selection]
288
- # )
289
- # control_scales = [float(identitynet_strength_ratio)] + [
290
- # controlnet_scales[s] for s in controlnet_selection
291
- # ]
292
- # control_images = [face_kps] + [
293
- # self.controlnet_map_fn[s](img_controlnet).resize((width, height))
294
- # for s in controlnet_selection
295
- # ]
296
- # else:
297
- self.pipe.controlnet = self.controlnet_identitynet
298
- control_scales = float(identitynet_strength_ratio)
299
- control_images = face_kps
300
 
301
  generator = torch.Generator(device=device.type).manual_seed(3)
302
 
 
101
 
102
 
103
  # controlnet-pose/canny/depth
104
+ controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
105
  # controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
106
  # controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"
107
 
108
+ controlnet_pose = ControlNetModel.from_pretrained(
109
+ controlnet_pose_model, torch_dtype=dtype
110
+ ).to(device)
111
  # controlnet_canny = ControlNetModel.from_pretrained(
112
  # controlnet_canny_model, torch_dtype=dtype
113
  # ).to(device)
 
141
 
142
  # return depth_image
143
 
144
+ self.controlnet_map = {
145
+ "pose": controlnet_pose,
146
  # "canny": controlnet_canny,
147
  # "depth": controlnet_depth,
148
+ }
149
 
150
+ openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
151
  # depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(device).eval()
152
 
153
 
154
+ self.controlnet_map_fn = {
155
+ "pose": openpose,
156
  # "canny": get_canny_image,
157
  # "depth": get_depth_map,
158
+ }
159
 
160
  self.app = FaceAnalysis(name="buffalo_l", root="./", providers=["CPUExecutionProvider"])
161
  self.app.prepare(ctx_id=0, det_size=(640, 640))
 
166
 
167
  adapter_strength_ratio = 0.8
168
  identitynet_strength_ratio = 0.8
169
+ pose_strength = 0.4
170
  # canny_strength = 0.3
171
  # depth_strength = 0.5
172
+ controlnet_selection = ["pose"]
173
  # controlnet_selection = ["pose", "canny", "depth"]
174
 
175
  face_image_path = "https://i.ibb.co/SKg69dD/kaifu-resize.png"
 
277
  control_mask[y1:y2, x1:x2] = 255
278
  control_mask = Image.fromarray(control_mask.astype(np.uint8))
279
 
280
+ if len(controlnet_selection) > 0:
281
+ controlnet_scales = {
282
+ "pose": pose_strength,
283
+ # "canny": canny_strength,
284
+ # "depth": depth_strength,
285
+ }
286
+ self.pipe.controlnet = MultiControlNetModel(
287
+ [self.controlnet_identitynet]
288
+ + [self.controlnet_map[s] for s in controlnet_selection]
289
+ )
290
+ control_scales = [float(identitynet_strength_ratio)] + [
291
+ controlnet_scales[s] for s in controlnet_selection
292
+ ]
293
+ control_images = [face_kps] + [
294
+ self.controlnet_map_fn[s](img_controlnet).resize((width, height))
295
+ for s in controlnet_selection
296
+ ]
297
+ else:
298
+ self.pipe.controlnet = self.controlnet_identitynet
299
+ control_scales = float(identitynet_strength_ratio)
300
+ control_images = face_kps
301
 
302
  generator = torch.Generator(device=device.type).manual_seed(3)
303