Update pulid/pipeline_flux.py
pulid/pipeline_flux.py (CHANGED, +0 -7)
@@ -44,7 +44,6 @@ class PuLIDPipeline(nn.Module):
 
         # preprocessors
         # face align and parsing
-        print('pipeline init: ', self.device)
         self.face_helper = FaceRestoreHelper(
             upscale_factor=1,
             face_size=512,
@@ -55,9 +54,6 @@ class PuLIDPipeline(nn.Module):
         )
         self.face_helper.face_parse = None
         self.face_helper.face_parse = init_parsing_model(model_name='bisenet', device=self.device)
-        self.face_helper.face_parse = self.face_helper.face_parse.to(self.device)
-        self.face_helper.face_det = self.face_helper.face_det.to(self.device)
-        self.face_helper.face_det.body = self.face_helper.face_det.body.to(self.device)
         # clip-vit backbone
         model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', 'eva_clip', force_custom_clip=True)
         model = model.visual
@@ -138,9 +134,6 @@ class PuLIDPipeline(nn.Module):
 
         # using facexlib to detect and align face
         self.face_helper.read_image(image_bgr)
-        print('face_det_device: ', self.face_helper.face_det.device)
-        print('face_det_mean_tensor_device: ', self.face_helper.face_det.mean_tensor.device)
-        self.face_helper.face_det.mean_tensor = self.face_helper.face_det.mean_tensor.to(self.device)
         self.face_helper.get_face_landmarks_5(only_center_face=True)
         self.face_helper.align_warp_face()
         if len(self.face_helper.cropped_faces) == 0:
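For reference, a minimal sketch of how the face helper ends up initialized after this commit, reconstructed from the hunks above. The extra device keyword on FaceRestoreHelper is an assumption based on facexlib's interface rather than part of this diff; since init_parsing_model(..., device=...) already places the parsing model, the removed .to(self.device) moves and debug prints are not needed.

# Standalone sketch (not the full PuLIDPipeline.__init__): assumes facexlib's
# FaceRestoreHelper and init_parsing_model, both of which accept a `device`
# argument, so no later .to(device) calls or debug prints are required.
import torch
from facexlib.parsing import init_parsing_model
from facexlib.utils.face_restoration_helper import FaceRestoreHelper

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# face align and parsing
face_helper = FaceRestoreHelper(
    upscale_factor=1,
    face_size=512,
    device=device,  # assumption: pass the target device up front
)
face_helper.face_parse = init_parsing_model(model_name='bisenet', device=device)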