Upload folder using huggingface_hub
hy3dgen/shapegen/pipelines.py
CHANGED
@@ -445,7 +445,7 @@ class Hunyuan3DDiTPipeline:
                 latent_model_input = torch.cat([latents] * (3 if dual_guidance else 2))
             else:
                 latent_model_input = latents
-            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+            latent_model_input = scheduler.scale_model_input(latent_model_input, t)
 
             # predict the noise residual
             timestep_tensor = torch.tensor([t], dtype=t_dtype, device=device)
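For context: scale_model_input is the diffusers-style scheduler hook for schedulers whose latents are not unit variance; EulerDiscreteScheduler, for instance, divides the input by sqrt(sigma^2 + 1), while for DDIM-like schedulers it is the identity, so calling it unconditionally is safe. A minimal sketch of the input-prep step above, with do_cfg/dual_guidance as illustrative stand-ins for this pipeline's flags:

import torch

def prepare_latent_input(latents, t, scheduler, do_cfg=True, dual_guidance=False):
    # Duplicate the latents once per guidance branch so a single forward
    # pass evaluates cond/uncond (plus the extra branch under dual guidance).
    if do_cfg:
        latent_model_input = torch.cat([latents] * (3 if dual_guidance else 2))
    else:
        latent_model_input = latents
    # Identity for DDIM-like schedulers; sigma-dependent rescaling for
    # Euler-like ones.
    return scheduler.scale_model_input(latent_model_input, t)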
@@ -466,11 +466,11 @@ class Hunyuan3DDiTPipeline:
                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
 
             # compute the previous noisy sample x_t -> x_t-1
-            outputs = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)
+            outputs = scheduler.step(noise_pred, t, latents, **extra_step_kwargs)
             latents = outputs.prev_sample
 
             if callback is not None and i % callback_steps == 0:
-                step_idx = i // getattr(self.scheduler, "order", 1)
+                step_idx = i // getattr(scheduler, "order", 1)
                 callback(step_idx, t, outputs)
 
         return self._export(
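For context: scheduler.step(...) in diffusers-style schedulers returns an output object whose prev_sample field is the latent for the next iteration, and i // getattr(scheduler, "order", 1) converts raw loop iterations into user-visible step indices, since higher-order schedulers advance one denoising step every `order` iterations (the getattr default covers schedulers that define no `order`). A runnable toy loop, using DDIMScheduler purely as a stand-in for whatever scheduler_cfg selects:

import torch
from diffusers import DDIMScheduler  # stand-in; the real scheduler comes from scheduler_cfg

scheduler = DDIMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

latents = torch.randn(1, 4, 8, 8)
for i, t in enumerate(scheduler.timesteps):
    noise_pred = torch.zeros_like(latents)   # stand-in for the DiT's prediction
    outputs = scheduler.step(noise_pred, t, latents)
    latents = outputs.prev_sample            # x_t -> x_t-1
    step_idx = i // getattr(scheduler, "order", 1)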
@@ -543,8 +543,9 @@ class Hunyuan3DDiTFlowMatchingPipeline(Hunyuan3DDiTPipeline):
         # 5. Prepare timesteps
         # NOTE: this is slightly different from common usage, we start from 0.
         sigmas = np.linspace(0, 1, num_inference_steps) if sigmas is None else sigmas
+        scheduler = instantiate_from_config(self.kwargs['scheduler_cfg'])
         timesteps, num_inference_steps = retrieve_timesteps(
-            self.scheduler,
+            scheduler,
             num_inference_steps,
             device,
             sigmas=sigmas,
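The substantive change in this hunk: the scheduler is now instantiated from scheduler_cfg on every call rather than reused across calls. Since retrieve_timesteps calls set_timesteps and diffusers schedulers carry mutable step state, a per-call instance plausibly avoids cross-call interference (e.g. between concurrent requests); that rationale is inferred, not stated in the commit. instantiate_from_config itself is not shown here; a sketch of the conventional latent-diffusion-style helper it presumably matches (the repo's actual implementation may differ):

import importlib

def instantiate_from_config(cfg):
    # Conventional layout: {"target": "pkg.module.Class", "params": {...}}.
    module_path, cls_name = cfg["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_path), cls_name)
    return cls(**cfg.get("params", {}))

# Illustrative config only; the real scheduler_cfg ships with the checkpoint.
scheduler_cfg = {
    "target": "diffusers.FlowMatchEulerDiscreteScheduler",
    "params": {"num_train_timesteps": 1000},
}
scheduler = instantiate_from_config(scheduler_cfg)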
@@ -565,7 +566,7 @@ class Hunyuan3DDiTFlowMatchingPipeline(Hunyuan3DDiTPipeline):
 
             # NOTE: we assume model get timesteps ranged from 0 to 1
             timestep = t.expand(latent_model_input.shape[0]).to(
-                latents.dtype) / self.scheduler.config.num_train_timesteps
+                latents.dtype) / scheduler.config.num_train_timesteps
             noise_pred = self.model(latent_model_input, timestep, cond, guidance=guidance)
 
             if do_classifier_free_guidance:
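Per the NOTE in this hunk, the model expects timesteps in [0, 1], while scheduler timesteps live in [0, num_train_timesteps]; the division normalizes them. A worked example with a typical value:

import torch

num_train_timesteps = 1000      # scheduler.config.num_train_timesteps (typical value)
t = torch.tensor(250.0)         # one scheduler timestep
batch_size = 2                  # latent_model_input.shape[0]

timestep = t.expand(batch_size).to(torch.float32) / num_train_timesteps
print(timestep)                 # tensor([0.2500, 0.2500])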
@@ -573,11 +574,11 @@ class Hunyuan3DDiTFlowMatchingPipeline(Hunyuan3DDiTPipeline):
                 noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
 
             # compute the previous noisy sample x_t -> x_t-1
-            outputs = self.scheduler.step(noise_pred, t, latents)
+            outputs = scheduler.step(noise_pred, t, latents)
             latents = outputs.prev_sample
 
             if callback is not None and i % callback_steps == 0:
-                step_idx = i // getattr(self.scheduler, "order", 1)
+                step_idx = i // getattr(scheduler, "order", 1)
                 callback(step_idx, t, outputs)
 
         return self._export(
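Finally, a hypothetical use of the callback/callback_steps hook that both pipelines wire through the fresh scheduler; those keyword names appear in the diff, but the overall pipeline call signature is assumed:

# Hypothetical progress hook; `outputs` is the scheduler step output,
# so outputs.prev_sample is the current latent.
def on_step(step_idx, t, outputs):
    print(f"step {step_idx}: t={float(t):.1f}, latents {tuple(outputs.prev_sample.shape)}")

# pipeline(image, callback=on_step, callback_steps=5)  # assumed signature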