Skip to content

Commit 34407f7

Browse files
committed
update
1 parent 1a17cd8 commit 34407f7

File tree

2 files changed: +16 additions, −15 deletions

scripts/convert_ltx_to_diffusers.py

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
from diffusers import (
1111
AutoencoderKLLTXVideo,
1212
FlowMatchEulerDiscreteScheduler,
13+
LTXConditionPipeline,
1314
LTXLatentUpsamplePipeline,
1415
LTXPipeline,
1516
LTXVideoTransformer3DModel,
@@ -464,7 +465,7 @@ def get_args():
464465
for param in text_encoder.parameters():
465466
param.data = param.data.contiguous()
466467

467-
if args.version == "0.9.5":
468+
if args.version in ["0.9.5", "0.9.7"]:
468469
scheduler = FlowMatchEulerDiscreteScheduler(use_dynamic_shifting=False)
469470
else:
470471
scheduler = FlowMatchEulerDiscreteScheduler(
@@ -488,23 +489,23 @@ def get_args():
488489
output_path.as_posix(), safe_serialization=True, variant=variant, max_shard_size="5GB"
489490
)
490491
elif args.version in ["0.9.7"]:
491-
# pipe = LTXPipeline(
492-
# scheduler=scheduler,
493-
# vae=vae,
494-
# text_encoder=text_encoder,
495-
# tokenizer=tokenizer,
496-
# transformer=transformer,
497-
# )
492+
pipe = LTXConditionPipeline(
493+
scheduler=scheduler,
494+
vae=vae,
495+
text_encoder=text_encoder,
496+
tokenizer=tokenizer,
497+
transformer=transformer,
498+
)
498499
pipe_upsample = LTXLatentUpsamplePipeline(
499500
vae=vae,
500501
latent_upsampler=latent_upsampler,
501502
)
502-
# pipe.save_pretrained(
503-
# (output_path / "ltx_pipeline").as_posix(),
504-
# safe_serialization=True,
505-
# variant=variant,
506-
# max_shard_size="5GB",
507-
# )
503+
pipe.save_pretrained(
504+
(output_path / "ltx_pipeline").as_posix(),
505+
safe_serialization=True,
506+
variant=variant,
507+
max_shard_size="5GB",
508+
)
508509
pipe_upsample.save_pretrained(
509510
(output_path / "ltx_upsample_pipeline").as_posix(),
510511
safe_serialization=True,

src/diffusers/pipelines/ltx/pipeline_ltx_condition.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1168,7 +1168,7 @@ def __call__(
11681168
if not self.vae.config.timestep_conditioning:
11691169
timestep = None
11701170
else:
1171-
noise = torch.randn(latents.shape, generator=generator, device=device, dtype=latents.dtype)
1171+
noise = randn_tensor(latents.shape, generator=generator, device=device, dtype=latents.dtype)
11721172
if not isinstance(decode_timestep, list):
11731173
decode_timestep = [decode_timestep] * batch_size
11741174
if decode_noise_scale is None:

Comments (0)