[tests] unbloat tests/lora/utils.py #11845

Status: Open. Wants to merge 23 commits into main (showing changes from 12 commits).
28 changes: 0 additions & 28 deletions tests/lora/test_lora_layers_auraflow.py
@@ -103,34 +103,6 @@ def get_dummy_inputs(self, with_generator=True):

return noise, input_ids, pipeline_inputs

@unittest.skip("Not supported in AuraFlow.")
sayakpaul (Member, Author) commented on Jul 1, 2025:

These are skipped appropriately from the parent method. I think it's okay in this case, because it eases things a bit.
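For context on what "skipped from the parent method" can look like: a minimal sketch, assuming the shared mixin gates each test on a capability flag. The flag name (`supports_text_encoder_lora`) and the dummy classes are hypothetical, not the actual `tests/lora/utils.py` API.

```python
import unittest


class PeftLoraLoaderMixinTests:
    # Hypothetical capability flag; the real mixin may gate on pipeline
    # attributes instead.
    supports_text_encoder_lora = False

    def test_simple_inference_with_text_lora(self):
        if not self.supports_text_encoder_lora:
            self.skipTest("Text encoder LoRA is not supported by this pipeline.")
        # ... the shared assertion path would run here ...


class DummyPipelineLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
    # Inherits the test above; it reports as skipped because the flag is False.
    pass
```

With this pattern, child classes can drop their boilerplate `@unittest.skip` overrides, and the skip reason is maintained in one place.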

def test_simple_inference_with_text_denoiser_block_scale(self):
pass

@unittest.skip("Not supported in AuraFlow.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass

@unittest.skip("Not supported in AuraFlow.")
def test_modify_padding_mode(self):
pass

@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_partial_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_and_scale(self):
pass

@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_fused(self):
pass

@unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
def test_simple_inference_with_text_lora_save_load(self):
pass
49 changes: 16 additions & 33 deletions tests/lora/test_lora_layers_cogvideox.py
@@ -123,8 +123,21 @@ def get_dummy_inputs(self, with_generator=True):
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
@parameterized.expand(
[
# Test actions on text_encoder LoRA only
("fused", "text_encoder_only"),
("unloaded", "text_encoder_only"),
("save_load", "text_encoder_only"),
# Test actions on both text_encoder and denoiser LoRA
("fused", "text_and_denoiser"),
("unloaded", "text_and_denoiser"),
("unfused", "text_and_denoiser"),
("save_load", "text_and_denoiser"),
]
)
def test_lora_actions(self, action, components_to_add):
super().test_lora_actions(action, components_to_add, expected_atol=9e-3)

def test_lora_scale_kwargs_match_fusion(self):
super().test_lora_scale_kwargs_match_fusion(expected_atol=9e-3, expected_rtol=9e-3)
@@ -136,38 +149,8 @@ def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
# The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
super()._test_group_offloading_inference_denoiser(offload_type, use_stream)

@unittest.skip("Not supported in CogVideoX.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass

@unittest.skip("Not supported in CogVideoX.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass

@unittest.skip("Not supported in CogVideoX.")
def test_modify_padding_mode(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
def test_simple_inference_with_partial_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
def test_simple_inference_with_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
def test_simple_inference_with_text_lora_and_scale(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
def test_simple_inference_with_text_lora_fused(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
def test_simple_inference_with_text_lora_save_load(self):
pass

@unittest.skip("Not supported in CogVideoX.")
def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
pass
# TODO: skip them properly
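The `test_lora_actions` overrides above (and in the files below) all feed `action` and `components_to_add` into one shared parent test. Here is a rough sketch of how that consolidated test in `tests/lora/utils.py` might dispatch on those parameters. The helper names (`_setup_pipeline_and_inputs`, `_add_text_lora`, `_add_denoiser_lora`) are hypothetical; `fuse_lora`, `unfuse_lora`, and `unload_lora_weights` are real diffusers pipeline APIs.

```python
import numpy as np


class PeftLoraLoaderMixinTests:
    def test_lora_actions(self, action, components_to_add, expected_atol=1e-3):
        pipe, inputs = self._setup_pipeline_and_inputs()  # hypothetical helper

        output_no_lora = pipe(**inputs)[0]

        # Attach LoRA adapters to the requested components.
        if components_to_add in ("text_encoder_only", "text_and_denoiser"):
            self._add_text_lora(pipe)      # hypothetical helper
        if components_to_add == "text_and_denoiser":
            self._add_denoiser_lora(pipe)  # hypothetical helper

        output_lora = pipe(**inputs)[0]

        if action == "fused":
            pipe.fuse_lora()
            output = pipe(**inputs)[0]
            assert np.allclose(output, output_lora, atol=expected_atol)
        elif action == "unfused":
            pipe.fuse_lora()
            pipe.unfuse_lora()
            output = pipe(**inputs)[0]
            assert np.allclose(output, output_lora, atol=expected_atol)
        elif action == "unloaded":
            pipe.unload_lora_weights()
            output = pipe(**inputs)[0]
            assert np.allclose(output, output_no_lora, atol=expected_atol)
        elif action == "save_load":
            # Save the LoRA weights, reload them into a fresh pipeline, and
            # compare outputs against output_lora (omitted for brevity).
            ...
```

This is why the per-model subclasses only need to re-declare the parameter matrix and pass a looser `expected_atol` where the model requires it.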
77 changes: 15 additions & 62 deletions tests/lora/test_lora_layers_cogview4.py
@@ -13,10 +13,8 @@
# limitations under the License.

import sys
import tempfile
import unittest

import numpy as np
import torch
from parameterized import parameterized
from transformers import AutoTokenizer, GlmModel
@@ -27,7 +25,6 @@
require_peft_backend,
require_torch_accelerator,
skip_mps,
torch_device,
)


@@ -116,37 +113,21 @@ def get_dummy_inputs(self, with_generator=True):
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

def test_simple_inference_save_pretrained(self):
"""
Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained
"""
for scheduler_cls in self.scheduler_classes:
components, _, _ = self.get_dummy_components(scheduler_cls)
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
_, _, inputs = self.get_dummy_inputs(with_generator=False)

output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
self.assertTrue(output_no_lora.shape == self.output_shape)

images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname)

pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
pipe_from_pretrained.to(torch_device)

images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]

self.assertTrue(
np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
"Loading from saved checkpoints should give same results.",
)
@parameterized.expand(
[
# Test actions on text_encoder LoRA only
("fused", "text_encoder_only"),
("unloaded", "text_encoder_only"),
("save_load", "text_encoder_only"),
# Test actions on both text_encoder and denoiser LoRA
("fused", "text_and_denoiser"),
("unloaded", "text_and_denoiser"),
("unfused", "text_and_denoiser"),
("save_load", "text_and_denoiser"),
]
)
def test_lora_actions(self, action, components_to_add):
super().test_lora_actions(action, components_to_add, expected_atol=9e-3)

@parameterized.expand([("block_level", True), ("leaf_level", False)])
@require_torch_accelerator
@@ -155,34 +136,6 @@ def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
# The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
super()._test_group_offloading_inference_denoiser(offload_type, use_stream)

@unittest.skip("Not supported in CogView4.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass

@unittest.skip("Not supported in CogView4.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass

@unittest.skip("Not supported in CogView4.")
def test_modify_padding_mode(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_partial_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_text_lora_and_scale(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_text_lora_fused(self):
pass

@unittest.skip("Text encoder LoRA is not supported in CogView4.")
def test_simple_inference_with_text_lora_save_load(self):
pass
24 changes: 2 additions & 22 deletions tests/lora/test_lora_layers_flux.py
@@ -263,21 +263,11 @@ def test_lora_expansion_works_for_extra_keys(self):
"LoRA should lead to different results.",
)

@unittest.skip("Not supported in Flux.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass

@unittest.skip("Not supported in Flux.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass

@unittest.skip("Not supported in Flux.")
def test_modify_padding_mode(self):
pass

@unittest.skip("Not supported in Flux.")
def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
pass
# TODO: skip them properly


class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
@@ -791,21 +781,11 @@ def test_lora_unload_with_parameter_expanded_shapes_and_no_reset(self):
self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2)
self.assertTrue(pipe.transformer.config.in_channels == in_features * 2)

@unittest.skip("Not supported in Flux.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass

@unittest.skip("Not supported in Flux.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass

@unittest.skip("Not supported in Flux.")
def test_modify_padding_mode(self):
pass

@unittest.skip("Not supported in Flux.")
def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
pass
# TODO: skip them properly


@slow
51 changes: 16 additions & 35 deletions tests/lora/test_lora_layers_hunyuanvideo.py
@@ -19,6 +19,7 @@
import numpy as np
import pytest
import torch
from parameterized import parameterized
from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast

from diffusers import (
@@ -153,46 +154,26 @@ def get_dummy_inputs(self, with_generator=True):
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

# TODO(aryan): Fix the following test
@unittest.skip("This test fails with an error I haven't been able to debug yet.")
def test_simple_inference_save_pretrained(self):
pass

@unittest.skip("Not supported in HunyuanVideo.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass

@unittest.skip("Not supported in HunyuanVideo.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@parameterized.expand(
[
# Test actions on text_encoder LoRA only
("fused", "text_encoder_only"),
("unloaded", "text_encoder_only"),
("save_load", "text_encoder_only"),
# Test actions on both text_encoder and denoiser LoRA
("fused", "text_and_denoiser"),
("unloaded", "text_and_denoiser"),
("unfused", "text_and_denoiser"),
("save_load", "text_and_denoiser"),
]
)
def test_lora_actions(self, action, components_to_add):
super().test_lora_actions(action, components_to_add, expected_atol=9e-3)

@unittest.skip("Not supported in HunyuanVideo.")
def test_modify_padding_mode(self):
pass

@unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
def test_simple_inference_with_partial_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
def test_simple_inference_with_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
def test_simple_inference_with_text_lora_and_scale(self):
pass

@unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
def test_simple_inference_with_text_lora_fused(self):
pass

@unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
def test_simple_inference_with_text_lora_save_load(self):
pass


@nightly
@require_torch_accelerator
46 changes: 16 additions & 30 deletions tests/lora/test_lora_layers_ltx_video.py
@@ -16,6 +16,7 @@
import unittest

import torch
from parameterized import parameterized
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import (
@@ -111,37 +112,22 @@ def get_dummy_inputs(self, with_generator=True):
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

@unittest.skip("Not supported in LTXVideo.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass

@unittest.skip("Not supported in LTXVideo.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@parameterized.expand(
[
# Test actions on text_encoder LoRA only
("fused", "text_encoder_only"),
("unloaded", "text_encoder_only"),
("save_load", "text_encoder_only"),
# Test actions on both text_encoder and denoiser LoRA
("fused", "text_and_denoiser"),
("unloaded", "text_and_denoiser"),
("unfused", "text_and_denoiser"),
("save_load", "text_and_denoiser"),
]
)
def test_lora_actions(self, action, components_to_add):
super().test_lora_actions(action, components_to_add, expected_atol=9e-3)

@unittest.skip("Not supported in LTXVideo.")
def test_modify_padding_mode(self):
pass

@unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
def test_simple_inference_with_partial_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
def test_simple_inference_with_text_lora(self):
pass

@unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
def test_simple_inference_with_text_lora_and_scale(self):
pass

@unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
def test_simple_inference_with_text_lora_fused(self):
pass

@unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
def test_simple_inference_with_text_lora_save_load(self):
pass