
Commit dfb20b0

Merge branch 'main' into benchmarking-overhaul
2 parents 64186b4 + 20273e5 commit dfb20b0


3 files changed: +6, -6 lines changed


tests/models/test_modeling_common.py

Lines changed: 2 additions & 2 deletions
@@ -1060,7 +1060,7 @@ def test_deprecated_kwargs(self):
     @parameterized.expand([True, False])
     @torch.no_grad()
     @unittest.skipIf(not is_peft_available(), "Only with PEFT")
-    def test_save_load_lora_adapter(self, use_dora=False):
+    def test_lora_save_load_adapter(self, use_dora=False):
         import safetensors
         from peft import LoraConfig
         from peft.utils import get_peft_model_state_dict
@@ -1117,7 +1117,7 @@ def test_save_load_lora_adapter(self, use_dora=False):
         self.assertTrue(torch.allclose(outputs_with_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4))

     @unittest.skipIf(not is_peft_available(), "Only with PEFT")
-    def test_wrong_adapter_name_raises_error(self):
+    def test_lora_wrong_adapter_name_raises_error(self):
         from peft import LoraConfig

         from diffusers.loaders.peft import PeftAdapterMixin
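
Both renames move the LoRA qualifier directly behind the test_ prefix, so the LoRA-specific tests in this common model suite now share a test_lora_ name prefix. A minimal sketch of why such a shared prefix is convenient, assuming pytest and a local checkout of the repository; the selection expression below is illustrative and not part of this commit:

import pytest

# Select every test whose name contains "test_lora" in the common model tests.
# pytest.main returns an exit code (0 means all selected tests passed).
exit_code = pytest.main(["tests/models/test_modeling_common.py", "-k", "test_lora"])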

tests/quantization/bnb/test_4bit.py

Lines changed: 2 additions & 2 deletions
@@ -526,7 +526,7 @@ def test_moving_to_cpu_throws_warning(self):
         reason="Test will pass after https://github.com/huggingface/accelerate/pull/3223 is in a release.",
         strict=True,
     )
-    def test_pipeline_device_placement_works_with_nf4(self):
+    def test_pipeline_cuda_placement_works_with_nf4(self):
         transformer_nf4_config = BitsAndBytesConfig(
             load_in_4bit=True,
             bnb_4bit_quant_type="nf4",
@@ -560,7 +560,7 @@ def test_pipeline_device_placement_works_with_nf4(self):
         ).to(torch_device)

         # Check if inference works.
-        _ = pipeline_4bit("table", max_sequence_length=20, num_inference_steps=2)
+        _ = pipeline_4bit(self.prompt, max_sequence_length=20, num_inference_steps=2)

         del pipeline_4bit
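
The second hunk swaps the hard-coded "table" prompt for the test class's shared self.prompt attribute, presumably so the 4-bit smoke checks run against the same input as the rest of the class. A minimal, hypothetical illustration of that pattern in a unittest-style class; the class name and prompt value below are placeholders, not the actual diffusers test values:

import unittest

class Bnb4BitSmokeTests(unittest.TestCase):
    # Placeholder value; the real suite defines its own prompt.
    prompt = "a photo of a table"

    def test_inference_uses_shared_prompt(self):
        # Tests read self.prompt instead of repeating a string literal,
        # so changing the prompt in one place updates every smoke check.
        self.assertIsInstance(self.prompt, str)
        self.assertGreater(len(self.prompt), 0)

if __name__ == "__main__":
    unittest.main()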

tests/quantization/bnb/test_mixed_int8.py

Lines changed: 2 additions & 2 deletions
@@ -492,7 +492,7 @@ def test_generate_quality_dequantize(self):
         self.assertTrue(max_diff < 1e-2)

         # 8bit models cannot be offloaded to CPU.
-        self.assertTrue(self.pipeline_8bit.transformer.device.type == "cuda")
+        self.assertTrue(self.pipeline_8bit.transformer.device.type == torch_device)
         # calling it again shouldn't be a problem
         _ = self.pipeline_8bit(
             prompt=self.prompt,
@@ -534,7 +534,7 @@ def test_pipeline_cuda_placement_works_with_mixed_int8(self):
         ).to(device)

         # Check if inference works.
-        _ = pipeline_8bit("table", max_sequence_length=20, num_inference_steps=2)
+        _ = pipeline_8bit(self.prompt, max_sequence_length=20, num_inference_steps=2)

         del pipeline_8bit
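
Comparing the transformer's device type against torch_device instead of the literal "cuda" keeps the assertion valid on whichever accelerator the test session resolves (CUDA, MPS, or CPU). A small sketch of the idea, assuming diffusers' testing utilities are importable; the tensor below is only a stand-in for the quantized pipeline:

import torch
from diffusers.utils.testing_utils import torch_device

# torch_device is a plain string such as "cuda", "mps", or "cpu", resolved once
# from the environment, so the same comparison works on any supported backend.
t = torch.ones(1, device=torch_device)
print(t.device.type == torch_device)  # True regardless of the backend in use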
