
Commit 04de669

fix: bug when no LoRAs
1 parent 7752362 commit 04de669

File tree

1 file changed (+3, -2 lines)

llama_cpp/llama.py

Lines changed: 3 additions & 2 deletions
@@ -409,8 +409,9 @@ def __init__(
         # Immutable value representing active adapters for use as a key
         self._lora_adapters_active: Tuple[Tuple[str, float]] = ()
 
-        for lora_path, scale in self.lora_adapters.copy().items():
-            self.set_lora_adapter_scale(lora_path, scale, load_if_needed=True)
+        if self.lora_adapters:
+            for lora_path, scale in self.lora_adapters.copy().items():
+                self.set_lora_adapter_scale(lora_path, scale, load_if_needed=True)
 
         if self.verbose:
             print(llama_cpp.llama_print_system_info().decode("utf-8"), file=sys.stderr)
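
For context, a minimal standalone sketch of why the added guard helps. It assumes self.lora_adapters is an Optional[Dict[str, float]] that stays None when no LoRA adapters are supplied (an assumption, not confirmed by this diff alone), and _LoraHolder with its methods is a hypothetical stand-in, not the library's real class:

from typing import Dict, Optional, Tuple


class _LoraHolder:
    # Hypothetical stand-in for the relevant part of Llama.__init__.
    def __init__(self, lora_adapters: Optional[Dict[str, float]] = None) -> None:
        self.lora_adapters = lora_adapters
        self._lora_adapters_active: Tuple[Tuple[str, float], ...] = ()

    def set_lora_adapter_scale(self, path: str, scale: float, load_if_needed: bool = False) -> None:
        # Stand-in for the real adapter loading/scaling logic.
        self._lora_adapters_active += ((path, scale),)

    def apply_unguarded(self) -> None:
        # Pre-fix behavior: raises AttributeError when lora_adapters is None.
        for lora_path, scale in self.lora_adapters.copy().items():
            self.set_lora_adapter_scale(lora_path, scale, load_if_needed=True)

    def apply_guarded(self) -> None:
        # Post-fix behavior: the truthiness check skips both None and {}.
        if self.lora_adapters:
            for lora_path, scale in self.lora_adapters.copy().items():
                self.set_lora_adapter_scale(lora_path, scale, load_if_needed=True)


if __name__ == "__main__":
    holder = _LoraHolder()  # no LoRAs passed
    try:
        holder.apply_unguarded()
    except AttributeError as exc:
        print("unguarded:", exc)  # 'NoneType' object has no attribute 'copy'
    holder.apply_guarded()  # no-op, no error
    print("guarded ok, active adapters:", holder._lora_adapters_active)

Under that assumption, the one-line guard is enough: when no adapters are configured the loop is skipped entirely, and behavior is unchanged for callers that do pass adapters.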
