Skip to content

Commit 477d87e

Browse files
committed
Fix layer patch dtype selection for CLIP text encoder models.
1 parent 8b4b0ff commit 477d87e

File tree

3 files changed: +4 −6 lines changed

invokeai/app/invocations/compel.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
8686
model=text_encoder,
8787
patches=_lora_loader(),
8888
prefix="lora_te_",
89-
dtype=TorchDevice.choose_torch_dtype(),
89+
dtype=text_encoder.dtype,
9090
cached_weights=cached_weights,
9191
),
9292
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.
@@ -184,7 +184,7 @@ def _lora_loader() -> Iterator[Tuple[ModelPatchRaw, float]]:
184184
model=text_encoder,
185185
patches=_lora_loader(),
186186
prefix=lora_prefix,
187-
dtype=TorchDevice.choose_torch_dtype(),
187+
dtype=text_encoder.dtype,
188188
cached_weights=cached_weights,
189189
),
190190
# Apply CLIP Skip after LoRA to prevent LoRA application from failing on skipped layers.

invokeai/app/invocations/flux_text_encoder.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
2323
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
2424
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, FLUXConditioningInfo
25-
from invokeai.backend.util.devices import TorchDevice
2625

2726

2827
@invocation(
@@ -116,7 +115,7 @@ def _clip_encode(self, context: InvocationContext) -> torch.Tensor:
116115
model=clip_text_encoder,
117116
patches=self._clip_lora_iterator(context),
118117
prefix=FLUX_LORA_CLIP_PREFIX,
119-
dtype=TorchDevice.choose_torch_dtype(),
118+
dtype=clip_text_encoder.dtype,
120119
cached_weights=cached_weights,
121120
)
122121
)

invokeai/app/invocations/sd3_text_encoder.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
from invokeai.backend.patches.lora_conversions.flux_lora_constants import FLUX_LORA_CLIP_PREFIX
2222
from invokeai.backend.patches.model_patch_raw import ModelPatchRaw
2323
from invokeai.backend.stable_diffusion.diffusion.conditioning_data import ConditioningFieldData, SD3ConditioningInfo
24-
from invokeai.backend.util.devices import TorchDevice
2524

2625
# The SD3 T5 Max Sequence Length set based on the default in diffusers.
2726
SD3_T5_MAX_SEQ_LEN = 256
@@ -155,7 +154,7 @@ def _clip_encode(
155154
model=clip_text_encoder,
156155
patches=self._clip_lora_iterator(context, clip_model),
157156
prefix=FLUX_LORA_CLIP_PREFIX,
158-
dtype=TorchDevice.choose_torch_dtype(),
157+
dtype=clip_text_encoder.dtype,
159158
cached_weights=cached_weights,
160159
)
161160
)

0 commit comments

Comments (0)