Skip to content

Commit 785bb1d

Browse files
RyanJDick authored and hipsterusername committed
Fix all comparisons against the DEFAULT_PRECISION constant. DEFAULT_PRECISION is a torch.dtype. Previously, it was compared to a str in a number of places where it would always resolve to False. This is a bugfix that results in a change to the default behavior. In practice, this will not change the behavior for many users, because it only causes a change in behavior if a user has configured float32 as their default precision.
1 parent a3cb5da commit 785bb1d

File tree

6 files changed

+11
-10
lines changed

6 files changed

+11
-10
lines changed

invokeai/app/invocations/constants.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
from typing import Literal
22

33
from invokeai.backend.stable_diffusion.schedulers import SCHEDULER_MAP
4+
from invokeai.backend.util.devices import TorchDevice
45

56
LATENT_SCALE_FACTOR = 8
67
"""
@@ -15,3 +16,5 @@
1516

1617
IMAGE_MODES = Literal["L", "RGB", "RGBA", "CMYK", "YCbCr", "LAB", "HSV", "I", "F"]
1718
"""A literal type for PIL image modes supported by Invoke"""
19+
20+
DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()

invokeai/app/invocations/create_denoise_mask.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
from torchvision.transforms.functional import resize as tv_resize
77

88
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
9-
from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
9+
from invokeai.app.invocations.constants import DEFAULT_PRECISION
1010
from invokeai.app.invocations.fields import FieldDescriptions, ImageField, Input, InputField
1111
from invokeai.app.invocations.image_to_latents import ImageToLatentsInvocation
1212
from invokeai.app.invocations.model import VAEField
@@ -30,7 +30,7 @@ class CreateDenoiseMaskInvocation(BaseInvocation):
3030
mask: ImageField = InputField(description="The mask to use when pasting", ui_order=2)
3131
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=3)
3232
fp32: bool = InputField(
33-
default=DEFAULT_PRECISION == "float32",
33+
default=DEFAULT_PRECISION == torch.float32,
3434
description=FieldDescriptions.fp32,
3535
ui_order=4,
3636
)

invokeai/app/invocations/create_gradient_mask.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
from torchvision.transforms.functional import resize as tv_resize
88

99
from invokeai.app.invocations.baseinvocation import BaseInvocation, BaseInvocationOutput, invocation, invocation_output
10-
from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
10+
from invokeai.app.invocations.constants import DEFAULT_PRECISION
1111
from invokeai.app.invocations.fields import (
1212
DenoiseMaskField,
1313
FieldDescriptions,
@@ -74,7 +74,7 @@ class CreateGradientMaskInvocation(BaseInvocation):
7474
)
7575
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled, ui_order=8)
7676
fp32: bool = InputField(
77-
default=DEFAULT_PRECISION == "float32",
77+
default=DEFAULT_PRECISION == torch.float32,
7878
description=FieldDescriptions.fp32,
7979
ui_order=9,
8080
)

invokeai/app/invocations/denoise_latents.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,8 +59,6 @@
5959
from .controlnet_image_processors import ControlField
6060
from .model import ModelIdentifierField, UNetField
6161

62-
DEFAULT_PRECISION = TorchDevice.choose_torch_dtype()
63-
6462

6563
def get_scheduler(
6664
context: InvocationContext,

invokeai/app/invocations/image_to_latents.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
from diffusers.models.autoencoders.autoencoder_tiny import AutoencoderTiny
1313

1414
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
15-
from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
15+
from invokeai.app.invocations.constants import DEFAULT_PRECISION
1616
from invokeai.app.invocations.fields import (
1717
FieldDescriptions,
1818
ImageField,
@@ -44,7 +44,7 @@ class ImageToLatentsInvocation(BaseInvocation):
4444
input=Input.Connection,
4545
)
4646
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
47-
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
47+
fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
4848

4949
@staticmethod
5050
def vae_encode(vae_info: LoadedModel, upcast: bool, tiled: bool, image_tensor: torch.Tensor) -> torch.Tensor:

invokeai/app/invocations/latents_to_image.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
1212

1313
from invokeai.app.invocations.baseinvocation import BaseInvocation, invocation
14-
from invokeai.app.invocations.denoise_latents import DEFAULT_PRECISION
14+
from invokeai.app.invocations.constants import DEFAULT_PRECISION
1515
from invokeai.app.invocations.fields import (
1616
FieldDescriptions,
1717
Input,
@@ -46,7 +46,7 @@ class LatentsToImageInvocation(BaseInvocation, WithMetadata, WithBoard):
4646
input=Input.Connection,
4747
)
4848
tiled: bool = InputField(default=False, description=FieldDescriptions.tiled)
49-
fp32: bool = InputField(default=DEFAULT_PRECISION == "float32", description=FieldDescriptions.fp32)
49+
fp32: bool = InputField(default=DEFAULT_PRECISION == torch.float32, description=FieldDescriptions.fp32)
5050

5151
@torch.no_grad()
5252
def invoke(self, context: InvocationContext) -> ImageOutput:

0 commit comments

Comments
 (0)