@@ -21,22 +21,22 @@
 
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders import FluxTransformer2DLoadersMixin, FromOriginalModelMixin, PeftAdapterMixin
-from ...models.attention import FeedForward
-from ...models.attention_processor import (
+from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
+from ...utils.import_utils import is_torch_npu_available
+from ...utils.torch_utils import maybe_allow_in_graph
+from ..attention import FeedForward
+from ..attention_processor import (
     Attention,
     AttentionProcessor,
     FluxAttnProcessor2_0,
     FluxAttnProcessor2_0_NPU,
     FusedFluxAttnProcessor2_0,
 )
-from ...models.modeling_utils import ModelMixin
-from ...models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
-from ...utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
-from ...utils.import_utils import is_torch_npu_available
-from ...utils.torch_utils import maybe_allow_in_graph
 from ..cache_utils import CacheMixin
 from ..embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed
 from ..modeling_outputs import Transformer2DModelOutput
+from ..modeling_utils import ModelMixin
+from ..normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
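Side note on the relocated imports: assuming this module sits one package below diffusers.models (e.g. under diffusers/models/transformers/, a path the diff itself does not show), the new two-dot forms and the old three-dot ...models.* forms resolve to the same absolute modules, so the hunk is an import-path cleanup rather than a behavioral change. A minimal sketch of what the relative imports resolve to, written with absolute imports:

# Minimal sketch (file location assumed as above; not part of the diff): these
# absolute imports are what the relative forms in the hunk resolve to at runtime.
from diffusers.models.attention import FeedForward          # `from ..attention import FeedForward`
from diffusers.models.attention_processor import Attention  # `from ..attention_processor import Attention`
from diffusers.models.modeling_utils import ModelMixin      # `from ..modeling_utils import ModelMixin`
from diffusers.utils import logging                         # `from ...utils import logging`

logger = logging.get_logger(__name__)  # same logger setup as the last context line above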