@@ -1598,7 +1598,7 @@ def inputs(self) -> List[Tuple[str, Any]]:
             ("control_guidance_start", 0.0),
             ("control_guidance_end", 1.0),
             ("controlnet_conditioning_scale", 1.0),
-            ("control_mode", 0),
+            ("control_mode", None),
             ("guess_mode", False),
             ("num_images_per_prompt", 1),
             ("guidance_scale", 5.0),
@@ -1791,8 +1791,9 @@ def __call__(self, pipeline, state: PipelineState) -> PipelineState:
         control_type = (
             control_type.reshape(1, -1)
             .to(device, dtype=prompt_embeds.dtype)
-            .repeat(batch_size * num_images_per_prompt * 2, 1)
         )
+        control_type = pipeline.controlnet_guider.prepare_input(control_type, control_type)
+
         with pipeline.progress_bar(total=num_inference_steps) as progress_bar:
             for i, t in enumerate(timesteps):
                 # prepare latents for unet using the guider
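The removed hard-coded .repeat(batch_size * num_images_per_prompt * 2, 1) duplicated the tensor for classifier-free guidance; the guider's prepare_input call now owns that batch expansion. A hedged sketch of what such a guider might do, as a stand-in rather than the actual diffusers guider class:

import torch

class SimpleCFGGuider:
    # Illustrative stand-in for a guidance helper; the real controlnet_guider
    # in diffusers may differ in signature and behavior.
    def __init__(self, do_classifier_free_guidance: bool):
        self.do_classifier_free_guidance = do_classifier_free_guidance

    def prepare_input(self, cond, uncond):
        if self.do_classifier_free_guidance:
            # Stack unconditional and conditional copies along the batch dim,
            # replacing the hand-written "* 2" repeat from the old code.
            return torch.cat([uncond, cond], dim=0)
        return cond

Under that assumption, passing the same tensor twice, as in prepare_input(control_type, control_type), yields the batch-doubled tensor when guidance is enabled and the original tensor otherwise.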
@@ -2050,9 +2051,9 @@ class StableDiffusionXLAutoBeforeDenoiseStep(AutoPipelineBlocks):
 
 
 class StableDiffusionXLAutoDenoiseStep(AutoPipelineBlocks):
-    block_classes = [StableDiffusionXLControlNetDenoiseStep, StableDiffusionXLDenoiseStep]
-    block_names = ["controlnet", "unet"]
-    block_trigger_inputs = ["control_image", None]
+    block_classes = [StableDiffusionXLControlNetUnionDenoiseStep, StableDiffusionXLControlNetDenoiseStep, StableDiffusionXLDenoiseStep]
+    block_names = ["controlnet_union", "controlnet", "unet"]
+    block_trigger_inputs = ["control_mode", "control_image", None]
 
 
 class StableDiffusionXLAutoDecodeStep(AutoPipelineBlocks):
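The auto denoise step now checks for control_mode before control_image, so a ControlNet Union request (which also carries a control image) is routed to the union block first. A rough sketch of the trigger-input dispatch this ordering relies on, illustrative only and not the AutoPipelineBlocks implementation:

def select_block(block_names, block_trigger_inputs, provided_inputs):
    # First block whose trigger input was supplied wins; None is the fallback.
    for name, trigger in zip(block_names, block_trigger_inputs):
        if trigger is None or provided_inputs.get(trigger) is not None:
            return name
    raise ValueError("no matching block")

# control_mode present -> union step; control_image alone -> plain controlnet;
# neither -> bare unet step.
print(select_block(
    ["controlnet_union", "controlnet", "unet"],
    ["control_mode", "control_image", None],
    {"control_image": "img", "control_mode": 1},
))  # -> "controlnet_union"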