1 parent cae9ddf commit c78d823
src/diffusers/pipelines/dream/pipeline_dream.py
@@ -272,7 +272,7 @@ def __call__(
                 device=device,
             )
         else:
-            text_ids, attention_mask = None
+            text_ids, attention_mask = None, None

         # 4. Prepare latent variables (e.g. the initial sample) for generation
         total_batch_size = batch_size * num_texts_per_prompt
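The first hunk fixes a tuple-unpacking bug: binding two names to a bare None raises at runtime, whereas an explicit pair of Nones unpacks cleanly. A minimal standalone reproduction, independent of the pipeline code:

# Unpacking a bare None into two names fails:
try:
    text_ids, attention_mask = None
except TypeError as exc:
    print(exc)  # cannot unpack non-iterable NoneType object

# An explicit pair binds both names to None:
text_ids, attention_mask = None, None
print(text_ids, attention_mask)  # None None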
@@ -284,7 +284,7 @@ def __call__(



-        if prompt_embeds is not None:
+        if prompt_embeds is None:
             prompt_embeds = self.transformer.embed_tokens(latents)

         # If prompt_embeds's seq len is not max_sequence_length, concat with embedding of mask tokens for the
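The second hunk inverts the guard so that embeddings are derived from the latent token ids only when the caller did not pass precomputed prompt_embeds. A minimal sketch of that pattern, written as a hypothetical helper rather than the pipeline's actual signature:

def resolve_prompt_embeds(transformer, latents, prompt_embeds=None):
    # Embed the latent token ids only when no precomputed embeddings were supplied.
    if prompt_embeds is None:
        prompt_embeds = transformer.embed_tokens(latents)
    return prompt_embeds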