|
| 1 | +<!-- Copyright 2025 The HuggingFace Team. All rights reserved. |
| 2 | +# |
| 3 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | +# you may not use this file except in compliance with the License. |
| 5 | +# You may obtain a copy of the License at |
| 6 | +# |
| 7 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | +# |
| 9 | +# Unless required by applicable law or agreed to in writing, software |
| 10 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | +# See the License for the specific language governing permissions and |
| 13 | +# limitations under the License. --> |
| 14 | + |
| 15 | +# HunyuanImage2.1 |
| 16 | + |
| 17 | + |
| 18 | +HunyuanImage-2.1 is a 17B text-to-image model that is capable of generating 2K (2048 x 2048) resolution images. |
| 19 | + |
| 20 | +HunyuanImage-2.1 comes in the following variants: |
| 21 | + |
| 22 | +| model type | model id | |
| 23 | +|:----------:|:--------:| |
| 24 | +| HunyuanImage-2.1 | [hunyuanvideo-community/HunyuanImage-2.1-Diffusers](https://huggingface.co/hunyuanvideo-community/HunyuanImage-2.1-Diffusers) | |
| 25 | +| HunyuanImage-2.1-Distilled | [hunyuanvideo-community/HunyuanImage-2.1-Distilled-Diffusers](https://huggingface.co/hunyuanvideo-community/HunyuanImage-2.1-Distilled-Diffusers) | |
| 26 | +| HunyuanImage-2.1-Refiner | [hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers](https://huggingface.co/hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers) | |
| 27 | + |
| 28 | +> [!TIP] |
| 29 | +> [Caching](../../optimization/cache) may also speed up inference by storing and reusing intermediate outputs. |
| 30 | +
|
| 31 | +## HunyuanImage-2.1 |
| 32 | + |
| 33 | +HunyuanImage-2.1 applies [Adaptive Projected Guidance (APG)](https://huggingface.co/papers/2410.02416) combined with Classifier-Free Guidance (CFG) in the denoising loop. `HunyuanImagePipeline` has a `guider` component (read more about [Guider](../modular_diffusers/guiders.md)) and does not take a `guidance_scale` parameter at runtime. To change guider-related parameters, e.g., `guidance_scale`, you can update the `guider` configuration instead. |
| 34 | + |
| 35 | +```python |
| 36 | +import mindspore as ms |
| 37 | +from mindone.diffusers import HunyuanImagePipeline |
| 38 | + |
| 39 | +pipe = HunyuanImagePipeline.from_pretrained( |
| 40 | + "hunyuanvideo-community/HunyuanImage-2.1-Diffusers", |
| 41 | + mindspore_dtype=ms.bfloat16 |
| 42 | +) |
| 43 | +``` |
| 44 | + |
| 45 | +You can inspect the `guider` object: |
| 46 | + |
| 47 | +```py |
| 48 | +>>> pipe.guider |
| 49 | +AdaptiveProjectedMixGuidance { |
| 50 | + "_class_name": "AdaptiveProjectedMixGuidance", |
| 51 | + "_diffusers_version": "0.36.0.dev0", |
| 52 | + "adaptive_projected_guidance_momentum": -0.5, |
| 53 | + "adaptive_projected_guidance_rescale": 10.0, |
| 54 | + "adaptive_projected_guidance_scale": 10.0, |
| 55 | + "adaptive_projected_guidance_start_step": 5, |
| 56 | + "enabled": true, |
| 57 | + "eta": 0.0, |
| 58 | + "guidance_rescale": 0.0, |
| 59 | + "guidance_scale": 3.5, |
| 60 | + "start": 0.0, |
| 61 | + "stop": 1.0, |
| 62 | + "use_original_formulation": false |
| 63 | +} |
| 64 | + |
| 65 | +State: |
| 66 | + step: None |
| 67 | + num_inference_steps: None |
| 68 | + timestep: None |
| 69 | + count_prepared: 0 |
| 70 | + enabled: True |
| 71 | + num_conditions: 2 |
| 72 | + momentum_buffer: None |
| 73 | + is_apg_enabled: False |
| 74 | + is_cfg_enabled: True |
| 75 | +``` |
| 76 | + |
| 77 | +To update the guider with a different configuration, use the `new()` method. For example, to generate an image with `guidance_scale=5.0` while keeping all other default guidance parameters: |
| 78 | + |
| 79 | +```py |
| 80 | +import mindspore as ms |
| 81 | +from mindone.diffusers import HunyuanImagePipeline |
| 82 | + |
| 83 | +pipe = HunyuanImagePipeline.from_pretrained( |
| 84 | + "hunyuanvideo-community/HunyuanImage-2.1-Diffusers", |
| 85 | + mindspore_dtype=ms.bfloat16 |
| 86 | +) |
| 87 | + |
| 88 | +# Update the guider configuration |
| 89 | +pipe.guider = pipe.guider.new(guidance_scale=5.0) |
| 90 | + |
| 91 | +prompt = ( |
| 92 | + "A cute, cartoon-style anthropomorphic penguin plush toy with fluffy fur, standing in a painting studio, " |
| 93 | + "wearing a red knitted scarf and a red beret with the word 'Tencent' on it, holding a paintbrush with a " |
| 94 | + "focused expression as it paints an oil painting of the Mona Lisa, rendered in a photorealistic photographic style." |
| 95 | +) |
| 96 | + |
| 97 | +image = pipe( |
| 98 | + prompt=prompt, |
| 99 | + num_inference_steps=50, |
| 100 | + height=2048, |
| 101 | + width=2048, |
| 102 | +).images[0] |
| 103 | +image.save("image.png") |
| 104 | +``` |
| 105 | + |
| 106 | + |
| 107 | +## HunyuanImage-2.1-Distilled |
| 108 | + |
| 109 | +Use `distilled_guidance_scale` with the guidance-distilled checkpoint: |
| 110 | + |
| 111 | +```py |
| 112 | +import mindspore as ms |
| 113 | +from mindone.diffusers import HunyuanImagePipeline |
| 114 | +pipe = HunyuanImagePipeline.from_pretrained("hunyuanvideo-community/HunyuanImage-2.1-Distilled-Diffusers", mindspore_dtype=ms.bfloat16) |
| 115 | + |
| 116 | +prompt = ( |
| 117 | + "A cute, cartoon-style anthropomorphic penguin plush toy with fluffy fur, standing in a painting studio, " |
| 118 | + "wearing a red knitted scarf and a red beret with the word 'Tencent' on it, holding a paintbrush with a " |
| 119 | + "focused expression as it paints an oil painting of the Mona Lisa, rendered in a photorealistic photographic style." |
| 120 | +) |
| 121 | + |
| 122 | +out = pipe( |
| 123 | + prompt, |
| 124 | + num_inference_steps=8, |
| 125 | + distilled_guidance_scale=3.25, |
| 126 | + height=2048, |
| 127 | +    width=2048, |
| 128 | +).images[0] |
| 130 | + |
| 131 | +``` |
| 132 | + |
| 133 | + |
| 134 | +::: mindone.diffusers.HunyuanImagePipeline |
| 135 | + |
| 136 | +::: mindone.diffusers.HunyuanImageRefinerPipeline |
| 137 | + |
| 138 | +::: mindone.diffusers.pipelines.hunyuan_image.pipeline_output.HunyuanImagePipelineOutput |
0 commit comments