From 3102eb26c589250cca2d12f3ac3db52edacab90b Mon Sep 17 00:00:00 2001 From: stevhliu Date: Thu, 12 Jun 2025 15:53:27 -0700 Subject: [PATCH 1/2] draft --- docs/source/en/_toctree.yml | 2 + docs/source/en/optimization/memory.md | 8 +- .../en/optimization/speed-memory-optims.md | 148 ++++++++++++++++++ 3 files changed, 154 insertions(+), 4 deletions(-) create mode 100644 docs/source/en/optimization/speed-memory-optims.md diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index f13b7d54aec4..8ee46dd6cb61 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -180,6 +180,8 @@ title: Caching - local: optimization/memory title: Reduce memory usage + - local: optimization/speed-memory-optims + title: Compile and offloading - local: optimization/xformers title: xFormers - local: optimization/tome diff --git a/docs/source/en/optimization/memory.md b/docs/source/en/optimization/memory.md index 6b853a7a084b..1d8fe0bed96f 100644 --- a/docs/source/en/optimization/memory.md +++ b/docs/source/en/optimization/memory.md @@ -17,7 +17,7 @@ Modern diffusion models like [Flux](../api/pipelines/flux) and [Wan](../api/pipe This guide will show you how to reduce your memory usage. > [!TIP] -> Keep in mind these techniques may need to be adjusted depending on the model! For example, a transformer-based diffusion model may not benefit equally from these inference speed optimizations as a UNet-based model. +> Keep in mind these techniques may need to be adjusted depending on the model. For example, a transformer-based diffusion model may not benefit equally from these memory optimizations as a UNet-based model. ## Multiple GPUs @@ -145,7 +145,7 @@ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} G ``` > [!WARNING] -> [`AutoencoderKLWan`] and [`AsymmetricAutoencoderKL`] don't support slicing. +> The [`AutoencoderKLWan`] and [`AsymmetricAutoencoderKL`] classes don't support slicing. ## VAE tiling @@ -219,7 +219,7 @@ from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16 ) -pipline.enable_model_cpu_offload() +pipeline.enable_model_cpu_offload() pipeline( prompt="An astronaut riding a horse on Mars", @@ -493,7 +493,7 @@ with torch.inference_mode(): ## Memory-efficient attention > [!TIP] -> Memory-efficient attention optimizes for memory usage *and* [inference speed](./fp16#scaled-dot-product-attention! +> Memory-efficient attention optimizes for memory usage *and* [inference speed](./fp16#scaled-dot-product-attention)! The Transformers attention mechanism is memory-intensive, especially for long sequences, so you can try using different and more memory-efficient attention types. diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md new file mode 100644 index 000000000000..f825c8d0eeb1 --- /dev/null +++ b/docs/source/en/optimization/speed-memory-optims.md @@ -0,0 +1,148 @@ + + +# Compile and offloading + +There are trade-offs associated with optimizing solely for [inference speed](./fp16) or [memory-usage](./memory). For example, [caching](./cache) increases inference speed but requires more memory to store the intermediate outputs from the attention layers. + +If your hardware is sufficiently powerful, you can choose to focus on one or the other. For a more balanced approach that doesn't sacrifice too much in terms of inference speed and memory-usage, try compiling and offloading a model. 
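If you want to check how a given combination behaves on your own hardware, the minimal sketch below measures average latency and peak memory for an already configured pipeline. It assumes a CUDA device, and the `benchmark` helper, prompt, and run count are illustrative rather than part of the Diffusers API.

```py
import time

import torch

# hypothetical helper for timing a configured pipeline and reporting peak memory
def benchmark(pipeline, prompt, num_runs=3):
    # warmup run so torch.compile's one-time compilation cost isn't measured
    pipeline(prompt)

    # reset the peak memory counter so it only reflects the timed runs
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.synchronize()

    start = time.perf_counter()
    for _ in range(num_runs):
        pipeline(prompt)
    torch.cuda.synchronize()

    latency = (time.perf_counter() - start) / num_runs
    peak_memory = torch.cuda.max_memory_allocated() / 1024**3
    print(f"latency: {latency:.2f}s | peak memory: {peak_memory:.2f}GB")
```

The exact numbers depend on the GPU, resolution, and number of inference steps, so treat any comparison as relative to your own setup.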
+ +Refer to the table below for the latency and memory-usage of each combination. + +| combination | latency | memory usage | +|---|---|---| +| quantization, torch.compile | | | +| quantization, torch.compile, model CPU offloading | | | +| quantization, torch.compile, group offloading | | | + +This guide will show you how to compile and offload a model to improve both inference speed and memory-usage. + +## Quantization and torch.compile + +> [!TIP] +> The quantization backend, such as [bitsandbytes](../quantization/bitsandbytes#torchcompile), must be compatible with torch.compile. Refer to the quantization [overview](https://huggingface.co/docs/transformers/quantization/overview#overview) table to see which backends support torch.compile. + +Start by [quantizing](../quantization/overview) a model to reduce the memory required to store it and [compiling](./fp16#torchcompile) it to accelerate inference. + +```py +import torch +from diffusers import DiffusionPipeline +from diffusers.quantizers import PipelineQuantizationConfig + +# quantize +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder_2"], +) +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# compile +pipeline.transformer.to(memory_format=torch.channels_last) +pipeline.transformer = torch.compile( + pipeline.transformer, mode="ax-autotune", fullgraph=True +) +pipeline(""" + cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California + highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain +""" +).images[0] +``` + +## Quantization, torch.compile, and offloading + +In addition to quantization and torch.compile, try offloading if you need to reduce memory-usage further. Offloading moves various layers or model components from the CPU to the GPU as needed for computations. + + + + +[Model CPU offloading](./memory#model-offloading) moves an individual pipeline component, like the transformer model, to the GPU when it is needed for computation. Otherwise, it is offloaded to the CPU. 
+ +```py +import torch +from diffusers import DiffusionPipeline +from diffusers.quantizers import PipelineQuantizationConfig + +# quantize +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder_2"], +) +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# model CPU offloading +pipeline.enable_model_cpu_offload() + +# compile +pipeline.transformer.to(memory_format=torch.channels_last) +pipeline.transformer = torch.compile( + pipeline.transformer, mode="ax-autotune", fullgraph=True +) +pipeline( + "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" +).images[0] +``` + + + + +[Group offloading](./memory#group-offloading) moves the internal layers of an individual pipeline component, like the transformer model, to the GPU for computation and offloads it when it's not required. At the same time, it uses the [CUDA stream](./memory#cuda-stream) feature to prefetch the next layer for execution. + +By overlapping computation and data transfer, it is faster than model CPU offloading while also saving memory. + +```py +import torch +from diffusers import DiffusionPipeline +from diffusers.hooks import apply_group_offloading +from diffusers.quantizers import PipelineQuantizationConfig + +# quantize +pipeline_quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16}, + components_to_quantize=["transformer", "text_encoder_2"], +) +pipeline = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=pipeline_quant_config, + torch_dtype=torch.bfloat16, +).to("cuda") + +# group offloading +onload_device = torch.device("cuda") +offload_device = torch.device("cpu") + +pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True) +pipeline.vae.enable_group_offload(onload_device=onload_device, offload_type="leaf_level", use_stream=True) +apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) + +# compile +pipeline.transformer.to(memory_format=torch.channels_last) +pipeline.transformer = torch.compile( + pipeline.transformer, mode="ax-autotune", fullgraph=True +) +pipeline( + "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain" +).images[0] +``` + + + \ No newline at end of file From 7d7f274e4c6a12524fe0e571d9563f162c995437 Mon Sep 17 00:00:00 2001 From: stevhliu Date: Fri, 13 Jun 2025 15:07:37 -0700 Subject: [PATCH 2/2] feedback --- .../en/optimization/speed-memory-optims.md | 27 ++++++++----------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/docs/source/en/optimization/speed-memory-optims.md b/docs/source/en/optimization/speed-memory-optims.md index f825c8d0eeb1..e15ca3d7ea5c 100644 --- a/docs/source/en/optimization/speed-memory-optims.md +++ 
b/docs/source/en/optimization/speed-memory-optims.md @@ -12,26 +12,26 @@ specific language governing permissions and limitations under the License. # Compile and offloading -There are trade-offs associated with optimizing solely for [inference speed](./fp16) or [memory-usage](./memory). For example, [caching](./cache) increases inference speed but requires more memory to store the intermediate outputs from the attention layers. +When optimizing models, you often face trade-offs between [inference speed](./fp16) and [memory-usage](./memory). For instance, while [caching](./cache) can boost inference speed, it comes at the cost of increased memory consumption since it needs to store intermediate attention layer outputs. -If your hardware is sufficiently powerful, you can choose to focus on one or the other. For a more balanced approach that doesn't sacrifice too much in terms of inference speed and memory-usage, try compiling and offloading a model. +A more balanced optimization strategy combines [torch.compile](./fp16#torchcompile) with various offloading methods. This approach not only accelerates inference but also helps lower memory-usage. -Refer to the table below for the latency and memory-usage of each combination. +The table below provides a comparison of optimization strategy combinations and their impact on latency and memory-usage. -| combination | latency | memory usage | +| combination | latency | memory-usage | |---|---|---| | quantization, torch.compile | | | | quantization, torch.compile, model CPU offloading | | | | quantization, torch.compile, group offloading | | | -This guide will show you how to compile and offload a model to improve both inference speed and memory-usage. +This guide will show you how to compile and offload a model. ## Quantization and torch.compile > [!TIP] > The quantization backend, such as [bitsandbytes](../quantization/bitsandbytes#torchcompile), must be compatible with torch.compile. Refer to the quantization [overview](https://huggingface.co/docs/transformers/quantization/overview#overview) table to see which backends support torch.compile. -Start by [quantizing](../quantization/overview) a model to reduce the memory required to store it and [compiling](./fp16#torchcompile) it to accelerate inference. +Start by [quantizing](../quantization/overview) a model to reduce the memory required for storage and [compiling](./fp16#torchcompile) it to accelerate inference. 
```py
import torch
from diffusers import DiffusionPipeline
from diffusers.quantizers import PipelineQuantizationConfig
@@ -52,9 +52,7 @@ pipeline = DiffusionPipeline.from_pretrained(

 # compile
 pipeline.transformer.to(memory_format=torch.channels_last)
-pipeline.transformer = torch.compile(
-    pipeline.transformer, mode="ax-autotune", fullgraph=True
-)
+pipeline.transformer.compile(mode="max-autotune", fullgraph=True)
 pipeline("""
@@ -93,9 +91,7 @@ pipeline.enable_model_cpu_offload()

 # compile
 pipeline.transformer.to(memory_format=torch.channels_last)
-pipeline.transformer = torch.compile(
-    pipeline.transformer, mode="ax-autotune", fullgraph=True
-)
+pipeline.transformer.compile(mode="max-autotune", fullgraph=True)
 pipeline(
     "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"
 ).images[0]
@@ -132,13 +128,12 @@ offload_device = torch.device("cpu")

 pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True)
 pipeline.vae.enable_group_offload(onload_device=onload_device, offload_type="leaf_level", use_stream=True)
-apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)
+apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="leaf_level", use_stream=True)
+apply_group_offloading(pipeline.text_encoder_2, onload_device=onload_device, offload_type="leaf_level", use_stream=True)

 # compile
 pipeline.transformer.to(memory_format=torch.channels_last)
-pipeline.transformer = torch.compile(
-    pipeline.transformer, mode="ax-autotune", fullgraph=True
-)
+pipeline.transformer.compile(mode="max-autotune", fullgraph=True)
 pipeline(
     "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"
 ).images[0]