library/original_unet.py (5 changes: 4 additions & 1 deletion)
@@ -112,6 +112,7 @@
 import torch
 from torch import nn
 from torch.nn import functional as F
+from torch.nn.attention import SDPBackend, sdpa_kernel
 from einops import rearrange
 from library.utils import setup_logging
 setup_logging()
@@ -560,6 +561,7 @@ def forward(self, hidden_states):
 
         return hidden_states
 
+kernels = [SDPBackend.FLASH_ATTENTION, SDPBackend.CUDNN_ATTENTION, SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH]
 
 class CrossAttention(nn.Module):
     def __init__(
@@ -741,7 +743,8 @@ def forward_sdpa(self, x, context=None, mask=None):
         q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q_in, k_in, v_in))
         del q_in, k_in, v_in
 
-        out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
+        with sdpa_kernel(kernels):
+            out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
 
         out = rearrange(out, "b h n d -> b n (h d)", h=h)
 
library/sdxl_original_unet.py (5 changes: 4 additions & 1 deletion)
@@ -29,6 +29,7 @@
 import torch.utils.checkpoint
 from torch import nn
 from torch.nn import functional as F
+from torch.nn.attention import SDPBackend, sdpa_kernel
 from einops import rearrange
 from .utils import setup_logging
 
@@ -387,6 +388,7 @@ def custom_forward(*inputs):
 
         return hidden_states
 
+kernels = [SDPBackend.FLASH_ATTENTION, SDPBackend.CUDNN_ATTENTION, SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH]
 
 class CrossAttention(nn.Module):
     def __init__(
@@ -545,7 +547,8 @@ def forward_sdpa(self, x, context=None, mask=None):
         q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q_in, k_in, v_in))
         del q_in, k_in, v_in
 
-        out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
+        with sdpa_kernel(kernels):
+            out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
 
         out = rearrange(out, "b h n d -> b n (h d)", h=h)
 
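For reference, a minimal standalone sketch (not part of this diff) of what the added sdpa_kernel context does, assuming PyTorch 2.5 or newer, where SDPBackend.CUDNN_ATTENTION and the list form of sdpa_kernel are available. Inside the context, F.scaled_dot_product_attention may only dispatch to the listed backends; because MATH is included as a universal fallback, inputs that no fused kernel supports still run. The tensor shapes below are made up for illustration.

import torch
import torch.nn.functional as F
from torch.nn.attention import SDPBackend, sdpa_kernel

# Same backend list as the one added in both files: allow the fused
# flash / cuDNN / memory-efficient kernels, with MATH as the fallback.
kernels = [SDPBackend.FLASH_ATTENTION, SDPBackend.CUDNN_ATTENTION, SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH]

# Hypothetical cross-attention shapes for illustration: (batch, heads, tokens, head_dim).
q = torch.randn(2, 8, 64, 40)
k = torch.randn(2, 8, 77, 40)
v = torch.randn(2, 8, 77, 40)

# Only the backends listed above may be used inside this context.
with sdpa_kernel(kernels):
    out = F.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)

print(out.shape)  # torch.Size([2, 8, 64, 40])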