from functools import wraps
from packaging import version
from collections import namedtuple

import torch
from torch import nn, einsum
import torch.nn.functional as F

from einops import rearrange, reduce

# constants

FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])

# helpers

def exists(val):
    return val is not None

def once(fn):
    called = False
    @wraps(fn)
    def inner(x):
        nonlocal called
        if called:
            return
        called = True
        return fn(x)
    return inner

print_once = once(print)

# main class

class Attend(nn.Module):
    def __init__(
        self,
        dropout = 0.,
        flash = False,
        l2_dist = False
    ):
        super().__init__()
        assert not (flash and l2_dist), 'flash attention is not compatible with l2 distance'
        self.l2_dist = l2_dist

        self.dropout = dropout
        self.attn_dropout = nn.Dropout(dropout)

        self.flash = flash
        assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'

        # determine efficient attention configs for cuda and cpu

        self.cpu_config = FlashAttentionConfig(True, True, True)
        self.cuda_config = None

        if not torch.cuda.is_available() or not flash:
            return

        device_properties = torch.cuda.get_device_properties(torch.device('cuda'))

        if device_properties.major == 8 and device_properties.minor == 0:
            print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
            self.cuda_config = FlashAttentionConfig(True, False, False)
        else:
            print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
            self.cuda_config = FlashAttentionConfig(False, True, True)

    def flash_attn(self, q, k, v, mask = None):
        _, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device

        # Check if mask exists and expand to compatible shape
        # The mask is B L, so it would have to be expanded to B H N L

        if exists(mask):
            mask = mask.expand(-1, heads, q_len, -1)

        # Check if there is a compatible device for flash attention

        config = self.cuda_config if is_cuda else self.cpu_config

        # pytorch 2.0 flash attn: q, k, v, mask, dropout, softmax_scale

        with torch.backends.cuda.sdp_kernel(**config._asdict()):
            out = F.scaled_dot_product_attention(
                q, k, v,
                attn_mask = mask,
                dropout_p = self.dropout if self.training else 0.
            )

        return out

    def forward(self, q, k, v, mask = None):
        """
        einstein notation
        b - batch
        h - heads
        n, i, j - sequence length (base sequence length, source, target)
        d - feature dimension
        """

        q_len, k_len, device = q.shape[-2], k.shape[-2], q.device

        scale = q.shape[-1] ** -0.5

        if exists(mask) and mask.ndim != 4:
            mask = rearrange(mask, 'b j -> b 1 1 j')

        if self.flash:
            return self.flash_attn(q, k, v, mask = mask)

        # similarity

        sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale

        # l2 distance

        if self.l2_dist:
            # negative squared l2 distance: -||q - k||^2 == -q^2 + 2qk - k^2
            # so simply work off the qk similarity computed above
            q_squared = reduce(q ** 2, 'b h i d -> b h i 1', 'sum')
            k_squared = reduce(k ** 2, 'b h j d -> b h 1 j', 'sum')
            sim = sim * 2 - q_squared - k_squared
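            # illustrative check of the identity (scalars, assuming scale = 1):
            # for q = 3, k = 1: -(3 - 1)^2 = -4 and 2 * (3 * 1) - 3^2 - 1^2 = -4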

        # key padding mask

        if exists(mask):
            sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)

        # attention

        attn = sim.softmax(dim=-1)
        attn = self.attn_dropout(attn)

        # aggregate values

        out = einsum("b h i j, b h j d -> b h i d", attn, v)

        return out
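
# example usage (illustrative sketch): 4d query / key / value tensors of shape
# (batch, heads, seq_len, dim_head), with an optional boolean key padding mask of
# shape (batch, seq_len) where True marks positions to keep

if __name__ == '__main__':
    attend = Attend(dropout = 0.1, flash = False)

    q = torch.randn(2, 8, 1024, 64)
    k = torch.randn(2, 8, 1024, 64)
    v = torch.randn(2, 8, 1024, 64)

    mask = torch.ones(2, 1024, dtype = torch.bool)

    out = attend(q, k, v, mask = mask)
    assert out.shape == (2, 8, 1024, 64)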