prefix_lm.py (forked from pytorch-labs/attention-gym)
"""Generates a prefix LM causal attention mask"""
import torch
from torch.nn.attention.flex_attention import _mask_mod_signature, or_masks
from attn_gym.masks import causal_mask
def generate_prefix_lm_mask(prefix_length: int) -> _mask_mod_signature:
"""Generates a prefix LM causal attention mask.
Args:
prefix_length: The length of the prefix.
Note:
This mask allows full attention within the prefix (first PREFIX_LENGTH tokens)
and causal attention for the rest of the sequence.
"""
def prefix_mask(b, h, q_idx, kv_idx):
return kv_idx < prefix_length
prefix_lm_causal_mask = or_masks(prefix_mask, causal_mask)
prefix_lm_causal_mask.__name__ = f"prefix_lm_causal_mask_{prefix_length}"
return prefix_lm_causal_mask
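
# Illustrative sketch (not part of the upstream attention-gym file): a mask_mod
# returned by generate_prefix_lm_mask is typically compiled into a BlockMask via
# create_block_mask and then passed to flex_attention. The helper name
# `example_flex_attention_usage`, the shapes, and the prefix length below are
# arbitrary demonstration values, and this assumes a PyTorch build where
# flex_attention supports the chosen device.
def example_flex_attention_usage(device: str = "cpu"):
    from torch.nn.attention.flex_attention import create_block_mask, flex_attention

    B, H, SEQ_LEN, HEAD_DIM = 2, 4, 128, 64

    # Tokens 0..15 form the fully visible prefix; the remaining tokens attend causally.
    mask_mod = generate_prefix_lm_mask(prefix_length=16)
    block_mask = create_block_mask(
        mask_mod, B=B, H=H, Q_LEN=SEQ_LEN, KV_LEN=SEQ_LEN, device=device
    )

    query, key, value = (
        torch.randn(B, H, SEQ_LEN, HEAD_DIM, device=device) for _ in range(3)
    )
    return flex_attention(query, key, value, block_mask=block_mask)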
def main(device: str = "cpu"):
    """Visualize the attention scores of prefix LM causal mask mod.

    Args:
        device (str): Device to use for computation. Defaults to "cpu".
    """
    from attn_gym import visualize_attention_scores

    B, H, SEQ_LEN, HEAD_DIM = 1, 1, 12, 8

    def make_tensor():
        return torch.ones(B, H, SEQ_LEN, HEAD_DIM, device=device)

    query, key = make_tensor(), make_tensor()

    prefix_lm_causal_mask = generate_prefix_lm_mask(prefix_length=4)

    visualize_attention_scores(
        query,
        key,
        mask_mod=prefix_lm_causal_mask,
        device=device,
        name="prefix_lm_causal_mask_length_4",
    )


if __name__ == "__main__":
    try:
        from jsonargparse import CLI
    except ImportError:
        raise ImportError("Be sure to run: pip install -e .[viz]")

    CLI(main)