Support SDPA, fix embeddings, output attention probs.
modeling.py (+54 -34)
@@ -374,7 +374,7 @@ class NewEmbeddings(nn.Module):
         if position_ids is None:
             if seq_length > self.position_ids.size(0):
                 self.register_buffer(
-                    "position_ids", torch.arange(seq_length), persistent=False
+                    "position_ids", torch.arange(seq_length, device=embeddings.device), persistent=False
                 )
             if unpad_inputs:
                 # [1, cumsum_seq_len]
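
Note on the hunk above: a minimal sketch of the same buffer pattern, with illustrative names (not from this file). The non-persistent `position_ids` cache is rebuilt directly on the device of the current activations, so `torch.arange` never lands on CPU while the embeddings sit on GPU, and `persistent=False` keeps the buffer out of the checkpoint.

    import torch
    from torch import nn

    class PositionIdsCache(nn.Module):
        """Illustrative only: grow a non-persistent position_ids buffer on demand."""
        def __init__(self, max_len: int = 512):
            super().__init__()
            # persistent=False: the buffer is not saved in the state_dict.
            self.register_buffer("position_ids", torch.arange(max_len), persistent=False)

        def get(self, seq_length: int, device: torch.device) -> torch.Tensor:
            if seq_length > self.position_ids.size(0):
                # Re-create the buffer on the activations' device to avoid a device mismatch.
                self.register_buffer(
                    "position_ids", torch.arange(seq_length, device=device), persistent=False
                )
            return self.position_ids[:seq_length]
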
@@ -397,16 +397,19 @@ class NewEmbeddings(nn.Module):
         if self.type_vocab_size > 0:
             if token_type_ids is None:
                 token_type_ids = position_ids.mul(0)
-
-
+            else:
+                if self.type_vocab_size < 2:
+                    token_type_ids.mul_(0)
+                if unpad_inputs:
+                    token_type_ids = token_type_ids[attention_mask_bool].unsqueeze(0)

             token_type_embeddings = self.token_type_embeddings(token_type_ids)
-            embeddings += token_type_embeddings
+            embeddings = embeddings + token_type_embeddings

         # BERT position
         if self.position_embedding_type == "absolute":
             position_embeddings = self.position_embeddings(position_ids)
-            embeddings += position_embeddings
+            embeddings = embeddings + position_embeddings

         embeddings = self.LayerNorm(embeddings)
         embeddings = self.dropout(embeddings)
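
For the unpadded path in the hunk above, the provided `token_type_ids` are gathered with the boolean attention mask into a single `[1, total_tokens]` row so they line up with the already-unpadded hidden states. A small self-contained sketch (toy tensors, illustrative names):

    import torch

    # Toy batch: two sequences of lengths 3 and 2, padded to length 4.
    attention_mask = torch.tensor([[1, 1, 1, 0],
                                   [1, 1, 0, 0]])
    token_type_ids = torch.tensor([[0, 0, 1, 0],
                                   [1, 1, 0, 0]])

    attention_mask_bool = attention_mask.bool()

    # Unpadded layout: one row holding only the valid tokens, shape [1, 5].
    unpadded = token_type_ids[attention_mask_bool].unsqueeze(0)
    print(unpadded)  # tensor([[0, 0, 1, 1, 1]])
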
@@ -449,19 +452,17 @@ class NewAttention(nn.Module):
         self.memory_efficient_attention = None if xops is None else xops.memory_efficient_attention
         if self.use_memory_efficient_attention:
             assert self.memory_efficient_attention is not None, 'please install xformers'
-        if self.config.unpad_inputs:
-            assert self.config.use_memory_efficient_attention, 'unpad only with xformers'

     def forward(
         self,
         hidden_states: torch.Tensor,
         attention_bias: torch.FloatTensor,
         rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
+        padding_inputs: Optional[Tuple] = None, # indices, batch, seqlen
         attention_scale: Optional[torch.FloatTensor] = None,
         head_mask: Optional[torch.FloatTensor] = None,
         output_attentions: Optional[bool] = False,
         qkv_inputs: Optional[Tuple] = None, # For RetroMAE
-        padding_inputs: Optional[Tuple] = None, # indices, batch, seqlen
     ) -> Tuple[torch.Tensor, ...]:
         shape_hd = (self.num_attention_heads, self.attention_head_size)
         # qkv
@@ -504,7 +505,11 @@ class NewAttention(nn.Module):
                 p=self.dropout.p
             )
         else:
-            context_layer = self._attention(query_states, key_states, value_states, attention_bias, head_mask)
+            if output_attentions and isinstance(self, NewSdpaAttention):
+                raise RuntimeError("SDPA do not output attentions")
+            context_layer, attention_probs = self._attention(
+                query_states, key_states, value_states, attention_bias, head_mask
+            )

         if padding_inputs is not None:
             context_layer = unpad_input(context_layer, indices=padding_inputs[0])
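
Background for the new `RuntimeError` above: fused SDPA kernels never materialize the full `[seq, seq]` probability matrix, so attention probabilities can only be returned by the eager path, which computes the softmax explicitly. A minimal comparison sketch (illustrative shapes, not code from this file):

    import torch
    import torch.nn.functional as F

    q = torch.randn(1, 12, 8, 64)  # [batch, heads, seq, head_dim]
    k = torch.randn(1, 12, 8, 64)
    v = torch.randn(1, 12, 8, 64)

    # Eager attention: the probability matrix exists and can be returned.
    scores = q @ k.transpose(-2, -1) / (64 ** 0.5)
    probs = scores.softmax(dim=-1)        # [batch, heads, seq, seq]
    eager_out = probs @ v

    # Fused SDPA: same output, but the probabilities are never formed as a tensor.
    sdpa_out = F.scaled_dot_product_attention(q, k, v)

    print(torch.allclose(eager_out, sdpa_out, atol=1e-4))  # True
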
@@ -542,7 +547,8 @@ class NewAttention(nn.Module):

         # This is actually dropping out entire tokens to attend to, which might
         # seem a bit unusual, but is taken from the original Transformer paper.
-        attention_probs = self.dropout(attention_probs)
+        if self.dropout.p > 0:
+            attention_probs = self.dropout(attention_probs)

         # Mask heads if we want to
         if head_mask is not None:
@@ -551,7 +557,7 @@ class NewAttention(nn.Module):
         context_layer = torch.matmul(attention_probs, value_states)

         context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
-        return context_layer
+        return context_layer, attention_probs


 class NewSdpaAttention(NewAttention):
@@ -562,11 +568,11 @@ class NewSdpaAttention(NewAttention):
     """
     def __init__(self, config: NewConfig, **kwargs):
         super().__init__(config, **kwargs)
-        torch.backends.cuda.enable_mem_efficient_sdp(False)
-        logger.warning(
-            "Disable memory efficient attention kernel for `NewSdpaAttention`, you can set "
-            "`use_memory_efficient_attention=True` if it expected to use."
-        )
+        # torch.backends.cuda.enable_mem_efficient_sdp(False)
+        # logger.warning(
+        #     "Disable memory efficient attention kernel for `NewSdpaAttention`, you can set "
+        #     "`use_memory_efficient_attention=True` if it expected to use."
+        # )

     def _attention(self, query_states, key_states, value_states, attention_bias, head_mask):
         attn_output = torch.nn.functional.scaled_dot_product_attention(
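
The `_attention` override above wraps `torch.nn.functional.scaled_dot_product_attention`; a short usage sketch with an additive attention bias (shapes and names are illustrative):

    import torch
    import torch.nn.functional as F

    batch, heads, seq, head_dim = 2, 12, 16, 64
    query = torch.randn(batch, heads, seq, head_dim)
    key = torch.randn(batch, heads, seq, head_dim)
    value = torch.randn(batch, heads, seq, head_dim)

    # Additive float mask: 0 where attention is allowed, a large negative value where it is not.
    # Broadcastable to [batch, heads, seq, seq].
    attention_bias = torch.zeros(batch, 1, 1, seq)
    attention_bias[..., seq // 2:] = torch.finfo(torch.float32).min

    attn_output = F.scaled_dot_product_attention(
        query, key, value,
        attn_mask=attention_bias,
        dropout_p=0.0,  # the module uses self.dropout.p only while training
    )
    print(attn_output.shape)  # torch.Size([2, 12, 16, 64])
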
@@ -577,12 +583,12 @@ class NewSdpaAttention(NewAttention):
             dropout_p=self.dropout.p if self.training else 0.0,
         )
         attn_output = attn_output.permute(0, 2, 1, 3).contiguous()
-        return attn_output
+        return attn_output, None


 NEW_ATTENTION_CLASSES = {
     "eager": NewAttention,
-    # "flash_attention_2": , # TODO
+    # "flash_attention_2": , # TODO
     "sdpa": NewSdpaAttention,
 }

@@ -625,8 +631,12 @@ class NewLayer(nn.Module):
         super().__init__()
         if attn_implementation is None:
             attn_implementation = config._attn_implementation
-        if use_memory_efficient_attention is None:
-            use_memory_efficient_attention = config.use_memory_efficient_attention
+        if use_memory_efficient_attention is None:
+            use_memory_efficient_attention = config.use_memory_efficient_attention
+        if use_memory_efficient_attention:
+            if attn_implementation != 'eager':
+                logger.warning_once(f"Override {attn_implementation=} to 'eager' as {use_memory_efficient_attention=}")
+            attn_implementation = 'eager'  # Since it will be SDPA by default for torch>=2.1.1
         self.attention = NEW_ATTENTION_CLASSES[attn_implementation](
             config, pack_qkv=pack_qkv, use_memory_efficient_attention=use_memory_efficient_attention
         )
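
The constructor logic above gives xformers' memory-efficient attention precedence over `config._attn_implementation`, since that kernel runs inside the eager class while 'sdpa' is the default for torch>=2.1.1. A condensed, illustrative restatement (the helper and the string stand-ins are not part of modeling.py):

    # Stand-ins for the real attention classes registered in NEW_ATTENTION_CLASSES.
    ATTENTION_CLASSES = {"eager": "NewAttention", "sdpa": "NewSdpaAttention"}

    def resolve_attention_class(attn_implementation: str, use_memory_efficient_attention: bool) -> str:
        if use_memory_efficient_attention and attn_implementation != "eager":
            # xformers' kernel is invoked from the eager class, so override 'sdpa'.
            attn_implementation = "eager"
        return ATTENTION_CLASSES[attn_implementation]

    print(resolve_attention_class("sdpa", False))  # NewSdpaAttention
    print(resolve_attention_class("sdpa", True))   # NewAttention (xformers path)
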
@@ -646,12 +656,12 @@ class NewLayer(nn.Module):
         hidden_states: torch.Tensor,
         attention_bias: torch.FloatTensor,
         rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
+        padding_inputs: Optional[Tuple] = None, # indices, batch, seqlen
         attention_scale: Optional[torch.FloatTensor] = None,
         subset_indices: Optional[torch.LongTensor] = None,
         head_mask: Optional[torch.FloatTensor] = None,
         output_attentions: Optional[bool] = False,
         qkv_inputs: Optional[Tuple] = None, # For RetroMAE
-        padding_inputs: Optional[Tuple] = None,
     ) -> Tuple[torch.Tensor, ...]:
         # Multi head self attention
         residual = hidden_states if qkv_inputs is None else qkv_inputs[0]
@@ -659,11 +669,11 @@ class NewLayer(nn.Module):
             hidden_states,
             attention_bias,
             rope_embeds,
+            padding_inputs,
             attention_scale,
             head_mask,
             output_attentions=output_attentions,
             qkv_inputs=qkv_inputs,
-            padding_inputs=padding_inputs,
         )
         hidden_states = attention_outputs[0]
         if self.hidden_dropout is not None:
@@ -701,6 +711,7 @@ class NewEncoder(nn.Module):
         hidden_states: torch.Tensor,
         attention_bias: Optional[torch.FloatTensor] = None,
         rope_embeds: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
+        padding_inputs: Optional[Tuple] = None, # indices, batch, seqlen
         attention_scale: Optional[torch.FloatTensor] = None,
         subset_indices: Optional[torch.LongTensor] = None,
         head_mask: Optional[torch.FloatTensor] = None,
@@ -728,6 +739,7 @@ class NewEncoder(nn.Module):
                 hidden_states,
                 attention_bias,
                 rope_embeds,
+                padding_inputs,
                 attention_scale,
                 layer_subset_indices,
                 layer_head_mask,
@@ -737,6 +749,7 @@ class NewEncoder(nn.Module):
                 hidden_states,
                 attention_bias,
                 rope_embeds,
+                padding_inputs,
                 attention_scale,
                 layer_subset_indices,
                 layer_head_mask,
@@ -792,6 +805,7 @@ class NewPreTrainedModel(PreTrainedModel):
     config_class = NewConfig
     base_model_prefix = "new"
     supports_gradient_checkpointing = True
+    _supports_sdpa = True

     def _init_weights(self, module):
         """Initialize the weights"""
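
With `_supports_sdpa = True` on the pretrained base class, recent `transformers` releases accept an explicit `attn_implementation` when loading this remote-code model. A usage sketch (the checkpoint id is a placeholder, not taken from this commit):

    from transformers import AutoModel

    model = AutoModel.from_pretrained(
        "org/checkpoint-using-this-modeling-file",  # placeholder repository id
        trust_remote_code=True,
        attn_implementation="sdpa",  # or "eager" to keep the original attention path
    )
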
@@ -894,9 +908,7 @@ class NewModel(NewPreTrainedModel):
         )

         batch_size, seq_length = input_shape
-
-        if unpad_inputs:
-            assert self.config.use_memory_efficient_attention
+        if unpad_inputs and self.config.use_memory_efficient_attention:
             attention_bias = xops.fmha.attn_bias.BlockDiagonalMask.from_seqlens(length)
         else:
             # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
|
|
| 906 |
# Invalid shape for attention bias: torch.Size([48, 1, 1, 512]) (expected (48, 12, 512, 512))
|
| 907 |
attention_bias = attention_bias.expand(-1, self.config.num_attention_heads, seq_length, -1)
|
| 908 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 909 |
if self.config.logn_attention_scale:
|
| 910 |
-
|
| 911 |
-
|
| 912 |
-
|
| 913 |
-
|
| 914 |
-
|
| 915 |
-
|
| 916 |
-
|
| 917 |
-
|
|
|
|
| 918 |
|
| 919 |
encoder_outputs = self.encoder(
|
| 920 |
embedding_output,
|
| 921 |
attention_bias=attention_bias,
|
| 922 |
rope_embeds=rope_embeds,
|
|
|
|
| 923 |
attention_scale=attention_scale,
|
| 924 |
subset_indices=subset_indices,
|
| 925 |
head_mask=head_mask,
|
|
@@ -929,7 +950,6 @@ class NewModel(NewPreTrainedModel):
         )
         sequence_output = encoder_outputs[0]
         if unpad_inputs and output_padded:
-            indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
             sequence_output = pad_input(
                 sequence_output.squeeze(), indices, batch_size, seq_length
             )
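
The `indices` consumed by `pad_input` above come from flattening the attention mask. A self-contained sketch of that unpad/re-pad round trip (plain tensor ops standing in for the repository's `unpad_input`/`pad_input` helpers):

    import torch

    batch_size, seq_length, hidden = 2, 4, 8
    attention_mask = torch.tensor([[1, 1, 1, 0],
                                   [1, 1, 0, 0]])
    hidden_states = torch.randn(batch_size, seq_length, hidden)

    # Positions of valid tokens in the flattened [batch * seq_length] layout.
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()

    # "Unpad": keep only the valid tokens, shape [total_tokens, hidden].
    unpadded = hidden_states.reshape(-1, hidden)[indices]

    # "Pad": scatter the valid tokens back into a zero-filled [batch, seq, hidden] tensor.
    repadded = hidden_states.new_zeros(batch_size * seq_length, hidden)
    repadded[indices] = unpadded
    repadded = repadded.reshape(batch_size, seq_length, hidden)

    assert torch.equal(repadded[attention_mask.bool()], hidden_states[attention_mask.bool()])
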
|