"""Monkey patch that makes BertSelfAttention return its raw attention scores
(scaled, but taken before the attention mask and softmax) when
output_attentions=True, instead of the softmaxed attention probabilities."""
import math
from typing import Optional, Tuple

import torch
import torch.nn as nn
from transformers.models.bert import modeling_bert
|
|
def patch_bert_self_attn():
    """Swap in a patched BertSelfAttention.forward that exposes the raw attention
    scores (before masking and softmax) whenever output_attentions=True."""

    def bert_self_attn_forward_patched(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ):
        mixed_query_layer = self.query(hidden_states)
|
|
        # Cross-attention pulls keys/values from the encoder (and uses its mask);
        # decoder self-attention can reuse and extend cached key/value states.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # Reuse the cached cross-attention keys/values.
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Decoder self-attention: append the new keys/values to the cache.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        # (batch, seq, hidden) -> (batch, num_heads, seq, head_size)
        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # Cache the current keys/values for the next decoding step.
            past_key_value = (key_layer, value_layer)
|
|
        # Raw dot product between "query" and "key" gives the attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(
                    key_length - 1, dtype=torch.long, device=hidden_states.device
                ).view(-1, 1)
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
|
|
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Keep the scaled scores before the additive mask is applied; this is what
        # the patch returns in place of the softmaxed attention probabilities.
        attn_scores = attention_scores
        if attention_mask is not None:
            # attention_mask is additive (large negative values at masked positions).
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This drops out entire tokens to attend to, as in the original Transformer.
        attention_probs = self.dropout(attention_probs)

        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, num_heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        # Return the raw scores (not the probabilities) when attentions are requested.
        outputs = (context_layer, attn_scores) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
|
|
    modeling_bert.BertSelfAttention.forward = bert_self_attn_forward_patched
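

if __name__ == "__main__":
    # Minimal usage sketch, not part of the patch itself. It assumes the
    # "bert-base-uncased" checkpoint (any BERT checkpoint would do) and that the
    # patch is applied before the forward pass. With the patch in place,
    # outputs.attentions holds the scaled, pre-mask attention scores rather than
    # softmaxed probabilities.
    from transformers import AutoTokenizer, BertModel

    patch_bert_self_attn()
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = BertModel.from_pretrained("bert-base-uncased")
    inputs = tokenizer("Hello world", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs, output_attentions=True)
    raw_scores = outputs.attentions  # one (batch, heads, seq, seq) tensor per layer
    print(len(raw_scores), raw_scores[0].shape)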