"""PyTorch CodeT5+ 2B, 6B, and 16B models.

The implementation is largely based on `transformers.models.codegen.modeling_codegen`,
extended with cross-attention, and on
`transformers.models.encoder_decoder.modeling_encoder_decoder.EncoderDecoderModel`.
"""
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    Seq2SeqLMOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_codet5p import CodeT5pConfig, CodeT5pModuleConfig

logger = logging.get_logger(__name__)

CODET5P_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "Salesforce/codet5p-220m",
    "Salesforce/codet5p-770m",
    "Salesforce/codet5p-2b",
    "Salesforce/codet5p-6b",
    "Salesforce/codet5p-16b",
]


def fixed_pos_embedding(x, seq_dim=1, seq_len=None):
    dim = x.shape[-1]
    if seq_len is None:
        seq_len = x.shape[seq_dim]
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
    sinusoid_inp = (
        torch.einsum("i , j -> i j", torch.arange(seq_len, dtype=torch.float), inv_freq).to(x.device).float()
    )
    return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
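
# Worked example (a sketch, not part of the model): for a head dimension of 8 and
# seq_len=4, `fixed_pos_embedding` returns two (4, 4) tables whose entry [i, j] is
# sin(i / 10000**(2j/8)) and cos(i / 10000**(2j/8)) respectively, i.e. one row of
# rotation angles per position and one column per frequency.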


def rotate_every_two(x):
    x1 = x[:, :, :, ::2]
    x2 = x[:, :, :, 1::2]
    x = torch.stack((-x2, x1), dim=-1)
    return x.flatten(-2)
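
# Worked example (illustrative only): along the last dimension,
# [x0, x1, x2, x3] -> [-x1, x0, -x3, x2], i.e. each even/odd pair (x_{2k}, x_{2k+1})
# is mapped to (-x_{2k+1}, x_{2k}), a 90-degree rotation of that 2-D pair.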


def duplicate_interleave(m):
    """
    A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
    """
    dim0 = m.shape[0]
    m = m.view(-1, 1)
    m = m.repeat(1, 2)
    m = m.view(dim0, -1)
    return m
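
# Worked example (illustrative only): for m = [[a, b], [c, d]],
# duplicate_interleave(m) = [[a, a, b, b], [c, c, d, d]], matching the interleaved
# (x_{2k}, x_{2k+1}) layout that `rotate_every_two` expects.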


def apply_rotary_pos_emb(x, sincos, offset=0):
    sin, cos = (duplicate_interleave(t)[None, offset: x.shape[1] + offset, None, :] for t in sincos)
    return (x * cos) + (rotate_every_two(x) * sin)
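
# Putting the three helpers together (a sketch of the rotary identity, assuming the
# interleaved layout above): for position i and frequency theta_j, each pair
# (x_{2j}, x_{2j+1}) is mapped to
#   (x_{2j} * cos(i * theta_j) - x_{2j+1} * sin(i * theta_j),
#    x_{2j+1} * cos(i * theta_j) + x_{2j} * sin(i * theta_j)),
# i.e. a rotation by the position-dependent angle i * theta_j.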


class CodeT5pAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False, is_decoder=True):
        super().__init__()

        max_positions = config.max_position_embeddings
        # Lower-triangular mask used to enforce causality in decoder self-attention.
        self.register_buffer(
            "causal_mask",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
                1, 1, max_positions, max_positions
            ),
        )

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_attention_heads
        if self.head_dim * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by `num_attention_heads` (got `embed_dim`: {self.embed_dim} and"
                f" `num_attention_heads`: {self.num_attention_heads})."
            )

        self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
        self.is_decoder = is_decoder
        self.is_cross_attention = is_cross_attention
        if self.is_cross_attention:
            # Cross-attention: keys and values come from the encoder states, so `qkv_proj`
            # only produces K and V (2x embed_dim); queries get their own projection.
            self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 2, bias=False)
            self.q_attn = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        else:
            # Self-attention: a single fused projection produces Q, K, and V.
            self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)

        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
        self.rotary_dim = None
        if config.rotary_dim is not None:
            self.rotary_dim = config.rotary_dim

    def _split_heads(self, x, n_head, dim_head, mp_num):
        reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
        reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
        return reshaped

    def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
        """
        Merges the attn_head_size and num_attention_heads dims back into hidden_size.
        """
        if len(tensor.shape) == 5:
            tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
        elif len(tensor.shape) == 4:
            tensor = tensor.permute(0, 2, 1, 3).contiguous()
        else:
            raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
        new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
        return tensor.view(new_shape)

    def _attn(
        self,
        query,
        key,
        value,
        attention_mask=None,
        head_mask=None,
    ):
        # Compute attention weights in fp32 to avoid overflow in half precision.
        query = query.to(torch.float32)
        key = key.to(torch.float32)

        attn_weights = torch.matmul(query, key.transpose(-1, -2))
        attn_weights = attn_weights / self.scale_attn

        if not self.is_cross_attention and self.is_decoder:
            # Slice the precomputed causal mask to the current query/key lengths.
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.causal_mask[:, :, key_length - query_length: key_length, :key_length]
            # Mask future positions with the most negative representable value,
            # so they vanish after the softmax.
            mask_value = torch.finfo(attn_weights.dtype).min
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask.bool(), attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the (additive) attention mask.
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.Softmax(dim=-1)(attn_weights)
        attn_weights = attn_weights.to(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if requested.
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        attention_mask: Optional[torch.FloatTensor] = None,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[
        Tuple[torch.Tensor, Tuple[torch.Tensor]],
        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
    ]:
        if encoder_hidden_states is not None:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `CodeT5pAttention(..., is_cross_attention=True)`."
                )

            # `mp_num` reflects the model-parallel sharding of the fused qkv weights
            # inherited from the CodeGen checkpoints: the projection is stored as
            # `mp_num` shards, each containing a slice of Q, K, and V.
            mp_num = 4
            local_dim = self.head_dim * self.num_attention_heads // mp_num
            # Queries come from the decoder hidden states...
            q = self.q_attn(hidden_states)
            q_split = q.reshape(q.shape[:-1] + (mp_num, -1))
            query = torch.split(q_split, local_dim, dim=-1)[0]

            # ...while keys and values are projected from the encoder hidden states.
            qkv = self.qkv_proj(encoder_hidden_states)
            qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
            value, key = torch.split(qkv_split, local_dim, dim=-1)

            attention_mask = encoder_attention_mask
        else:
            qkv = self.qkv_proj(hidden_states)
            mp_num = 4
            qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))

            local_dim = self.head_dim * self.num_attention_heads // mp_num
            # Note: the split order (query, value, key) follows the checkpoint layout.
            query, value, key = torch.split(qkv_split, local_dim, dim=-1)

        query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)

        value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
        value = value.permute(0, 2, 1, 3)

        seq_len = key.shape[1]
        offset = 0

        if layer_past is not None:
            # Continue position counting from the cached keys.
            offset = layer_past[0].shape[-2]
            seq_len += offset

        if self.rotary_dim is not None:
            # Apply rotary embeddings only to the first `rotary_dim` channels.
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim:]

            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim:]

            sincos = fixed_pos_embedding(k_rot, 1, seq_len=seq_len)
            k_rot = apply_rotary_pos_emb(k_rot, sincos, offset=offset)
            seq_len_q = query.shape[1]
            sincos_q = fixed_pos_embedding(q_rot, 1, seq_len=seq_len_q)
            q_rot = apply_rotary_pos_emb(q_rot, sincos_q, offset=offset)

            key = torch.cat([k_rot, k_pass], dim=-1)
            query = torch.cat([q_rot, q_pass], dim=-1)
        else:
            sincos = fixed_pos_embedding(key, 1, seq_len=seq_len)
            key = apply_rotary_pos_emb(key, sincos, offset=offset)
            query = apply_rotary_pos_emb(query, sincos, offset=offset)

        key = key.permute(0, 2, 1, 3)
        query = query.permute(0, 2, 1, 3)

        if layer_past is not None:
            # Prepend the cached keys/values from previous decoding steps.
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = torch.cat((past_key, key), dim=-2)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        # Compute self- or cross-attention: softmax(QK^T / sqrt(d)) V.
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # attn_output, present, (attentions)
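
    # Shape walk-through (a sketch, assuming batch size B, sequence length T, hidden
    # size H, and N heads with D = H // N): `qkv_proj` maps (B, T, H) -> (B, T, 3H);
    # after `_split_heads`, query/key/value are (B, T, N, D); rotary embeddings are
    # applied over the T dimension; after the permutes, `_attn` sees (B, N, T, D) and
    # returns (B, N, T, D), which `_merge_heads` folds back to (B, T, H) for `out_proj`.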


class CodeT5pMLP(nn.Module):
    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.n_embd

        self.fc_in = nn.Linear(embed_dim, intermediate_size)
        self.fc_out = nn.Linear(intermediate_size, embed_dim)

        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
        hidden_states = self.fc_in(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc_out(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class CodeT5pBlock(nn.Module):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
        self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        if config.is_decoder is False:
            self.attn = CodeT5pAttention(config, is_cross_attention=False, is_decoder=False)
        else:
            self.attn = CodeT5pAttention(config)
        self.mlp = CodeT5pMLP(inner_dim, config)

        # Cross-attention is only instantiated in the final decoder layer.
        self.add_cross_attention_by_layer = (
            config.add_cross_attention and layer_idx == config.n_layer - 1
        )

        if self.add_cross_attention_by_layer:
            self.crossattention = CodeT5pAttention(config, is_cross_attention=True)

    def forward(
        self,
        hidden_states: Optional[torch.FloatTensor],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # attn_output, present, (attentions)
        outputs = attn_outputs[1:]
        # The MLP runs in parallel with attention on the same normalized input
        # (GPT-J/CodeGen-style parallel residual).
        feed_forward_hidden_states = self.mlp(hidden_states)

        if encoder_hidden_states is not None and self.add_cross_attention_by_layer:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
                    "cross-attention layers by setting `config.add_cross_attention=True`"
                )

            cross_attn_outputs = self.crossattention(
                hidden_states,
                attention_mask=attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
            )
            xattn_output = cross_attn_outputs[0]
            attn_output = attn_output + xattn_output
            # Add cross-attention weights if we output attention weights.
            outputs = outputs + cross_attn_outputs[2:]

        hidden_states = attn_output + feed_forward_hidden_states + residual

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions, cross_attentions)
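
    # In equations (a sketch of the block above): with x the block input,
    #   h = LayerNorm(x)
    #   y = x + SelfAttn(h) [+ CrossAttn(h, enc)] + MLP(h)
    # so attention and the MLP read the same normalized input and their outputs are
    # summed with the residual, rather than being applied sequentially.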


class CodeT5pPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = CodeT5pModuleConfig
    base_model_prefix = "transformer"
    supports_gradient_checkpointing = True
    _no_split_modules = ["CodeT5pBlock"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear,)):
            # Initialize linear layers with a normal distribution, zeroing any bias.
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, CodeT5pModel):
            module.gradient_checkpointing = value


class CodeT5pModel(CodeT5pPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embed_dim = config.n_embd
        self.vocab_size = config.vocab_size
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([CodeT5pBlock(config, idx) for idx in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
        self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)

        self.gradient_checkpointing = False

        # Initialize weights and apply final processing.
        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)

        if position_ids is None:
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Self-attention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # Make the 2D mask broadcastable to
            # [batch_size, num_heads, from_seq_length, to_seq_length].
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this creates a tensor that is 0.0 for positions we want
            # to attend and the dtype's minimum for masked positions, which is then
            # simply added to the raw attention scores.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # If a 2D or 3D attention mask is provided for the cross-attention, make it
        # broadcastable to [batch_size, num_heads, seq_length, seq_length].
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        # Prepare the head mask if needed (1.0 in head_mask means we keep the head);
        # shape: n_layer x batch x num_attention_heads x N x N.
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        hidden_states = inputs_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention and block.add_cross_attention_by_layer:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(output_shape)
        # Add the last hidden state.
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class CodeT5pForCausalLM(CodeT5pPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.causal_mask"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = CodeT5pModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)

        # Initialize weights and apply final processing.
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        # Only keep the last token of input_ids when past key values are provided.
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # Create position_ids on the fly for batch generation.
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None
        return {
            "input_ids": input_ids,
            "past_key_values": past_key_values,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # Compute logits in fp32 so that sampling works correctly in half precision.
        lm_logits = self.lm_head(hidden_states).to(torch.float32)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past_key_values
        )


def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    if decoder_start_token_id is None:
        raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
    shifted_input_ids[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
    # Replace possible -100 values in labels by `pad_token_id`.
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids
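
# Worked example (illustrative values only): with decoder_start_token_id=1 and
# pad_token_id=0, labels [[5, 6, -100, -100]] shift to [[1, 5, 6, -100]], and the
# remaining -100 (ignored-loss) position is replaced by the pad token: [[1, 5, 6, 0]].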


class CodeT5pEncoderDecoderModel(PreTrainedModel):
    config_class = CodeT5pConfig
    _no_split_modules = ["CodeT5pBlock"]

    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        encoder: Optional[PreTrainedModel] = None,
        decoder: Optional[PreTrainedModel] = None,
    ):
        if config is None and (encoder is None or decoder is None):
            raise ValueError("Either a configuration or an encoder and a decoder has to be provided.")
        if config is None:
            config = CodeT5pConfig.from_encoder_decoder_configs(encoder.config, decoder.config)
        else:
            if not isinstance(config, self.config_class):
                raise ValueError(f"Config: {config} has to be of type {self.config_class}")

        if config.decoder.cross_attention_hidden_size is not None:
            if config.decoder.cross_attention_hidden_size != config.encoder.hidden_size:
                raise ValueError(
                    "If `cross_attention_hidden_size` is specified in the decoder's configuration, it has to be equal"
                    f" to the encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
                    f" `config.decoder.cross_attention_hidden_size` and {config.encoder.hidden_size} for"
                    " `config.encoder.hidden_size`."
                )

        # Initialize with config.
        super().__init__(config)

        if encoder is None:
            encoder = CodeT5pModel(config.encoder)

        if decoder is None:
            decoder = CodeT5pForCausalLM(config.decoder)

        self.encoder = encoder
        self.decoder = decoder

        if self.encoder.config.to_dict() != self.config.encoder.to_dict():
            logger.warning(
                f"Config of the encoder: {self.encoder.__class__} is overwritten by shared encoder config:"
                f" {self.config.encoder}"
            )
        if self.decoder.config.to_dict() != self.config.decoder.to_dict():
            logger.warning(
                f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
                f" {self.config.decoder}"
            )

        # Make sure that the individual models' configs refer to the shared config
        # so that updates to the config are synced.
        self.encoder.config = self.config.encoder
        self.decoder.config = self.config.decoder

        # Encoder outputs might need to be projected to a different dimension for the decoder.
        if (
            self.encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            self.enc_to_dec_proj = nn.Linear(self.encoder.config.hidden_size, self.decoder.config.hidden_size)

        if self.encoder.get_output_embeddings() is not None:
            raise ValueError(
                f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head"
            )

        # Tie encoder and decoder weights if requested by the config.
        self.tie_weights()

    def tie_weights(self):
        # Tie the encoder and decoder base model weights if configured to do so.
        if self.config.tie_encoder_decoder:
            # The base model prefix is used to strip decoder-specific naming.
            decoder_base_model_prefix = self.decoder.base_model_prefix
            self._tie_encoder_decoder_weights(
                self.encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix
            )

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def get_input_embeddings(self):
        return self.encoder.get_input_embeddings()

    def get_output_embeddings(self):
        return self.decoder.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        return self.decoder.set_output_embeddings(new_embeddings)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # At the moment, fast initialization is not supported for composite models.
        if kwargs.get("_fast_init", False):
            logger.warning(
                "Fast initialization is currently not supported for EncoderDecoderModel. "
                "Falling back to slow initialization..."
            )
        kwargs["_fast_init"] = False
        return super().from_pretrained(*args, **kwargs)

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, Seq2SeqLMOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        kwargs_encoder = {argument: value for argument, value in kwargs.items() if not argument.startswith("decoder_")}

        kwargs_decoder = {
            argument[len("decoder_"):]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                **kwargs_encoder,
            )
        elif isinstance(encoder_outputs, tuple):
            encoder_outputs = BaseModelOutput(*encoder_outputs)

        encoder_hidden_states = encoder_outputs[0]

        # Optionally project encoder_hidden_states to the decoder's hidden size.
        if (
            self.encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

        if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
            decoder_input_ids = shift_tokens_right(
                labels, self.config.pad_token_id, self.config.decoder_start_token_id
            )

        # Decode.
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            use_cache=use_cache,
            past_key_values=past_key_values,
            return_dict=return_dict,
            **kwargs_decoder,
        )

        # Compute the loss independent of the decoder (which shifts logits internally).
        loss = None
        if labels is not None:
            logits = decoder_outputs.logits if return_dict else decoder_outputs[0]
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.reshape(-1, self.decoder.config.vocab_size), labels.view(-1))

        if not return_dict:
            if loss is not None:
                return (loss,) + decoder_outputs + encoder_outputs
            else:
                return decoder_outputs + encoder_outputs

        return Seq2SeqLMOutput(
            loss=loss,
            logits=decoder_outputs.logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)

    def prepare_inputs_for_generation(
        self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
    ):
        decoder_inputs = self.decoder.prepare_inputs_for_generation(input_ids, past_key_values=past)
        decoder_attention_mask = decoder_inputs["attention_mask"] if "attention_mask" in decoder_inputs else None
        input_dict = {
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "decoder_input_ids": decoder_inputs["input_ids"],
            "encoder_outputs": encoder_outputs,
            "past_key_values": decoder_inputs["past_key_values"],
            "use_cache": use_cache,
        }
        return input_dict

    def resize_token_embeddings(self, *args, **kwargs):
        raise NotImplementedError(
            "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
            " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
            " model.decoder.resize_token_embeddings(...))"
        )

    def _reorder_cache(self, past, beam_idx):
        # Apply decoder cache reordering here.
        return self.decoder._reorder_cache(past, beam_idx)
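

# Minimal usage sketch (not part of the modeling code). It assumes the checkpoint
# names from CODET5P_PRETRAINED_MODEL_ARCHIVE_LIST are available on the Hugging Face
# Hub and that this module is loaded via `trust_remote_code=True`; adjust the
# checkpoint, generation settings, and device to your setup.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    checkpoint = "Salesforce/codet5p-2b"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint, trust_remote_code=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint, trust_remote_code=True)

    encoding = tokenizer("def print_hello_world():", return_tensors="pt")
    # The released seq2seq checkpoints feed the prompt to the decoder as well.
    encoding["decoder_input_ids"] = encoding["input_ids"].clone()
    outputs = model.generate(**encoding, max_length=32)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))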