|
|
| """ PyTorch CrystalCoder model.""" |
|
|
| import math |
| import os |
| import warnings |
| from typing import Optional, Tuple, Union |
|
|
| import torch |
| from torch import Tensor, nn |
| from torch.cuda.amp import autocast |
| from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
|
|
| from transformers.activations import ACT2FN |
| from transformers.modeling_outputs import ( |
| BaseModelOutputWithPastAndCrossAttentions, |
| CausalLMOutputWithCrossAttentions, |
| QuestionAnsweringModelOutput, |
| SequenceClassifierOutputWithPast, |
| TokenClassifierOutput, |
| ) |
| from transformers.modeling_utils import PreTrainedModel |
| from transformers.pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer |
| from transformers.utils import ( |
| add_code_sample_docstrings, |
| add_start_docstrings, |
| add_start_docstrings_to_model_forward, |
| logging, |
| ) |
| from transformers.utils.model_parallel_utils import assert_device_map, get_device_map |
| from .configuration_crystalcoder import CrystalCoderConfig |
| |
|
|
|
|
| logger = logging.get_logger(__name__) |
|
|
|
|
| _CONFIG_FOR_DOC = "CrystalCoderConfig" |
|
|
|
|
| def _duplicate_interleave(m): |
| """ |
| A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy. |
| """ |
| dim0 = m.shape[0] |
| m = m.view(-1, 1) |
| m = m.repeat(1, 2) |
| m = m.view(dim0, -1) |
| return m |
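

# Illustrative, hedged sketch (a hypothetical helper, not used by the model):
# `_duplicate_interleave` expands a (seq, dim/2) table of angles into the
# interleaved (seq, dim) layout that the rotation below expects,
# e.g. [[a, b]] -> [[a, a, b, b]].
def _demo_duplicate_interleave():
    m = torch.tensor([[1.0, 2.0]])
    assert _duplicate_interleave(m).tolist() == [[1.0, 1.0, 2.0, 2.0]]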
|
|
|
|
class RotaryPositionEmbeddingHelper:
    """Caches the interleaved RoPE sin/cos tables and rotates query/key tensors in place of learned positions."""

    def __init__(self, max_position_embeddings, rotary_dim, base=10000):
        self.max_position_embeddings = max_position_embeddings
        self.rotary_dim = rotary_dim
        self.base = base
        self.sin_cached = None
        self.cos_cached = None
| |
|
|
| def create_fixed_pos_emb(self, x, offset): |
        if (
            self.sin_cached is not None
            and self.cos_cached is not None
            and x.device == self.sin_cached.device
            and x.dtype == self.sin_cached.dtype  # tables are cached per device/dtype
        ):
            sin, cos = self.sin_cached, self.cos_cached
        else:
            # (Re)build the sin/cos tables lazily, on first use or after a device/dtype change.
            device = x.device
|
|
| inv_freq = 1.0 / ( |
| self.base |
| ** ( |
| torch.arange(0, self.rotary_dim, 2, device=device) |
| / self.rotary_dim |
| ) |
| ) |
| sinusoid_inp = torch.einsum( |
| "i , j -> i j", |
| torch.arange(self.max_position_embeddings, device=device), |
| inv_freq, |
| ) |
| sin, cos = ( |
| torch.sin(sinusoid_inp).to(x.dtype), |
| torch.cos(sinusoid_inp).to(x.dtype), |
| ) |
|
|
            # Duplicate each frequency so it lines up with the interleaved (x1, x2) channel pairs.
            sin, cos = map(_duplicate_interleave, (sin, cos))

            self.sin_cached = sin
            self.cos_cached = cos
| |
| assert ( |
| self.max_position_embeddings >= x.shape[1] + offset |
| ), "RoPE requires max position embeddings ({}) >= sequence length ({}) + offset ({})".format( |
| self.max_position_embeddings, x.shape[1], offset, |
| ) |
|
|
| def slice_at_offset(t): |
| return t[None, offset : x.shape[1] + offset, None, :] |
| |
| sin, cos = map(slice_at_offset, (sin, cos)) |
|
|
| return sin, cos |
|
|
| def _apply_rotary_pos_emb(self, x, offset=0): |
| def rotate_every_two(x): |
| x1 = x[:, :, :, ::2] |
| x2 = x[:, :, :, 1::2] |
| x = torch.stack((-x2, x1), dim=-1) |
| |
| return x.flatten(-2) |
|
|
        sin, cos = self.create_fixed_pos_emb(x, offset)
        seq_len = x.size(1)
        sin = sin[:, :seq_len]
        cos = cos[:, :seq_len]
|
|
| |
| return (x * cos) + (rotate_every_two(x) * sin) |
|
|
| def rotate_tensor(self, x, offset=0): |
| assert ( |
| len(x.shape) == 4 |
| ), "Tensor should be of shape [batch_size, seq_length, num_heads, head_dim] !" |
| x_rotary = x[:, :, :, : self.rotary_dim] |
| x_pass = x[:, :, :, self.rotary_dim :] |
| x_rotated = self._apply_rotary_pos_emb( |
| x_rotary, offset=offset |
| ) |
| x = torch.cat([x_rotated, x_pass], dim=-1) |
| return x |
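

# Hedged usage sketch for `RotaryPositionEmbeddingHelper` (a hypothetical demo,
# not part of the model); sizes are illustrative. Only the first `rotary_dim`
# channels of each head are rotated; the remainder passes through unchanged.
def _demo_rotary_helper():
    helper = RotaryPositionEmbeddingHelper(max_position_embeddings=8, rotary_dim=4)
    x = torch.randn(2, 3, 2, 6)  # [batch_size, seq_length, num_heads, head_dim]
    rotated = helper.rotate_tensor(x, offset=0)
    assert rotated.shape == x.shape
    assert torch.equal(rotated[..., 4:], x[..., 4:])  # pass-through channels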
|
|
|
|
| class SwiGLUActivation(nn.Module): |
| def forward(self, x1: Tensor, x2: Tensor) -> Tensor: |
| return x1 * nn.functional.silu(x2) |
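

# Minimal hedged sketch (hypothetical demo): SwiGLU gates one projection with
# the SiLU of a second one, i.e. swiglu(x1, x2) = x1 * silu(x2).
# `CrystalCoderMLP` below feeds it from two separate Conv1D branches.
def _demo_swiglu():
    act = SwiGLUActivation()
    x1, x2 = torch.randn(2, 4), torch.randn(2, 4)
    assert torch.allclose(act(x1, x2), x1 * nn.functional.silu(x2))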
|
|
|
|
| class AlibiPositionEmbeddingLayer(nn.Module): |
| def __init__(self, num_heads): |
        super().__init__()
|
|
| self.num_heads = num_heads |
| slopes = torch.tensor(AlibiPositionEmbeddingLayer._get_alibi_slopes(num_heads)).unsqueeze(-1) |
| self.slopes = nn.parameter.Parameter(slopes, requires_grad=False) |
|
|
| def forward( |
| self, |
| seq_length, |
| key_length, |
| cached_qk_len, |
| ): |
| context_position = torch.arange( |
| cached_qk_len, cached_qk_len + seq_length, device=self.slopes.device |
| )[:, None] |
| memory_position = torch.arange( |
| key_length + cached_qk_len, device=self.slopes.device |
| )[None, :] |
| relative_position = memory_position - context_position |
| relative_position = torch.abs(relative_position).unsqueeze(0).expand(self.num_heads, -1, -1) |
| alibi = (self.slopes * -1.0).unsqueeze(1) * relative_position |
| return alibi |
|
|
| @staticmethod |
| def _get_alibi_slopes(n): |
| def get_slopes_power_of_2(n): |
| start = 2 ** (-(2 ** -(math.log2(n) - 3))) |
| ratio = start |
| return [start * ratio**i for i in range(n)] |
|
|
| if math.log2(n).is_integer(): |
| return get_slopes_power_of_2( |
| n |
| ) |
| else: |
| closest_power_of_2 = 2 ** math.floor( |
| math.log2(n) |
| ) |
| return ( |
| get_slopes_power_of_2(closest_power_of_2) |
| + AlibiPositionEmbeddingLayer._get_alibi_slopes(2 * closest_power_of_2)[0::2][: n - closest_power_of_2] |
| ) |
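

# Hedged sketch of the ALiBi layer above (hypothetical demo, not used by the
# model). For a power-of-two head count the slopes form a geometric sequence,
# and the forward pass returns one (query, key) bias matrix per head.
def _demo_alibi():
    layer = AlibiPositionEmbeddingLayer(num_heads=4)
    assert AlibiPositionEmbeddingLayer._get_alibi_slopes(4) == [0.25, 0.0625, 0.015625, 0.00390625]
    bias = layer(seq_length=5, key_length=5, cached_qk_len=0)
    assert bias.shape == (4, 5, 5)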
|
|
|
|
| def load_tf_weights_in_crystalcoder(model, config, crystalcoder_checkpoint_path): |
| """Load tf checkpoints in a pytorch model""" |
| try: |
| import re |
|
|
| import tensorflow as tf |
| except ImportError: |
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
| raise |
| tf_path = os.path.abspath(crystalcoder_checkpoint_path) |
| logger.info(f"Converting TensorFlow checkpoint from {tf_path}") |
| |
| init_vars = tf.train.list_variables(tf_path) |
| names = [] |
| arrays = [] |
| for name, shape in init_vars: |
| logger.info(f"Loading TF weight {name} with shape {shape}") |
| array = tf.train.load_variable(tf_path, name) |
| names.append(name) |
| arrays.append(array.squeeze()) |
|
|
| for name, array in zip(names, arrays): |
        name = name[6:]  # skip the "model/" prefix
| name = name.split("/") |
| pointer = model |
| for m_name in name: |
| if re.fullmatch(r"[A-Za-z]+\d+", m_name): |
| scope_names = re.split(r"(\d+)", m_name) |
| else: |
| scope_names = [m_name] |
| if scope_names[0] == "w" or scope_names[0] == "g": |
| pointer = getattr(pointer, "weight") |
| elif scope_names[0] == "b": |
| pointer = getattr(pointer, "bias") |
| elif scope_names[0] == "wpe" or scope_names[0] == "wte": |
| pointer = getattr(pointer, scope_names[0]) |
| pointer = getattr(pointer, "weight") |
| else: |
| pointer = getattr(pointer, scope_names[0]) |
| if len(scope_names) >= 2: |
| num = int(scope_names[1]) |
| pointer = pointer[num] |
| try: |
| assert ( |
| pointer.shape == array.shape |
| ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" |
| except AssertionError as e: |
| e.args += (pointer.shape, array.shape) |
| raise |
| logger.info(f"Initialize PyTorch weight {name}") |
| pointer.data = torch.from_numpy(array) |
| return model |
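

# Hedged usage sketch for the TF loader above; the checkpoint path is a
# placeholder, not a real artifact shipped with this file:
#
#     model = CrystalCoderLMHeadModel(CrystalCoderConfig())
#     model = load_tf_weights_in_crystalcoder(model, model.config, "/path/to/tf_checkpoint")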
|
|
|
|
| class CrystalCoderAttention(nn.Module): |
| def __init__(self, config, is_cross_attention=False, layer_idx=None): |
| super().__init__() |
|
|
| max_positions = config.max_position_embeddings |
| self.register_buffer( |
| "bias", |
| torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view( |
| 1, 1, max_positions, max_positions |
| ), |
| persistent=False, |
| ) |
| self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) |
|
|
| self.embed_dim = config.hidden_size |
| self.num_heads = config.num_attention_heads |
| self.head_dim = self.embed_dim // self.num_heads |
| self.split_size = self.embed_dim |
| if self.head_dim * self.num_heads != self.embed_dim: |
| raise ValueError( |
| f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" |
| f" {self.num_heads})." |
| ) |
| if config.position_embedding_type == "rotary": |
| rotary_dim = config.rotary_dim or self.head_dim |
| self.rope_helper = RotaryPositionEmbeddingHelper(max_positions, rotary_dim) |
| else: |
| self.rope_helper = None |
|
|
|
|
| self.scale_attn_weights = config.scale_attn_weights |
| self.is_cross_attention = is_cross_attention |
|
|
        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
|
|
        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        # The output projection is shared by the self- and cross-attention variants.
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
|
|
| self.attn_dropout = nn.Dropout(config.attn_pdrop) |
| self.resid_dropout = nn.Dropout(config.resid_pdrop) |
|
|
| self.pruned_heads = set() |
|
|
| self.attn_scale_power = 1.0 if config.mup_scale_qk_dot_by_d else 0.5 |
|
|
| def prune_heads(self, heads): |
| if len(heads) == 0: |
| return |
| heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads) |
| index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)]) |
|
|
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
        self.num_heads = self.num_heads - len(heads)
| self.pruned_heads = self.pruned_heads.union(heads) |
|
|
    def _attn(self, query, key, value, attention_mask=None, head_mask=None, position_bias=None):
        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        if self.scale_attn_weights:
            attn_weights = attn_weights / torch.full(
                [], value.size(-1) ** self.attn_scale_power, dtype=attn_weights.dtype, device=attn_weights.device
            )

        # Layer-wise attention scaling
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)

        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Needs to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Needs to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights.to(attn_weights.dtype), mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        if position_bias is not None:
            # Add the per-head ALiBi bias, broadcast over the batch dimension
            attn_weights += position_bias.type_as(attn_weights).unsqueeze(0)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights
|
|
    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None, position_bias=None):
        # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
        bsz, num_heads, q_seq_len, dk = query.size()
        _, _, k_seq_len, _ = key.size()

        # Preallocate attn_weights for `baddbmm`
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)

        # Compute Scale Factor
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** self.attn_scale_power

        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)

        # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
        with autocast(enabled=False):
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)

        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Needs to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        if position_bias is not None:
            attn_weights += position_bias.type_as(attn_weights).unsqueeze(0)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights
|
|
| def _split_heads(self, tensor, num_heads, attn_head_size): |
| """ |
| Splits hidden_size dim into attn_head_size and num_heads |
| """ |
| new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) |
| tensor = tensor.view(new_shape) |
| return tensor |
|
|
| def _merge_heads(self, tensor, num_heads, attn_head_size): |
| """ |
| Merges attn_head_size dim and num_attn_heads dim into hidden_size |
| """ |
| tensor = tensor.permute(0, 2, 1, 3).contiguous() |
| new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) |
| return tensor.view(new_shape) |
|
|
| def forward( |
| self, |
| hidden_states: Optional[Tuple[torch.FloatTensor]], |
| layer_past: Optional[Tuple[torch.Tensor]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = False, |
| output_attentions: Optional[bool] = False, |
| position_bias: Optional[torch.FloatTensor] = None, |
| ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: |
| if encoder_hidden_states is not None: |
| if not hasattr(self, "q_attn"): |
| raise ValueError( |
| "If class is used as cross attention, the weights `q_attn` have to be defined. " |
| "Please make sure to instantiate class with `CrystalCoderAttention(..., is_cross_attention=True)`." |
| ) |
|
|
| query = self.q_attn(hidden_states) |
| key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2) |
| attention_mask = encoder_attention_mask |
| else: |
| query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) |
|
|
| query = self._split_heads(query, self.num_heads, self.head_dim) |
| key = self._split_heads(key, self.num_heads, self.head_dim) |
| value = self._split_heads(value, self.num_heads, self.head_dim) |
|
|
        # Apply rotary embeddings while q/k are still [batch, seq, heads, head_dim]
        if self.rope_helper is not None:
            len_past = layer_past[0].size(-2) if layer_past is not None else 0
            query = self.rope_helper.rotate_tensor(query, offset=len_past)
            key = self.rope_helper.rotate_tensor(key, offset=len_past)
| query = query.transpose(1, 2) |
| key = key.transpose(1, 2) |
| value = value.transpose(1, 2) |
|
|
| if layer_past is not None: |
| past_key, past_value = layer_past |
| key = torch.cat((past_key, key), dim=-2) |
| value = torch.cat((past_value, value), dim=-2) |
|
|
| if use_cache is True: |
| present = (key, value) |
| else: |
| present = None |
|
|
| if self.reorder_and_upcast_attn: |
| attn_output, attn_weights = self._upcast_and_reordered_attn( |
| query, key, value, attention_mask, head_mask, position_bias |
| ) |
| else: |
| attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask, position_bias) |
|
|
| attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) |
| attn_output = self.c_proj(attn_output) |
| attn_output = self.resid_dropout(attn_output) |
|
|
| outputs = (attn_output, present) |
| if output_attentions: |
| outputs += (attn_weights,) |
|
|
| return outputs |
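

# Hedged shape sketch for `CrystalCoderAttention` (hypothetical demo). A
# `SimpleNamespace` stands in for `CrystalCoderConfig` and carries only the
# fields the module actually reads; all sizes are illustrative.
def _demo_attention_shapes():
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        max_position_embeddings=32,
        hidden_size=64,
        num_attention_heads=4,
        position_embedding_type="rotary",
        rotary_dim=None,  # falls back to head_dim
        scale_attn_weights=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        attn_pdrop=0.0,
        resid_pdrop=0.0,
        mup_scale_qk_dot_by_d=False,
    )
    attn = CrystalCoderAttention(cfg, layer_idx=0)
    attn_output, present = attn(torch.randn(2, 5, 64), use_cache=True)
    assert attn_output.shape == (2, 5, 64)
    assert present[0].shape == (2, 4, 5, 16)  # cached key: [batch, heads, seq, head_dim]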
|
|
|
|
| class CrystalCoderMLP(nn.Module): |
| def __init__(self, intermediate_size, config): |
| super().__init__() |
| embed_dim = config.hidden_size |
| self.swiglu = config.activation_function == "swiglu" |
| self.c_fc = Conv1D(intermediate_size, embed_dim) |
| self.c_fc2 = Conv1D(intermediate_size, embed_dim) if self.swiglu else None |
| self.c_proj = Conv1D(embed_dim, intermediate_size) |
| self.act = SwiGLUActivation() if self.swiglu else ACT2FN[config.activation_function] |
| self.dropout = nn.Dropout(config.resid_pdrop) |
|
|
| def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor: |
| if self.swiglu: |
| hidden_states2 = self.c_fc2(hidden_states) |
| hidden_states = self.c_fc(hidden_states) |
| hidden_states = self.act(hidden_states, hidden_states2) if self.swiglu else self.act(hidden_states) |
| hidden_states = self.c_proj(hidden_states) |
| hidden_states = self.dropout(hidden_states) |
| return hidden_states |
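

# Hedged sketch of the MLP above on the SwiGLU path (hypothetical demo), again
# with a stand-in config namespace; only the fields read by `CrystalCoderMLP`
# are provided.
def _demo_mlp():
    from types import SimpleNamespace

    cfg = SimpleNamespace(hidden_size=8, activation_function="swiglu", resid_pdrop=0.0)
    mlp = CrystalCoderMLP(intermediate_size=16, config=cfg)
    assert mlp(torch.randn(2, 3, 8)).shape == (2, 3, 8)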
|
|
|
|
| class CrystalCoderBlock(nn.Module): |
| def __init__(self, config, layer_idx=None): |
| super().__init__() |
| hidden_size = config.hidden_size |
| inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size |
|
|
| self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
| self.attn = CrystalCoderAttention(config, layer_idx=layer_idx) |
| self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
|
|
| if config.add_cross_attention: |
| self.crossattention = CrystalCoderAttention(config, is_cross_attention=True, layer_idx=layer_idx) |
| self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
|
|
| self.mlp = CrystalCoderMLP(inner_dim, config) |
|
|
| def forward( |
| self, |
| hidden_states: Optional[Tuple[torch.FloatTensor]], |
| layer_past: Optional[Tuple[torch.Tensor]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = False, |
| output_attentions: Optional[bool] = False, |
| position_bias: Optional[torch.FloatTensor] = None, |
| ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]: |
| residual = hidden_states |
| hidden_states = self.ln_1(hidden_states) |
| attn_outputs = self.attn( |
| hidden_states, |
| layer_past=layer_past, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| position_bias=position_bias, |
| ) |
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual
|
|
        if encoder_hidden_states is not None:
            # add one self-attention block for cross-attention
            if not hasattr(self, "crossattention"):
| raise ValueError( |
| f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " |
| "cross-attention layers by setting `config.add_cross_attention=True`" |
| ) |
| residual = hidden_states |
| hidden_states = self.ln_cross_attn(hidden_states) |
| cross_attn_outputs = self.crossattention( |
| hidden_states, |
| attention_mask=attention_mask, |
| head_mask=head_mask, |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| output_attentions=output_attentions, |
| position_bias=position_bias, |
| ) |
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = residual + attn_output
            outputs = outputs + cross_attn_outputs[2:]  # add cross attentions if we output attention weights
|
|
        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states
|
|
| if use_cache: |
| outputs = (hidden_states,) + outputs |
| else: |
| outputs = (hidden_states,) + outputs[1:] |
|
|
| return outputs |
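

# Hedged sketch wiring a full pre-LN block (attention + SwiGLU MLP, each with a
# residual connection) from a stand-in config namespace; a hypothetical demo
# with illustrative sizes.
def _demo_block():
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        max_position_embeddings=16,
        hidden_size=8,
        num_attention_heads=2,
        position_embedding_type="rotary",
        rotary_dim=None,
        scale_attn_weights=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        attn_pdrop=0.0,
        resid_pdrop=0.0,
        mup_scale_qk_dot_by_d=False,
        n_inner=None,  # defaults to 4 * hidden_size
        layer_norm_epsilon=1e-5,
        add_cross_attention=False,
        activation_function="swiglu",
    )
    block = CrystalCoderBlock(cfg, layer_idx=0)
    (hidden_states,) = block(torch.randn(2, 5, 8))
    assert hidden_states.shape == (2, 5, 8)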
|
|
|
|
| class CrystalCoderPreTrainedModel(PreTrainedModel): |
| """ |
| An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
| models. |
| """ |
|
|
| config_class = CrystalCoderConfig |
| load_tf_weights = load_tf_weights_in_crystalcoder |
| base_model_prefix = "transformer" |
| is_parallelizable = True |
| supports_gradient_checkpointing = True |
| _no_split_modules = ["CrystalCoderBlock"] |
| _skip_keys_device_placement = "past_key_values" |
|
|
| def __init__(self, *inputs, **kwargs): |
| super().__init__(*inputs, **kwargs) |
|
|
| def _init_weights(self, module): |
| """Initialize the weights.""" |
| mup_init_scale = math.sqrt(self.config.mup_width_scale) |
        if isinstance(module, (nn.Linear, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=(self.config.initializer_range * mup_init_scale))
| if module.bias is not None: |
| module.bias.data.zero_() |
| elif isinstance(module, nn.Embedding): |
| module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| if module.padding_idx is not None: |
| module.weight.data[module.padding_idx].zero_() |
| elif isinstance(module, nn.LayerNorm): |
| module.bias.data.zero_() |
| module.weight.data.fill_(1.0) |
|
|
        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
        #   > A modified initialization which accounts for the accumulation on the residual path with model depth.
        #   > Scale the weights of residual layers at initialization by a factor of 1/sqrt(N) where N is the number
        #   > of residual layers.
        #   >   -- GPT-2 :: https://openai.com/blog/better-language-models/
        #
        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
        for name, p in module.named_parameters():
            if name == "c_proj.weight":
                # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                stddev = self.config.initializer_range * mup_init_scale / math.sqrt(2 * self.config.n_layer)
                p.data.normal_(mean=0.0, std=stddev)
|
|
| def _set_gradient_checkpointing(self, module, value=False): |
| if isinstance(module, CrystalCoderModel): |
| module.gradient_checkpointing = value |
|
|
| def get_mup_param_groups(self, lr, weight_decay=0.0, decoupled_wd=True): |
| """ |
| Returns list of dicts defining parameter groups for muP: |
| group 0: most model params get scaled learning rate and weight decay. |
| group 1: embedding layer gets non-scaled learning rate and weight decay. |
| group 2: normalization layers and biases get non-scaled learning rate only. |
| |
| The output can be passed to Adam-base optimizers |
| e.g. |
| param_groups = model.get_mup_param_groups(lr=1e-3, weight_decay=0.1) |
| torch.optim.AdamW(param_groups, betas=(0.9, 0.95), eps=1e-8) |
| """ |
| norm_modules = ( |
| torch.nn.LayerNorm, |
| torch.nn.BatchNorm1d, |
| torch.nn.BatchNorm2d, |
| torch.nn.BatchNorm3d, |
| torch.nn.InstanceNorm1d, |
| torch.nn.InstanceNorm2d, |
| torch.nn.InstanceNorm3d, |
| torch.nn.GroupNorm, |
| torch.nn.SyncBatchNorm, |
| torch.nn.LocalResponseNorm, |
| ) |
|
|
| def get_group_index(param_name): |
| for name, module in self.named_modules(): |
| if name in param_name: |
| if isinstance(module, norm_modules): |
| return 2 |
| elif isinstance(module, torch.nn.Embedding): |
| return 1 |
| return 0 |
|
|
| width_scale = self.config.mup_width_scale |
| new_param_groups = [] |
| new_param_groups.append({"params": [], "lr": lr * width_scale, "weight_decay": weight_decay}) |
| if not decoupled_wd: |
| new_param_groups[0]["weight_decay"] /= width_scale |
| new_param_groups.append({"params": [], "lr": lr, "weight_decay": weight_decay}) |
| new_param_groups.append({"params": [], "lr": lr, "weight_decay": 0.0}) |
|
|
| for name, param in self.named_parameters(): |
| if not param.requires_grad: |
| continue |
|
|
| if name.endswith("bias"): |
| new_param_groups[2]["params"].append(param) |
| else: |
| new_param_groups[get_group_index(name)]["params"].append(param) |
|
|
        # Drop empty groups; filter instead of deleting while iterating, which would skip elements.
        new_param_groups = [group for group in new_param_groups if len(group["params"]) > 0]
|
|
| return new_param_groups |
|
|
|
|
| CrystalCoder_START_DOCSTRING = r""" |
| |
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.).

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.
| |
| Parameters: |
| config ([`CrystalCoderConfig`]): Model configuration class with all the parameters of the model. |
| Initializing with a config file does not load the weights associated with the model, only the |
| configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| """ |
|
|
| CrystalCoder_INPUTS_DOCSTRING = r""" |
| Args: |
| input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): |
| `input_ids_length` = `sequence_length` if `past_key_values` is `None` else |
| `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input |
| sequence tokens in the vocabulary. |
| |
| If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as |
| `input_ids`. |
| |
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| [`PreTrainedTokenizer.__call__`] for details. |
| |
| [What are input IDs?](../glossary#input-ids) |
| past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`): |
| Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see |
| `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have |
| their past given to this model should not be passed as `input_ids` as they have already been computed. |
| attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| |
| - 1 for tokens that are **not masked**, |
| - 0 for tokens that are **masked**. |
| |
| If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for |
| `past_key_values`. In other words, the `attention_mask` always has to have the length: |
| `len(past_key_values) + len(input_ids)` |
| |
| [What are attention masks?](../glossary#attention-mask) |
| token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): |
| Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, |
| 1]`: |
| |
| - 0 corresponds to a *sentence A* token, |
| - 1 corresponds to a *sentence B* token. |
| |
| [What are token type IDs?](../glossary#token-type-ids) |
| position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
| config.max_position_embeddings - 1]`. |
| |
| [What are position IDs?](../glossary#position-ids) |
| head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): |
| Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: |
| |
| - 1 indicates the head is **not masked**, |
| - 0 indicates the head is **masked**. |
| |
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
| is useful if you want more control over how to convert `input_ids` indices into associated vectors than the |
| model's internal embedding lookup matrix. |
| |
| If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see |
| `past_key_values`). |
| use_cache (`bool`, *optional*): |
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| `past_key_values`). |
| output_attentions (`bool`, *optional*): |
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| tensors for more detail. |
| output_hidden_states (`bool`, *optional*): |
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| more detail. |
| return_dict (`bool`, *optional*): |
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| """ |
| PARALLELIZE_DOCSTRING = r""" |
    This is an experimental feature and is subject to change at a moment's notice.
| |
| Uses a device map to distribute attention modules of the model across several devices. If no device map is given, |
| it will evenly distribute blocks across all devices. |
| |
| Args: |
| device_map (`Dict[int, list]`, optional, defaults to None): |
| A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always |
| automatically mapped to the first device (for esoteric reasons). That means that the first device should |
| have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the |
| following number of attention modules: |
| |
| - gpt2: 12 |
| - gpt2-medium: 24 |
| - gpt2-large: 36 |
| - gpt2-xl: 48 |
| |
| Example: |
| |
| ```python |
| # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules: |
| model = GPT2LMHeadModel.from_pretrained("gpt2-xl") |
| device_map = { |
| 0: [0, 1, 2, 3, 4, 5, 6, 7, 8], |
| 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], |
| 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34], |
| 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47], |
| } |
| model.parallelize(device_map) |
| ``` |
| """ |
| DEPARALLELIZE_DOCSTRING = r""" |
| Moves the model to cpu from a model parallel state. |
| |
| Example: |
| |
| ```python |
| # On a 4 GPU machine with gpt2-large: |
| model = GPT2LMHeadModel.from_pretrained("gpt2-large") |
| device_map = { |
| 0: [0, 1, 2, 3, 4, 5, 6, 7], |
| 1: [8, 9, 10, 11, 12, 13, 14, 15], |
| 2: [16, 17, 18, 19, 20, 21, 22, 23], |
| 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], |
| } |
| model.parallelize(device_map) # Splits the model across several devices |
| model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() |
| ``` |
| """ |
|
|
|
|
| @add_start_docstrings( |
| "The bare CrystalCoder Model transformer outputting raw hidden-states without any specific head on top.", |
| CrystalCoder_START_DOCSTRING, |
| ) |
| class CrystalCoderModel(CrystalCoderPreTrainedModel): |
| _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] |
| _keys_to_ignore_on_load_missing = [r"attn.masked_bias", r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
|
|
| self.embed_dim = config.hidden_size |
|
|
| self.wte = nn.Embedding(config.vocab_size, self.embed_dim) |
| self.wpe = ( |
| nn.Embedding(config.max_position_embeddings, self.embed_dim) |
| if config.position_embedding_type == "learned" |
| else None |
| ) |
| self.embeddings_scale = config.mup_embeddings_scale |
|
|
| self.drop = nn.Dropout(config.embd_pdrop) |
| self.h = nn.ModuleList([CrystalCoderBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)]) |
| self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) |
|
|
| self.relative_pe = ( |
| AlibiPositionEmbeddingLayer(config.num_attention_heads) |
| if config.position_embedding_type == "alibi" |
| else None |
| ) |
|
|
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()
|
|
| @add_start_docstrings(PARALLELIZE_DOCSTRING) |
| def parallelize(self, device_map=None): |
| |
| warnings.warn( |
| "`CrystalCoderModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your" |
| " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" |
| " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1," |
| " ...}", |
| FutureWarning, |
| ) |
| self.device_map = ( |
| get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map |
| ) |
| assert_device_map(self.device_map, len(self.h)) |
| self.model_parallel = True |
| self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) |
| self.last_device = "cuda:" + str(max(self.device_map.keys())) |
| self.wte = self.wte.to(self.first_device) |
| if self.wpe is not None: |
| self.wpe = self.wpe.to(self.first_device) |
        # Load the blocks onto their assigned devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to last device
        self.ln_f = self.ln_f.to(self.last_device)
|
|
| @add_start_docstrings(DEPARALLELIZE_DOCSTRING) |
| def deparallelize(self): |
| warnings.warn( |
| "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", |
| FutureWarning, |
| ) |
| self.model_parallel = False |
| self.device_map = None |
| self.first_device = "cpu" |
| self.last_device = "cpu" |
| self.wte = self.wte.to("cpu") |
| if self.wpe is not None: |
| self.wpe = self.wpe.to("cpu") |
| for index in range(len(self.h)): |
| self.h[index] = self.h[index].to("cpu") |
| self.ln_f = self.ln_f.to("cpu") |
| torch.cuda.empty_cache() |
|
|
| def get_input_embeddings(self): |
| return self.wte |
|
|
| def set_input_embeddings(self, new_embeddings): |
| self.wte = new_embeddings |
|
|
| def _prune_heads(self, heads_to_prune): |
| """ |
| Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} |
| """ |
| for layer, heads in heads_to_prune.items(): |
| self.h[layer].attn.prune_heads(heads) |
|
|
| @add_start_docstrings_to_model_forward(CrystalCoder_INPUTS_DOCSTRING) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: |
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
| output_hidden_states = ( |
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
| ) |
| use_cache = use_cache if use_cache is not None else self.config.use_cache |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| if input_ids is not None and inputs_embeds is not None: |
| raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") |
| elif input_ids is not None: |
| input_shape = input_ids.size() |
| input_ids = input_ids.view(-1, input_shape[-1]) |
| batch_size = input_ids.shape[0] |
| elif inputs_embeds is not None: |
| input_shape = inputs_embeds.size()[:-1] |
| batch_size = inputs_embeds.shape[0] |
| else: |
| raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
| device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
| if token_type_ids is not None: |
| token_type_ids = token_type_ids.view(-1, input_shape[-1]) |
| if position_ids is not None: |
| position_ids = position_ids.view(-1, input_shape[-1]) |
|
|
| if past_key_values is None: |
| past_length = 0 |
| past_key_values = tuple([None] * len(self.h)) |
| else: |
| past_length = past_key_values[0][0].size(-2) |
| if position_ids is None: |
| position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) |
| position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) |
|
|
        # CrystalCoderAttention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # This attention mask is simpler than the triangular masking of causal attention
            # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
|
|
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
| encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() |
| encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) |
| if encoder_attention_mask is None: |
| encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) |
| encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) |
| else: |
| encoder_attention_mask = None |
|
|
        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)
|
|
| if inputs_embeds is None: |
| inputs_embeds = self.wte(input_ids) |
| if self.wpe is not None: |
| position_embeds = self.wpe(position_ids) |
| hidden_states = inputs_embeds + position_embeds |
| else: |
| hidden_states = inputs_embeds |
        # muP: scale the embedding output by `mup_embeddings_scale`
        hidden_states *= torch.tensor(
            float(self.embeddings_scale), dtype=hidden_states.dtype, device=hidden_states.device
        )
|
|
| if token_type_ids is not None: |
| token_type_embeds = self.wte(token_type_ids) |
| hidden_states = hidden_states + token_type_embeds |
|
|
| hidden_states = self.drop(hidden_states) |
|
|
        if self.relative_pe is not None:
            length = input_shape[-1]  # also valid when only `inputs_embeds` is provided
            cached_kv_length = 0
            cached_kv = past_key_values[0]
            if cached_kv is not None:
                cached_kv_length = cached_kv[0].shape[-2]
            position_bias = self.relative_pe(length, length, cached_kv_length)
        else:
            position_bias = None
|
|
| output_shape = input_shape + (hidden_states.size(-1),) |
|
|
| if self.gradient_checkpointing and self.training: |
| if use_cache: |
| logger.warning_once( |
| "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
| ) |
| use_cache = False |
|
|
| presents = () if use_cache else None |
| all_self_attentions = () if output_attentions else None |
| all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None |
| all_hidden_states = () if output_hidden_states else None |
| for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): |
            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on the same device as hidden_states
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
| if output_hidden_states: |
| all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value; flags and position_bias are captured from the outer scope
                        # (previously position_bias was silently dropped on this path)
                        return module(*inputs, use_cache, output_attentions, position_bias)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
| else: |
| outputs = block( |
| hidden_states, |
| layer_past=layer_past, |
| attention_mask=attention_mask, |
| head_mask=head_mask[i], |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| position_bias=position_bias, |
| ) |
|
|
| hidden_states = outputs[0] |
| if use_cache is True: |
| presents = presents + (outputs[1],) |
|
|
| if output_attentions: |
| all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) |
| if self.config.add_cross_attention: |
| all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) |
|
|
            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))
|
|
| hidden_states = self.ln_f(hidden_states) |
|
|
        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
|
|
| if not return_dict: |
| return tuple( |
| v |
| for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions] |
| if v is not None |
| ) |
|
|
| return BaseModelOutputWithPastAndCrossAttentions( |
| last_hidden_state=hidden_states, |
| past_key_values=presents, |
| hidden_states=all_hidden_states, |
| attentions=all_self_attentions, |
| cross_attentions=all_cross_attentions, |
| ) |
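

# Hedged end-to-end sketch for the bare model; assumes `CrystalCoderConfig()`
# ships workable defaults (vocab size, layer count, etc.):
#
#     config = CrystalCoderConfig()
#     model = CrystalCoderModel(config)
#     input_ids = torch.randint(0, config.vocab_size, (1, 8))
#     out = model(input_ids)
#     out.last_hidden_state.shape  # (1, 8, config.hidden_size)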
|
|
|
|
| @add_start_docstrings( |
| """ |
| The CrystalCoder Model transformer with a language modeling head on top (linear layer with weights tied to the input |
| embeddings). |
| """, |
| CrystalCoder_START_DOCSTRING, |
| ) |
| class CrystalCoderLMHeadModel(CrystalCoderPreTrainedModel): |
| _keys_to_ignore_on_load_missing = [r"lm_head.weight"] |
| _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| self.transformer = CrystalCoderModel(config) |
| self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
| self.output_logits_scale = config.mup_output_alpha * config.mup_width_scale |
|
|
        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()
|
|
| @add_start_docstrings(PARALLELIZE_DOCSTRING) |
| def parallelize(self, device_map=None): |
| warnings.warn( |
| "`CrystalCoderLMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" |
| " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" |
| " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':" |
| " 0, 'transformer.h.1': 1, ...}", |
| FutureWarning, |
| ) |
| self.device_map = ( |
| get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) |
| if device_map is None |
| else device_map |
| ) |
| assert_device_map(self.device_map, len(self.transformer.h)) |
| self.transformer.parallelize(self.device_map) |
| self.lm_head = self.lm_head.to(self.transformer.first_device) |
| self.model_parallel = True |
|
|
| @add_start_docstrings(DEPARALLELIZE_DOCSTRING) |
| def deparallelize(self): |
| warnings.warn( |
| "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", |
| FutureWarning, |
| ) |
| self.transformer.deparallelize() |
| self.transformer = self.transformer.to("cpu") |
| self.lm_head = self.lm_head.to("cpu") |
| self.model_parallel = False |
| torch.cuda.empty_cache() |
|
|
| def get_output_embeddings(self): |
| return self.lm_head |
|
|
| def set_output_embeddings(self, new_embeddings): |
| self.lm_head = new_embeddings |
|
|
| def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): |
| token_type_ids = kwargs.get("token_type_ids", None) |
        # Only use the last token of `input_ids` once a past is provided
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
| if token_type_ids is not None: |
| token_type_ids = token_type_ids[:, -1].unsqueeze(-1) |
|
|
| attention_mask = kwargs.get("attention_mask", None) |
| position_ids = kwargs.get("position_ids", None) |
|
|
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
| if past_key_values: |
| position_ids = position_ids[:, -1].unsqueeze(-1) |
| else: |
| position_ids = None |
|
|
        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}
|
|
| model_inputs.update( |
| { |
| "past_key_values": past_key_values, |
| "use_cache": kwargs.get("use_cache"), |
| "position_ids": position_ids, |
| "attention_mask": attention_mask, |
| "token_type_ids": token_type_ids, |
| } |
| ) |
| return model_inputs |
|
|
| @add_start_docstrings_to_model_forward(CrystalCoder_INPUTS_DOCSTRING) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| encoder_hidden_states: Optional[torch.Tensor] = None, |
| encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set |
| `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` |
| are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` |
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| encoder_hidden_states=encoder_hidden_states, |
| encoder_attention_mask=encoder_attention_mask, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| hidden_states = transformer_outputs[0] |
|
|
        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)
|
|
| lm_logits = self.lm_head(hidden_states) |
        # muP: scale the logits by `mup_output_alpha * mup_width_scale`
        lm_logits *= torch.tensor(float(self.output_logits_scale), dtype=lm_logits.dtype, device=lm_logits.device)
|
|
| loss = None |
| if labels is not None: |
            # move labels to the correct device to enable model parallelism
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
|
| if not return_dict: |
| output = (lm_logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return CausalLMOutputWithCrossAttentions( |
| loss=loss, |
| logits=lm_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| cross_attentions=transformer_outputs.cross_attentions, |
| ) |
|
|
| @staticmethod |
| def _reorder_cache( |
| past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor |
| ) -> Tuple[Tuple[torch.Tensor]]: |
| """ |
| This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or |
| [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct |
| beam_idx at every generation step. |
| """ |
| return tuple( |
| tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) |
| for layer_past in past_key_values |
| ) |
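

# Hedged generation sketch for the LM head model above; the checkpoint name is
# a placeholder and `AutoTokenizer` is assumed to resolve to a compatible
# tokenizer:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("<checkpoint>")
#     model = CrystalCoderLMHeadModel.from_pretrained("<checkpoint>")
#     inputs = tokenizer("def fibonacci(n):", return_tensors="pt")
#     output_ids = model.generate(**inputs, max_new_tokens=16)
#     print(tokenizer.decode(output_ids[0]))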
|
|
|
|
| @add_start_docstrings( |
| """ |
| The CrystalCoder Model transformer with a sequence classification head on top (linear layer). |
| |
| [`CrystalCoderForSequenceClassification`] uses the last token in order to do the classification, as other causal models |
| (e.g. GPT-1) do. |
| |
| Since it does classification on the last token, it requires to know the position of the last token. If a |
| `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If |
| no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the |
| padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in |
| each row of the batch). |
| """, |
| CrystalCoder_START_DOCSTRING, |
| ) |
| class CrystalCoderForSequenceClassification(CrystalCoderPreTrainedModel): |
| _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] |
| _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head.weight"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.transformer = CrystalCoderModel(config) |
| self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) |
| self.output_logits_scale = config.mup_output_alpha * config.mup_width_scale |
|
|
        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()
|
|
| @add_start_docstrings_to_model_forward(CrystalCoder_INPUTS_DOCSTRING) |
| @add_code_sample_docstrings( |
| checkpoint="microsoft/DialogRPT-updown", |
| output_type=SequenceClassifierOutputWithPast, |
| config_class=_CONFIG_FOR_DOC, |
| ) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, SequenceClassifierOutputWithPast]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If |
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
| hidden_states = transformer_outputs[0] |
| logits = self.score(hidden_states) |
| logits *= torch.tensor(float(self.output_logits_scale), dtype=logits.dtype, device=logits.device) |
|
|
| if input_ids is not None: |
| batch_size, sequence_length = input_ids.shape[:2] |
| else: |
| batch_size, sequence_length = inputs_embeds.shape[:2] |
|
|
| assert ( |
| self.config.pad_token_id is not None or batch_size == 1 |
| ), "Cannot handle batch sizes > 1 if no padding token is defined." |
| if self.config.pad_token_id is None: |
| sequence_lengths = -1 |
| else: |
| if input_ids is not None: |
| sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) |
| else: |
| sequence_lengths = -1 |
| logger.warning( |
| f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " |
| "unexpected if using padding tokens in conjunction with `inputs_embeds.`" |
| ) |
|
|
| pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] |
|
|
| loss = None |
| if labels is not None: |
| if self.config.problem_type is None: |
| if self.num_labels == 1: |
| self.config.problem_type = "regression" |
| elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): |
| self.config.problem_type = "single_label_classification" |
| else: |
| self.config.problem_type = "multi_label_classification" |
|
|
| if self.config.problem_type == "regression": |
| loss_fct = MSELoss() |
| if self.num_labels == 1: |
| loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) |
| else: |
| loss = loss_fct(pooled_logits, labels) |
| elif self.config.problem_type == "single_label_classification": |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) |
| elif self.config.problem_type == "multi_label_classification": |
| loss_fct = BCEWithLogitsLoss() |
| loss = loss_fct(pooled_logits, labels) |
| if not return_dict: |
| output = (pooled_logits,) + transformer_outputs[1:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return SequenceClassifierOutputWithPast( |
| loss=loss, |
| logits=pooled_logits, |
| past_key_values=transformer_outputs.past_key_values, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
|
|
| @add_start_docstrings( |
| """ |
| CrystalCoder Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for |
| Named-Entity-Recognition (NER) tasks. |
| """, |
| CrystalCoder_START_DOCSTRING, |
| ) |
| class CrystalCoderForTokenClassification(CrystalCoderPreTrainedModel): |
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
|
|
| self.transformer = CrystalCoderModel(config) |
| if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None: |
| classifier_dropout = config.classifier_dropout |
| elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: |
| classifier_dropout = config.hidden_dropout |
| else: |
| classifier_dropout = 0.1 |
| self.dropout = nn.Dropout(classifier_dropout) |
| self.classifier = nn.Linear(config.hidden_size, config.num_labels) |
| self.output_logits_scale = config.mup_output_alpha * config.mup_width_scale |
|
|
        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()
|
|
| @add_start_docstrings_to_model_forward(CrystalCoder_INPUTS_DOCSTRING) |
| |
| @add_code_sample_docstrings( |
| checkpoint="brad1141/gpt2-finetuned-comp2", |
| output_type=TokenClassifierOutput, |
| config_class=_CONFIG_FOR_DOC, |
| expected_loss=0.25, |
| expected_output=["Lead", "Lead", "Lead", "Position", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead", "Lead"], |
| ) |
| |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| labels: Optional[torch.LongTensor] = None, |
| use_cache: Optional[bool] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, TokenClassifierOutput]: |
| r""" |
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If |
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| transformer_outputs = self.transformer( |
| input_ids, |
| past_key_values=past_key_values, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| use_cache=use_cache, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| hidden_states = transformer_outputs[0] |
| hidden_states = self.dropout(hidden_states) |
| logits = self.classifier(hidden_states) |
| logits *= torch.tensor(float(self.output_logits_scale), dtype=logits.dtype, device=logits.device) |
|
|
| loss = None |
| if labels is not None: |
| labels = labels.to(logits.device) |
| loss_fct = CrossEntropyLoss() |
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
|
|
| if not return_dict: |
| output = (logits,) + transformer_outputs[2:] |
| return ((loss,) + output) if loss is not None else output |
|
|
| return TokenClassifierOutput( |
| loss=loss, |
| logits=logits, |
| hidden_states=transformer_outputs.hidden_states, |
| attentions=transformer_outputs.attentions, |
| ) |
|
|
|
|
| @add_start_docstrings( |
| """ |
| The CrystalCoder Model transformer with a span classification head on top for extractive question-answering tasks like |
| SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). |
| """, |
| CrystalCoder_START_DOCSTRING, |
| ) |
| class CrystalCoderForQuestionAnswering(CrystalCoderPreTrainedModel): |
| _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] |
| _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"h\.\d+\.attn\.bias", r"lm_head.weight"] |
|
|
| def __init__(self, config): |
| super().__init__(config) |
| self.num_labels = config.num_labels |
| self.transformer = CrystalCoderModel(config) |
| self.qa_outputs = nn.Linear(config.hidden_size, 2) |
| self.output_logits_scale = config.mup_output_alpha * config.mup_width_scale |
|
|
        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()
|
|
| @add_start_docstrings_to_model_forward(CrystalCoder_INPUTS_DOCSTRING.format("batch_size, sequence_length")) |
| def forward( |
| self, |
| input_ids: Optional[torch.LongTensor] = None, |
| attention_mask: Optional[torch.FloatTensor] = None, |
| token_type_ids: Optional[torch.LongTensor] = None, |
| position_ids: Optional[torch.LongTensor] = None, |
| head_mask: Optional[torch.FloatTensor] = None, |
| inputs_embeds: Optional[torch.FloatTensor] = None, |
| start_positions: Optional[torch.LongTensor] = None, |
| end_positions: Optional[torch.LongTensor] = None, |
| output_attentions: Optional[bool] = None, |
| output_hidden_states: Optional[bool] = None, |
| return_dict: Optional[bool] = None, |
| ) -> Union[Tuple, QuestionAnsweringModelOutput]: |
| r""" |
| start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for position (index) of the start of the labelled span for computing the token classification loss. |
| Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence |
| are not taken into account for computing the loss. |
| end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| Labels for position (index) of the end of the labelled span for computing the token classification loss. |
| Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence |
| are not taken into account for computing the loss. |
| """ |
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
| outputs = self.transformer( |
| input_ids, |
| attention_mask=attention_mask, |
| token_type_ids=token_type_ids, |
| position_ids=position_ids, |
| head_mask=head_mask, |
| inputs_embeds=inputs_embeds, |
| output_attentions=output_attentions, |
| output_hidden_states=output_hidden_states, |
| return_dict=return_dict, |
| ) |
|
|
| sequence_output = outputs[0] |
|
|
| logits = self.qa_outputs(sequence_output) |
| logits *= torch.tensor(float(self.output_logits_scale), dtype=logits.dtype, device=logits.device) |
| start_logits, end_logits = logits.split(1, dim=-1) |
| start_logits = start_logits.squeeze(-1).contiguous() |
| end_logits = end_logits.squeeze(-1).contiguous() |
|
|
| total_loss = None |
| if start_positions is not None and end_positions is not None: |
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1).to(start_logits.device)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1).to(end_logits.device)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
| start_positions = start_positions.clamp(0, ignored_index) |
| end_positions = end_positions.clamp(0, ignored_index) |
|
|
| loss_fct = CrossEntropyLoss(ignore_index=ignored_index) |
| start_loss = loss_fct(start_logits, start_positions) |
| end_loss = loss_fct(end_logits, end_positions) |
| total_loss = (start_loss + end_loss) / 2 |
|
|
| if not return_dict: |
| output = (start_logits, end_logits) + outputs[2:] |
| return ((total_loss,) + output) if total_loss is not None else output |
|
|
| return QuestionAnsweringModelOutput( |
| loss=total_loss, |
| start_logits=start_logits, |
| end_logits=end_logits, |
| hidden_states=outputs.hidden_states, |
| attentions=outputs.attentions, |
| ) |
|
|