import math
import warnings
from typing import Dict, Mapping, Optional, Tuple, Any, Union

import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import nn, Tensor
from torch.autograd import Function
from torch.distributions import Bernoulli
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from tqdm import trange

from .dsbn import DomainSpecificBatchNorm1d
|
|
|
|
| class TransformerModel(nn.Module): |
| def __init__( |
| self, |
| ntoken: int, |
| d_model: int, |
| nhead: int, |
| d_hid: int, |
| nlayers: int, |
| nlayers_cls: int = 3, |
| n_cls: int = 1, |
| vocab: Any = None, |
| dropout: float = 0.5, |
| pad_token: str = "<pad>", |
| pad_value: int = 0, |
| do_mvc: bool = False, |
| do_dab: bool = False, |
| use_batch_labels: bool = False, |
| num_batch_labels: Optional[int] = None, |
| domain_spec_batchnorm: Union[bool, str] = False, |
| input_emb_style: str = "continuous", |
| n_input_bins: Optional[int] = None, |
| cell_emb_style: str = "avg-pool", |
| mvc_decoder_style: str = "inner product", |
| ecs_threshold: float = 0.3, |
| explicit_zero_prob: bool = False, |
| use_fast_transformer: bool = False, |
| fast_transformer_backend: str = "flash", |
| pre_norm: bool = False, |
| bin_output: bool = False, |
| use_down_up_transformer: bool = False, |
| n_top_genes: int = 2048, |
| bottleneck_dim: int = 128, |
| ): |
| super().__init__() |
| self.model_type = "Transformer" |
| self.d_model = d_model |
| self.do_dab = do_dab |
| self.ecs_threshold = ecs_threshold |
| self.use_batch_labels = use_batch_labels |
| self.domain_spec_batchnorm = domain_spec_batchnorm |
| self.input_emb_style = input_emb_style |
| self.cell_emb_style = cell_emb_style |
| self.explicit_zero_prob = explicit_zero_prob |
| self.norm_scheme = "pre" if pre_norm else "post" |
| self.bin_output = bin_output |
| self.n_top_genes = n_top_genes |
| self.use_down_up_transformer = use_down_up_transformer |
| if self.input_emb_style not in ["category", "continuous", "scaling"]: |
| raise ValueError( |
| f"input_emb_style should be one of category, continuous, scaling, " |
| f"got {input_emb_style}" |
| ) |
| if cell_emb_style not in ["cls", "avg-pool", "w-pool"]: |
| raise ValueError(f"Unknown cell_emb_style: {cell_emb_style}") |
| if use_fast_transformer: |
| if not flash_attn_available: |
| warnings.warn( |
| "flash-attn is not installed, using pytorch transformer instead. " |
| "Set use_fast_transformer=False to avoid this warning. " |
| "Installing flash-attn is highly recommended." |
| ) |
| use_fast_transformer = False |
| self.use_fast_transformer = use_fast_transformer |
|
|
| |
|
|
| self.encoder = GeneEncoder(ntoken, d_model, padding_idx=vocab[pad_token]) |
|
|
| |
| if input_emb_style == "continuous": |
| self.value_encoder = ContinuousValueEncoder(d_model, dropout) |
| elif input_emb_style == "category": |
| assert n_input_bins > 0 |
| self.value_encoder = CategoryValueEncoder( |
| n_input_bins, d_model, padding_idx=pad_value |
| ) |
| else: |
| self.value_encoder = nn.Identity() |
| |
| |
        if self.bin_output and n_input_bins is not None and n_input_bins > 0:
            self.value_decode = CategoryValueDecoder(
                d_model, n_input_bins, use_batch_labels=use_batch_labels
            )
| else: |
| self.value_decode = nn.Identity() |
| |
| |
| if use_batch_labels: |
| self.batch_encoder = BatchLabelEncoder(num_batch_labels, d_model) |
|
|
        if domain_spec_batchnorm is True or domain_spec_batchnorm in ("dsbn", "do_affine"):
            use_affine = domain_spec_batchnorm == "do_affine"
            print(f"Use domain specific batchnorm with affine={use_affine}")
| self.dsbn = DomainSpecificBatchNorm1d( |
| d_model, num_batch_labels, eps=6.1e-5, affine=use_affine |
| ) |
| elif domain_spec_batchnorm == "batchnorm": |
| print("Using simple batchnorm instead of domain specific batchnorm") |
| self.bn = nn.BatchNorm1d(d_model, eps=6.1e-5) |
|
|
| if use_fast_transformer: |
| if fast_transformer_backend == "linear": |
| self.transformer_encoder = FastTransformerEncoderWrapper( |
| d_model, nhead, d_hid, nlayers, dropout |
| ) |
| elif fast_transformer_backend == "flash": |
| encoder_layers = FlashTransformerEncoderLayer( |
| d_model, |
| nhead, |
| d_hid, |
| dropout, |
| batch_first=True, |
| norm_scheme=self.norm_scheme, |
| ) |
| self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers) |
| elif self.use_down_up_transformer: |
| encoder_layers = TransformerEncoderLayer( |
| d_model, nhead, d_hid, dropout, batch_first=True |
| ) |
| self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers) |
| |
| |
| |
            self.MLP_pooling = nn.Sequential(
                nn.Linear(n_top_genes, d_model),
                nn.ReLU(),
                nn.Linear(d_model, bottleneck_dim),
            )
            self.ln1 = nn.LayerNorm(n_top_genes)
            self.ln2 = nn.LayerNorm(bottleneck_dim)
            self.ln3 = nn.LayerNorm(bottleneck_dim)
            self.batch_norm = nn.BatchNorm1d(bottleneck_dim, momentum=0.01, eps=0.001)

            self.cross_x_in = nn.Parameter(torch.randn(512, 512))
            self.encoder_cross_x = GeneEncoder(ntoken, d_model, padding_idx=vocab[pad_token])
            self.feature_MLP = nn.Sequential(
                nn.Linear(d_model, d_model),
                nn.ReLU(),
                nn.Linear(d_model, d_model),
            )
            self.MLP_upsampling = nn.Sequential(
                nn.Linear(bottleneck_dim, d_model),
                nn.ReLU(),
                nn.Linear(d_model, n_top_genes),
            )
| else: |
| encoder_layers = TransformerEncoderLayer( |
| d_model, nhead, d_hid, dropout, batch_first=True |
| ) |
| self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers) |
|
|
| self.decoder = ExprDecoder( |
| d_model, |
| explicit_zero_prob=explicit_zero_prob, |
| use_batch_labels=use_batch_labels, |
| ) |
| self.cls_decoder = ClsDecoder(d_model, n_cls, nlayers=nlayers_cls) |
| if do_mvc: |
| self.mvc_decoder = MVCDecoder( |
| d_model, |
| arch_style=mvc_decoder_style, |
| explicit_zero_prob=explicit_zero_prob, |
| use_batch_labels=use_batch_labels, |
| ) |
|
|
| if do_dab: |
| self.grad_reverse_discriminator = AdversarialDiscriminator( |
| d_model, |
| n_cls=num_batch_labels, |
| reverse_grad=True, |
| ) |
|
|
        self.sim = Similarity(temp=0.5)
        self.criterion_cce = nn.CrossEntropyLoss()
|
|
| self.init_weights() |
|
|
| def init_weights(self) -> None: |
| initrange = 0.1 |
| |
| self.encoder.embedding.weight.data.uniform_(-initrange, initrange) |
| |
    def _decode(self, x: Tensor) -> Tensor:
        x = self.MLP_upsampling(x).transpose(-1, -2)
        bin_output = self.value_decode(x)
        return bin_output
| |
| def _encode( |
| self, |
| src: Tensor, |
| values: Tensor, |
| src_key_padding_mask: Tensor, |
| batch_labels: Optional[Tensor] = None, |
| ) -> Tensor: |
| self._check_batch_labels(batch_labels) |
|
|
| src = self.encoder(src) |
| self.cur_gene_token_embs = src |
|
|
| values = self.value_encoder(values) |
| if self.input_emb_style == "scaling": |
| values = values.unsqueeze(2) |
| total_embs = src * values |
| else: |
| total_embs = src + values |
|
|
| if getattr(self, "dsbn", None) is not None: |
| batch_label = int(batch_labels[0].item()) |
| total_embs = self.dsbn(total_embs.permute(0, 2, 1), batch_label).permute( |
| 0, 2, 1 |
| ) |
| elif getattr(self, "bn", None) is not None: |
| total_embs = self.bn(total_embs.permute(0, 2, 1)).permute(0, 2, 1) |
| |
| output = self.transformer_encoder( |
| total_embs, src_key_padding_mask=src_key_padding_mask |
| ) |
|
|
        if self.use_down_up_transformer:
            # pool the gene dimension down to the bottleneck, then refine the
            # feature dimension and clip the result
            output = self.ln1(output.transpose(-1, -2))
            output = self.MLP_pooling(output)
            output = self.ln2(output)

            output = self.feature_MLP(output.transpose(-1, -2))
            output = self.ln3(output.transpose(-1, -2))
            output = torch.clip(output, min=-1, max=1)

        return output
|
|
    def _get_cell_emb_from_layer(
        self, layer_output: Tensor, weights: Optional[Tensor] = None
    ) -> Tensor:
| """ |
| Args: |
| layer_output(:obj:`Tensor`): shape (batch, seq_len, embsize) |
| weights(:obj:`Tensor`): shape (batch, seq_len), optional and only used |
| when :attr:`self.cell_emb_style` is "w-pool". |
| |
| Returns: |
| :obj:`Tensor`: shape (batch, embsize) |
| """ |
| if self.cell_emb_style == "cls": |
| cell_emb = layer_output[:, 0, :] |
| elif self.cell_emb_style == "avg-pool": |
| cell_emb = torch.mean(layer_output, dim=1) |
| elif self.cell_emb_style == "w-pool": |
| if weights is None: |
| raise ValueError("weights is required when cell_emb_style is w-pool") |
| if weights.dim() != 2: |
| raise ValueError("weights should be 2D") |
| cell_emb = torch.sum(layer_output * weights.unsqueeze(2), dim=1) |
| cell_emb = F.normalize(cell_emb, p=2, dim=1) |
|
|
| return cell_emb |
|
|
| def _check_batch_labels(self, batch_labels: Tensor) -> None: |
| if self.use_batch_labels or self.domain_spec_batchnorm: |
| assert batch_labels is not None |
| elif batch_labels is not None: |
| raise ValueError( |
| "batch_labels should only be provided when `self.use_batch_labels`" |
| " or `self.domain_spec_batchnorm` is True" |
| ) |
|
|
| def generate( |
| self, |
| cell_emb: Tensor, |
| src: Tensor, |
| values: Optional[Tensor] = None, |
| src_key_padding_mask: Optional[Tensor] = None, |
| gen_iters: int = 1, |
| batch_labels: Optional[Tensor] = None, |
| ) -> Tensor: |
| """ |
| Args: |
| cell_emb(:obj:`Tensor`): shape (batch, embsize) |
| src(:obj:`Tensor`): shape (batch, seq_len) |
| values(:obj:`Tensor`): shape (batch, seq_len), optional |
| src_key_padding_mask(:obj:`Tensor`): shape (batch, seq_len), optional |
| gen_iters(:obj:`int`): number of generation iterations |
| batch_labels(:obj:`Tensor`): shape (batch,), optional |
| """ |
| |
| |
        try:
            self._check_batch_labels(batch_labels)
        except (AssertionError, ValueError):
            warnings.warn(
                "batch_labels is required but not provided, using zeros instead"
            )
            batch_labels = torch.zeros(
                cell_emb.shape[0], dtype=torch.long, device=cell_emb.device
            )
|
|
| src = self.encoder(src) |
|
|
| if values is not None: |
| values = self.value_encoder(values) |
| if self.input_emb_style == "scaling": |
| values = values.unsqueeze(2) |
| total_embs = src * values |
| else: |
| total_embs = src + values |
| else: |
| total_embs = src |
|
|
| if getattr(self, "dsbn", None) is not None: |
| batch_label = int(batch_labels[0].item()) |
| total_embs = self.dsbn(total_embs.permute(0, 2, 1), batch_label).permute( |
| 0, 2, 1 |
| ) |
| elif getattr(self, "bn", None) is not None: |
| total_embs = self.bn(total_embs.permute(0, 2, 1)).permute(0, 2, 1) |
|
|
| total_embs[:, 0, :] = cell_emb |
|
|
| if src_key_padding_mask is None: |
| src_key_padding_mask = torch.zeros( |
| total_embs.shape[:2], dtype=torch.bool, device=total_embs.device |
| ) |
| transformer_output = self.transformer_encoder( |
| total_embs, src_key_padding_mask=src_key_padding_mask |
| ) |
|
|
| if self.use_batch_labels: |
| batch_emb = self.batch_encoder(batch_labels) |
| mlm_output = self.decoder( |
| transformer_output |
| if not self.use_batch_labels |
| else torch.cat( |
| [ |
| transformer_output, |
| batch_emb.unsqueeze(1).repeat(1, transformer_output.shape[1], 1), |
| ], |
| dim=2, |
| ), |
| |
| ) |
| output = mlm_output["pred"] |
|
|
| return output |
|
|
| def forward( |
| self, |
| src: Tensor, |
| values: Tensor, |
| src_key_padding_mask: Tensor, |
| condition: Optional[Tensor] = None, |
| batch_labels: Optional[Tensor] = None, |
| CLS: bool = False, |
| CCE: bool = False, |
| MVC: bool = False, |
| ECS: bool = False, |
| do_sample: bool = False, |
| ) -> Mapping[str, Tensor]: |
| """ |
| Args: |
| src (:obj:`Tensor`): token ids, shape [batch_size, seq_len] |
| values (:obj:`Tensor`): token values, shape [batch_size, seq_len] |
| src_key_padding_mask (:obj:`Tensor`): mask for src, shape [batch_size, |
| seq_len] |
| batch_labels (:obj:`Tensor`): batch labels, shape [batch_size] |
| CLS (:obj:`bool`): if True, return the celltype classification objective |
| (CLS) output |
| CCE (:obj:`bool`): if True, return the contrastive cell embedding objective |
| (CCE) output |
| MVC (:obj:`bool`): if True, return the masked value prediction for cell |
| embedding MVC output |
| ECS (:obj:`bool`): if True, return the elastic cell similarity objective |
| (ECS) output. |
| |
| Returns: |
| dict of output Tensors. |
| """ |
| transformer_output = self._encode( |
| src, values, src_key_padding_mask, batch_labels |
| ) |
| if self.use_down_up_transformer: |
            transformer_output = self.MLP_upsampling(transformer_output).transpose(-1, -2)
| |
| if self.use_batch_labels: |
| batch_emb = self.batch_encoder(batch_labels) |
|
|
| output = {} |
| mlm_output = self.decoder( |
| transformer_output |
| if not self.use_batch_labels |
| else torch.cat( |
| [ |
| transformer_output, |
| batch_emb.unsqueeze(1).repeat(1, transformer_output.shape[1], 1), |
| ], |
| dim=2, |
| ), |
| |
| ) |
| |
| if self.bin_output: |
| output["bin_output"] = self.value_decode( |
| transformer_output |
| if not self.use_batch_labels |
| else torch.cat( |
| [ |
| transformer_output, |
| batch_emb.unsqueeze(1).repeat(1, transformer_output.shape[1], 1), |
| ], |
| dim=2, |
| ), |
| ) |
| |
| if self.explicit_zero_prob and do_sample: |
| bernoulli = Bernoulli(probs=mlm_output["zero_probs"]) |
| output["mlm_output"] = bernoulli.sample() * mlm_output["pred"] |
| else: |
| output["mlm_output"] = mlm_output["pred"] |
| if self.explicit_zero_prob: |
| output["mlm_zero_probs"] = mlm_output["zero_probs"] |
|
|
| cell_emb = self._get_cell_emb_from_layer(transformer_output, values) |
| output["cell_emb"] = cell_emb |
| |
| if CLS: |
| output["cls_output"] = self.cls_decoder(cell_emb) |
| if CCE: |
| cell1 = cell_emb |
| transformer_output2 = self._encode( |
| src, values, src_key_padding_mask, batch_labels |
| ) |
| cell2 = self._get_cell_emb_from_layer(transformer_output2) |
|
|
| |
| if dist.is_initialized() and self.training: |
| cls1_list = [ |
| torch.zeros_like(cell1) for _ in range(dist.get_world_size()) |
| ] |
| cls2_list = [ |
| torch.zeros_like(cell2) for _ in range(dist.get_world_size()) |
| ] |
| dist.all_gather(tensor_list=cls1_list, tensor=cell1.contiguous()) |
| dist.all_gather(tensor_list=cls2_list, tensor=cell2.contiguous()) |
|
|
| |
| |
| |
| cls1_list[dist.get_rank()] = cell1 |
| cls2_list[dist.get_rank()] = cell2 |
|
|
| cell1 = torch.cat(cls1_list, dim=0) |
| cell2 = torch.cat(cls2_list, dim=0) |
| |
| cos_sim = self.sim(cell1.unsqueeze(1), cell2.unsqueeze(0)) |
| labels = torch.arange(cos_sim.size(0)).long().to(cell1.device) |
| output["loss_cce"] = self.creterion_cce(cos_sim, labels) |
| if MVC: |
| mvc_output = self.mvc_decoder( |
| cell_emb |
| if not self.use_batch_labels |
| else torch.cat([cell_emb, batch_emb], dim=1), |
| |
| self.cur_gene_token_embs, |
| ) |
| if self.explicit_zero_prob and do_sample: |
| bernoulli = Bernoulli(probs=mvc_output["zero_probs"]) |
| output["mvc_output"] = bernoulli.sample() * mvc_output["pred"] |
| else: |
| output["mvc_output"] = mvc_output["pred"] |
| if self.explicit_zero_prob: |
| output["mvc_zero_probs"] = mvc_output["zero_probs"] |
| if ECS: |
| |
| |
| |
| cell_emb_normed = F.normalize(cell_emb, p=2, dim=1) |
| cos_sim = torch.mm(cell_emb_normed, cell_emb_normed.t()) |
|
|
| |
| mask = torch.eye(cos_sim.size(0)).bool().to(cos_sim.device) |
| cos_sim = cos_sim.masked_fill(mask, 0.0) |
| |
| cos_sim = F.relu(cos_sim) |
|
|
| output["loss_ecs"] = torch.mean(1 - (cos_sim - self.ecs_threshold) ** 2) |
| |
| |
| if self.do_dab: |
| output["dab_output"] = self.grad_reverse_discriminator(cell_emb) |
|
|
| return output |
| |
| def bin_encode(self, src: Tensor, values: Tensor, src_key_padding_mask: Tensor, batch_labels: Optional[Tensor] = None) -> Tensor: |
| transformer_output = self._encode( |
| src, values, src_key_padding_mask, batch_labels |
| ) |
| return transformer_output |
| |
| def bin_decode(self, transformer_output: Tensor, batch_emb: Optional[Tensor] = None) -> Tensor: |
| if self.bin_output: |
| return self.value_decode( |
| transformer_output |
| if not self.use_batch_labels |
| else torch.cat( |
| [ |
| transformer_output, |
| batch_emb.unsqueeze(1).repeat(1, transformer_output.shape[1], 1), |
| ], |
| dim=2, |
| ), |
| ) |
| else: |
| raise ValueError("bin_output is not enabled") |
| |
| def encode_batch( |
| self, |
| src: Tensor, |
| values: Tensor, |
| src_key_padding_mask: Tensor, |
| batch_size: int, |
| batch_labels: Optional[Tensor] = None, |
| output_to_cpu: bool = True, |
| time_step: Optional[int] = None, |
| return_np: bool = False, |
| ) -> Tensor: |
| """ |
| Args: |
| src (Tensor): shape [N, seq_len] |
| values (Tensor): shape [N, seq_len] |
| src_key_padding_mask (Tensor): shape [N, seq_len] |
| batch_size (int): batch size for encoding |
| batch_labels (Tensor): shape [N, n_batch_labels] |
| output_to_cpu (bool): whether to move the output to cpu |
| time_step (int): the time step index in the transformer output to return. |
                The time step is along the second dimension. If None, return all.
| return_np (bool): whether to return numpy array |
| |
| Returns: |
| output Tensor of shape [N, seq_len, embsize] |
| """ |
| N = src.size(0) |
| device = next(self.parameters()).device |
|
|
| |
| array_func = np.zeros if return_np else torch.zeros |
| float32_ = np.float32 if return_np else torch.float32 |
| shape = ( |
| (N, self.d_model) |
| if time_step is not None |
| else (N, src.size(1), self.d_model) |
| ) |
| outputs = array_func(shape, dtype=float32_) |
|
|
| for i in trange(0, N, batch_size): |
| raw_output = self._encode( |
| src[i : i + batch_size].to(device), |
| values[i : i + batch_size].to(device), |
| src_key_padding_mask[i : i + batch_size].to(device), |
| batch_labels[i : i + batch_size].to(device) |
| if batch_labels is not None |
| else None, |
| ) |
| output = raw_output.detach() |
| if output_to_cpu: |
| output = output.cpu() |
| if return_np: |
| output = output.numpy() |
| if time_step is not None: |
| output = output[:, time_step, :] |
| outputs[i : i + batch_size] = output |
|
|
| return outputs |
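

# Usage sketch (illustrative only, not part of the training pipeline): a minimal
# forward pass under the default settings, assuming a vocab mapping with a
# "<pad>" entry; all hyperparameters below are placeholder values.
#
#   vocab = {"<pad>": 0, "gene_a": 1, "gene_b": 2}
#   model = TransformerModel(
#       ntoken=len(vocab), d_model=16, nhead=2, d_hid=32, nlayers=2, vocab=vocab
#   )
#   src = torch.tensor([[1, 2, 0]])             # gene token ids
#   values = torch.rand(1, 3)                   # expression values
#   padding_mask = src.eq(vocab["<pad>"])       # True at padded positions
#   out = model(src, values, src_key_padding_mask=padding_mask)
#   # out["mlm_output"]: (1, 3) predicted values, out["cell_emb"]: (1, 16)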
|
|
|
|
| def generate_square_subsequent_mask(sz: int) -> Tensor: |
| """Generates an upper-triangular matrix of -inf, with zeros on diag.""" |
| return torch.triu(torch.ones(sz, sz) * float("-inf"), diagonal=1) |
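

# Example (illustrative): generate_square_subsequent_mask(3) returns
#   tensor([[0., -inf, -inf],
#           [0., 0., -inf],
#           [0., 0., 0.]])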
|
|
|
|
| class FastTransformerEncoderWrapper(nn.Module): |
| def __init__( |
| self, |
| d_model: int, |
| nhead: int, |
| d_hid: int, |
| nlayers: int, |
| dropout: float = 0.5, |
| ): |
| super().__init__() |
| self.fast_transformer_encoder = self.build_fast_transformer_encoder( |
| d_model, nhead, d_hid, nlayers, dropout |
| ) |
|
|
| @staticmethod |
| def build_fast_transformer_encoder( |
| d_model: int, nhead: int, d_hid: int, nlayers: int, dropout: float |
| ) -> nn.Module: |
| from fast_transformers.builders import TransformerEncoderBuilder |
|
|
| if d_model % nhead != 0: |
| raise ValueError( |
| f"d_model must be divisible by nhead, " |
| f"got d_model={d_model} and nhead={nhead}" |
| ) |
| builder = TransformerEncoderBuilder.from_kwargs( |
| n_layers=nlayers, |
| n_heads=nhead, |
| query_dimensions=d_model // nhead, |
| value_dimensions=d_model // nhead, |
| feed_forward_dimensions=d_hid, |
| attention_type="linear", |
| attention_dropout=dropout, |
| dropout=dropout, |
| activation="gelu", |
| ) |
| assert builder.attention_type == "linear" |
| return builder.get() |
|
|
| @staticmethod |
| def build_length_mask( |
| src: Tensor, |
| src_key_padding_mask: torch.BoolTensor, |
| ) -> "LengthMask": |
| from fast_transformers.masking import LengthMask |
|
|
| seq_len = src.shape[1] |
| num_paddings = src_key_padding_mask.sum(dim=1) |
| actual_seq_len = seq_len - num_paddings |
| length_mask = LengthMask(actual_seq_len, max_len=seq_len, device=src.device) |
|
|
| if src_key_padding_mask[length_mask.bool_matrix].sum() != 0: |
| raise ValueError( |
| "Found padding tokens in the middle of the sequence. " |
| "src_key_padding_mask and length_mask are not compatible." |
| ) |
| return length_mask |
|
|
| def forward( |
| self, |
| src: Tensor, |
| src_key_padding_mask: torch.BoolTensor, |
| ) -> Tensor: |
| """ |
| Args: |
| src: Tensor, shape [N, seq_len, embsize] |
| src_key_padding_mask: Tensor, shape [N, seq_len] |
| |
| Returns: |
| output Tensor of shape [N, seq_len, embsize] |
| """ |
| if src_key_padding_mask.shape != src.shape[:2]: |
| raise ValueError( |
| f"src_key_padding_mask shape {src_key_padding_mask.shape} " |
| f"does not match first two dims of src shape {src.shape[:2]}" |
| ) |
|
|
| if src_key_padding_mask.dtype != torch.bool: |
| raise ValueError( |
| f"src_key_padding_mask needs to be of type torch.bool, " |
| f"got {src_key_padding_mask.dtype}" |
| ) |
|
|
| length_mask = self.build_length_mask(src, src_key_padding_mask) |
| output = self.fast_transformer_encoder(src, length_mask=length_mask) |
| return output |
|
|
| class AttentionPooling(nn.Module): |
| def __init__(self, n, d_model): |
| super().__init__() |
| self.n = n |
| self.d_model = d_model |
| self.query = nn.Parameter(torch.randn(n, d_model)) |
| self.K = nn.Linear(d_model, d_model) |
| self.V = nn.Linear(d_model, d_model) |
| self.ln = nn.LayerNorm(d_model) |
| print(f"using attention pooling") |
| self.ff = nn.Sequential( |
| nn.Linear(d_model, d_model * 4), |
| nn.GELU(), |
| nn.Linear(d_model * 4, d_model), |
| nn.Dropout(0.5), |
| ) |
|
|
| def forward(self, hidden): |
| if hidden.dim() == 2: |
| hidden = hidden.unsqueeze(0) |
|
|
| B, L, D = hidden.shape |
| k = self.K(hidden) |
| v = self.V(hidden) |
| attn_score = torch.matmul(self.query, k.transpose(1, 2)) / (D ** 0.5) |
| attn_weights = F.softmax(attn_score, dim=-1) |
|
|
| pooled = torch.matmul(attn_weights, v) |
| residual = pooled |
| pooled = self.ff(pooled) |
| pooled = self.ln(pooled + residual) |
| |
| return pooled.squeeze(0) if pooled.size(0) == 1 else pooled |
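

# Shape sketch (illustrative): AttentionPooling maps a sequence of token
# embeddings to a fixed number `n` of pooled vectors via learned queries.
# The sizes below are placeholders.
#
#   pool = AttentionPooling(n=4, d_model=16)
#   hidden = torch.randn(2, 10, 16)             # (batch, seq_len, d_model)
#   pooled = pool(hidden)                       # (batch, n, d_model) -> (2, 4, 16)

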
| class FlashMultiheadAttention(nn.Module): |
| """ |
| Multi-head self-attention using flash-attn backend. |
| Only supports self-attention (q=k=v) and batch_first=True. |
| """ |
| def __init__(self, embed_dim, num_heads, dropout=0.0, batch_first=True, **factory_kwargs): |
| super().__init__() |
| assert batch_first, "Only batch_first=True is supported." |
| self.embed_dim = embed_dim |
| self.num_heads = num_heads |
| self.dropout = dropout |
| self.head_dim = embed_dim // num_heads |
| assert ( |
| self.head_dim * num_heads == embed_dim |
| ), "embed_dim must be divisible by num_heads" |
| self.qkv_proj = nn.Linear(embed_dim, 3 * embed_dim, **factory_kwargs) |
| self.out_proj = nn.Linear(embed_dim, embed_dim, **factory_kwargs) |
|
|
    def forward(self, x, key_padding_mask=None):
        """
        Args:
            x: Tensor, shape (batch, seq_len, embed_dim)
            key_padding_mask: optional BoolTensor, shape (batch, seq_len), where
                True marks valid (non-padding) positions, matching the mask
                produced by FlashTransformerEncoderLayer below.
        """
        B, S, C = x.shape
        qkv = self.qkv_proj(x)
        qkv = qkv.view(B, S, 3, self.num_heads, self.head_dim)
        q = qkv[:, :, 0]
        k = qkv[:, :, 1]
        v = qkv[:, :, 2]

        if key_padding_mask is not None:
            # flash_attn_func has no key_padding_mask argument; as an
            # approximation, zero out q/k/v at padded positions (padded keys
            # still receive a small, uniform share of attention mass).
            mask = key_padding_mask.to(q.dtype)
            q = q * mask.unsqueeze(-1).unsqueeze(-1)
            k = k * mask.unsqueeze(-1).unsqueeze(-1)
            v = v * mask.unsqueeze(-1).unsqueeze(-1)
        attn_output = flash_attn_func(
            q, k, v, dropout_p=self.dropout if self.training else 0.0, causal=False
        )
        attn_output = attn_output.reshape(B, S, C)
        return self.out_proj(attn_output), None
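
# Note (assumption about the flash-attn backend): flash_attn_func accepts only
# fp16/bf16 CUDA tensors, so FlashMultiheadAttention is typically run under
# autocast or with a half-precision model. Illustrative sketch:
#
#   attn = FlashMultiheadAttention(embed_dim=64, num_heads=4).cuda()
#   x = torch.randn(2, 10, 64, device="cuda")
#   with torch.autocast(device_type="cuda", dtype=torch.float16):
#       out, _ = attn(x)                        # out: (2, 10, 64)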
|
|
|
|
| class FlashTransformerEncoderLayer(nn.Module): |
| r"""TransformerEncoderLayer is made up of self-attn and feedforward network. |
| The class is modified from torch.nn.TransformerEncoderLayer to support the |
| FlashAttention. |
| |
| Args: |
| d_model: the number of expected features in the input (required). |
| nhead: the number of heads in the multiheadattention models (required). |
| dim_feedforward: the dimension of the feedforward network model (default=2048). |
| dropout: the dropout value (default=0.1). |
| activation: the activation function of intermediate layer, relu or gelu (default=relu). |
| layer_norm_eps: the eps value in layer normalization components (default=1e-5). |
| batch_first: If ``True``, then the input and output tensors are provided |
| as (batch, seq, feature). Default: ``False``. |
| |
| Examples:: |
| >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8) |
| >>> src = torch.rand(10, 32, 512) |
| >>> out = encoder_layer(src) |
| |
| Alternatively, when ``batch_first`` is ``True``: |
| >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8, batch_first=True) |
| >>> src = torch.rand(32, 10, 512) |
| >>> out = encoder_layer(src) |
| """ |
| __constants__ = ["batch_first"] |
|
|
| def __init__( |
| self, |
| d_model, |
| nhead, |
| dim_feedforward=2048, |
| dropout=0.1, |
| activation="relu", |
| layer_norm_eps=1e-5, |
| batch_first=True, |
| device=None, |
| dtype=None, |
| norm_scheme="post", |
| ) -> None: |
| factory_kwargs = {"device": device, "dtype": dtype} |
| super().__init__() |
| self.self_attn = FlashMultiheadAttention( |
| embed_dim=d_model, |
| num_heads=nhead, |
| batch_first=batch_first, |
| dropout=dropout, |
| **factory_kwargs, |
| ) |
| |
| |
| if not hasattr(self.self_attn, "batch_first"): |
| self.self_attn.batch_first = batch_first |
| |
| self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs) |
| self.dropout = nn.Dropout(dropout) |
| self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs) |
|
|
| self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) |
| self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) |
| self.dropout1 = nn.Dropout(dropout) |
| self.dropout2 = nn.Dropout(dropout) |
|
|
| self.activation = self._get_activation_fn(activation) |
| self.norm_scheme = norm_scheme |
| if self.norm_scheme not in ["pre", "post"]: |
| raise ValueError(f"norm_scheme should be pre or post, not {norm_scheme}") |
|
|
| @staticmethod |
| def _get_activation_fn(activation): |
| if activation == "relu": |
| return F.relu |
| elif activation == "gelu": |
| return F.gelu |
|
|
| raise RuntimeError("activation should be relu/gelu, not {}".format(activation)) |
|
|
| def __setstate__(self, state): |
| if "activation" not in state: |
| state["activation"] = F.relu |
| super().__setstate__(state) |
|
|
| def forward( |
| self, |
| src: Tensor, |
| src_mask: Optional[Tensor] = None, |
| src_key_padding_mask: Optional[Tensor] = None, |
| **kwargs, |
| ) -> Tensor: |
| r"""Pass the input through the encoder layer. |
| |
| Args: |
| src: the sequence to the encoder layer (required). |
| src_mask: the mask for the src sequence (optional). |
| src_key_padding_mask: the mask for the src keys per batch (optional). |
| |
| Shape: |
| see the docs in Transformer class. |
| """ |
| if src_mask is not None: |
| raise ValueError("FlashTransformerEncoderLayer does not support src_mask") |
|
|
| if not src_key_padding_mask.any().item(): |
| |
| src_key_padding_mask_ = None |
| else: |
| if src_key_padding_mask.dtype != torch.bool: |
| src_key_padding_mask = src_key_padding_mask.bool() |
| |
| src_key_padding_mask_ = ~src_key_padding_mask |
|
|
| if self.norm_scheme == "pre": |
| src = self.norm1(src) |
| src2 = self.self_attn(src, key_padding_mask=src_key_padding_mask_)[0] |
| src = src + self.dropout1(src2) |
| src = self.norm2(src) |
| src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) |
| src = src + self.dropout2(src2) |
| else: |
| src2 = self.self_attn(src, key_padding_mask=src_key_padding_mask_)[0] |
| src = src + self.dropout1(src2) |
| src = self.norm1(src) |
| src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) |
| src = src + self.dropout2(src2) |
| src = self.norm2(src) |
|
|
| return src |
| |
| class DownTransformerEncoder(nn.Module): |
| def __init__(self, d_model, n_top_genes, nhead, d_hid, nlayers, dropout): |
| super().__init__() |
| self.nlayers = nlayers // 2 |
| gene_d_model = [n_top_genes // 2 ** i for i in range(0,self.nlayers+1)] |
| self.transformer_gene = [] |
| self.transformer_feature = [] |
| self.down_transfer = [] |
| |
| for i in range(self.nlayers): |
| self.transformer_gene.append(TransformerEncoderLayer(gene_d_model[i+1], nhead, d_hid, dropout, batch_first=True)) |
| self.transformer_feature.append(nn.Sequential(TransformerEncoderLayer(d_model, nhead, d_hid, dropout, batch_first=True), |
| TransformerEncoderLayer(d_model, nhead, d_hid, dropout, batch_first=True))) |
| self.down_transfer.append(nn.Sequential( |
| nn.Linear(gene_d_model[i], gene_d_model[i+1]), |
| nn.ReLU(), |
| nn.LayerNorm(gene_d_model[i+1]), |
| )) |
| self.down_transfer = nn.ModuleList(self.down_transfer) |
| self.transformer_gene = nn.ModuleList(self.transformer_gene) |
| self.transformer_feature = nn.ModuleList(self.transformer_feature) |
| |
    def forward(self, src, src_key_padding_mask=None):
        # NOTE: only the feature-dimension transformer stacks are applied here;
        # the gene-dimension layers and the down_transfer MLPs are constructed
        # in __init__ but are not used by this forward pass.
        for i in range(self.nlayers):
            src = self.transformer_feature[i](src)
        return src
| |
| class UpTransformerDecoder(nn.Module): |
| def __init__(self, d_model, n_top_genes, nhead, d_hid, nlayers, dropout): |
| super().__init__() |
| self.nlayers = nlayers // 2 |
| gene_d_model = [n_top_genes // 2 ** i for i in range(self.nlayers,-1,-1)] |
| self.transformer_gene = [] |
| self.transformer_feature = [] |
| self.up_transfer = [] |
| |
| for i in range(self.nlayers): |
| self.transformer_gene.append(TransformerEncoderLayer(gene_d_model[i+1], nhead, d_hid, dropout, batch_first=True)) |
| self.transformer_feature.append(nn.Sequential(TransformerEncoderLayer(d_model, nhead, d_hid, dropout, batch_first=True), |
| TransformerEncoderLayer(d_model, nhead, d_hid, dropout, batch_first=True))) |
| self.up_transfer.append(nn.Sequential( |
| nn.Linear(gene_d_model[i], gene_d_model[i+1]), |
| nn.ReLU(), |
| nn.LayerNorm(gene_d_model[i+1]), |
| )) |
| self.up_transfer = nn.ModuleList(self.up_transfer) |
| self.transformer_gene = nn.ModuleList(self.transformer_gene) |
| self.transformer_feature = nn.ModuleList(self.transformer_feature) |
|
|
    def forward(self, src, src_key_padding_mask=None):
        # NOTE: as in DownTransformerEncoder, only the feature-dimension
        # transformer stacks are applied; the gene-dimension layers and the
        # up_transfer MLPs are constructed but unused here.
        for i in range(self.nlayers):
            src = self.transformer_feature[i](src)
        return src
| |
|
|
| class CrossAttentionModule(nn.Module): |
| def __init__(self, embed_dim, num_heads, dropout=0.0): |
| super().__init__() |
| self.attn = nn.MultiheadAttention(embed_dim=embed_dim, |
| num_heads=num_heads, |
| dropout=dropout, |
| batch_first=True) |
| self.norm1 = nn.LayerNorm(embed_dim) |
| self.norm2 = nn.LayerNorm(embed_dim) |
| self.ff = nn.Sequential( |
| nn.Linear(embed_dim, embed_dim * 4), |
| nn.GELU(), |
| nn.Linear(embed_dim * 4, embed_dim), |
| nn.Dropout(dropout), |
| ) |
| self.selfattn = nn.MultiheadAttention(embed_dim=embed_dim, |
| num_heads=num_heads, |
| dropout=dropout, |
| batch_first=True) |
| self.norm3 = nn.LayerNorm(embed_dim) |
| self.norm4 = nn.LayerNorm(embed_dim) |
| self.ff2 = nn.Sequential( |
| nn.Linear(embed_dim, embed_dim * 4), |
| nn.GELU(), |
| nn.Linear(embed_dim * 4, embed_dim), |
| nn.Dropout(dropout), |
| ) |
|
|
| def forward(self, x1, x2, attn_mask=None, key_padding_mask=None): |
| |
| residual = x1 |
| attn_out, attn_weights = self.attn(query=x1, key=x2, value=x2, |
| attn_mask=attn_mask, |
| key_padding_mask=key_padding_mask) |
| x = self.norm1(attn_out + residual) |
|
|
| |
| residual2 = x |
| x = self.ff(x) |
| x = self.norm2(x + residual2) |
|
|
| residual = x |
| attn_out, _ = self.selfattn(query=x, key=x, value=x, |
| attn_mask=attn_mask, |
| key_padding_mask=key_padding_mask) |
| x = self.norm3(attn_out + residual) |
|
|
| |
| residual2 = x |
| x = self.ff2(x) |
| x = self.norm4(x + residual2) |
| return x, attn_weights |
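

# Usage sketch (illustrative): cross-attention from x1 over x2, followed by
# self-attention on the result, each with a residual + layer-norm + MLP block.
# Note that the same key_padding_mask is passed to both attention calls, so it
# must be consistent with the key length of each call. Sizes are placeholders.
#
#   xattn = CrossAttentionModule(embed_dim=16, num_heads=2)
#   x1 = torch.randn(2, 5, 16)                  # queries
#   x2 = torch.randn(2, 8, 16)                  # keys / values
#   out, weights = xattn(x1, x2)                # out: (2, 5, 16), weights: (2, 5, 8)

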
| class GeneEncoder(nn.Module): |
| def __init__( |
| self, |
| num_embeddings: int, |
| embedding_dim: int, |
| padding_idx: Optional[int] = None, |
| ): |
| super().__init__() |
| self.embedding = nn.Embedding( |
| num_embeddings, embedding_dim, padding_idx=padding_idx |
| ) |
| self.enc_norm = nn.LayerNorm(embedding_dim) |
|
|
| def forward(self, x: Tensor) -> Tensor: |
| x = self.embedding(x) |
| x = self.enc_norm(x) |
| return x |
|
|
|
|
| class PositionalEncoding(nn.Module): |
| def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000): |
| super().__init__() |
| self.dropout = nn.Dropout(p=dropout) |
|
|
| position = torch.arange(max_len).unsqueeze(1) |
| div_term = torch.exp( |
| torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model) |
| ) |
| pe = torch.zeros(max_len, 1, d_model) |
| pe[:, 0, 0::2] = torch.sin(position * div_term) |
| pe[:, 0, 1::2] = torch.cos(position * div_term) |
| self.register_buffer("pe", pe) |
|
|
| def forward(self, x: Tensor) -> Tensor: |
| """ |
| Args: |
| x: Tensor, shape [seq_len, batch_size, embedding_dim] |
| """ |
| x = x + self.pe[: x.size(0)] |
| return self.dropout(x) |
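

# Note: PositionalEncoding follows the (seq_len, batch, embedding_dim) layout of
# the classic PyTorch tutorial and is not batch_first; tensors used elsewhere in
# this file would need transposing before applying it. Illustrative shapes:
#
#   pos_enc = PositionalEncoding(d_model=16)
#   x = torch.randn(10, 2, 16)                  # (seq_len, batch, d_model)
#   x = pos_enc(x)                              # same shape, positional terms added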
|
|
|
|
| class ContinuousValueEncoder(nn.Module): |
| """ |
| Encode real number values to a vector using neural nets projection. |
| """ |
|
|
| def __init__(self, d_model: int, dropout: float = 0.1, max_value: int = 512): |
| super().__init__() |
| self.dropout = nn.Dropout(p=dropout) |
| self.linear1 = nn.Linear(1, d_model) |
| self.activation = nn.ReLU() |
| self.linear2 = nn.Linear(d_model, d_model) |
| self.norm = nn.LayerNorm(d_model) |
| self.max_value = max_value |
|
|
| def forward(self, x: Tensor) -> Tensor: |
| """ |
| Args: |
| x: Tensor, shape [batch_size, seq_len] |
| """ |
| |
| |
| x = x.unsqueeze(-1) |
| |
| x = torch.clamp(x, max=self.max_value) |
| x = self.activation(self.linear1(x)) |
| x = self.linear2(x) |
| x = self.norm(x) |
| return self.dropout(x) |
|
|
|
|
| class CategoryValueEncoder(nn.Module): |
| def __init__( |
| self, |
| num_embeddings: int, |
| embedding_dim: int, |
| padding_idx: Optional[int] = None, |
| ): |
| super().__init__() |
| self.embedding = nn.Embedding( |
| num_embeddings, embedding_dim, padding_idx=padding_idx |
| ) |
| self.enc_norm = nn.LayerNorm(embedding_dim) |
|
|
| def forward(self, x: Tensor) -> Tensor: |
| x = x.long() |
| x = self.embedding(x) |
| x = self.enc_norm(x) |
| return x |
|
|
|
|
| class CategoryValueDecoder(nn.Module): |
| def __init__( |
| self, |
| d_model: int, |
| n_bins: int, |
| use_batch_labels: bool = False, |
| nlayers: int = 3, |
| activation: callable = nn.ReLU, |
| ): |
| super().__init__() |
| self._decoder = nn.ModuleList() |
| d_in = d_model * 2 if use_batch_labels else d_model |
| if use_batch_labels: |
| self._decoder.append(nn.Linear(d_in, d_model)) |
| self._decoder.append(activation()) |
| self._decoder.append(nn.LayerNorm(d_model)) |
| for i in range(nlayers - 1): |
| self._decoder.append(nn.Linear(d_model, d_model)) |
| self._decoder.append(activation()) |
| self._decoder.append(nn.LayerNorm(d_model)) |
| self.out_layer = nn.Linear(d_model, n_bins) |
|
|
    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x: Tensor, shape [batch_size, seq_len, d_in]
        Returns:
            Tensor, shape [batch_size, seq_len, n_bins]
        """
| for layer in self._decoder: |
| x = layer(x) |
| return self.out_layer(x) |
| |
|
|
| class BatchLabelEncoder(nn.Module): |
| def __init__( |
| self, |
| num_embeddings: int, |
| embedding_dim: int, |
| padding_idx: Optional[int] = None, |
| ): |
| super().__init__() |
| self.embedding = nn.Embedding( |
| num_embeddings, embedding_dim, padding_idx=padding_idx |
| ) |
| self.enc_norm = nn.LayerNorm(embedding_dim) |
|
|
| def forward(self, x: Tensor) -> Tensor: |
| x = self.embedding(x) |
| x = self.enc_norm(x) |
| return x |
|
|
|
|
| class Similarity(nn.Module): |
| """ |
| Dot product or cosine similarity |
| """ |
|
|
| def __init__(self, temp): |
| super().__init__() |
| self.temp = temp |
| self.cos = nn.CosineSimilarity(dim=-1) |
|
|
| def forward(self, x, y): |
| return self.cos(x, y) / self.temp |
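

# Shape sketch (illustrative): in the CCE objective above, two batches of cell
# embeddings are broadcast into an (N, N) cosine-similarity matrix whose
# diagonal holds the positive pairs.
#
#   sim = Similarity(temp=0.5)
#   a, b = torch.randn(4, 16), torch.randn(4, 16)
#   cos = sim(a.unsqueeze(1), b.unsqueeze(0))   # (4, 4), cosine similarity / temp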
|
|
|
|
| class ExprDecoder(nn.Module): |
| def __init__( |
| self, |
| d_model: int, |
| explicit_zero_prob: bool = False, |
| use_batch_labels: bool = False, |
| ): |
| super().__init__() |
| d_in = d_model * 2 if use_batch_labels else d_model |
| self.fc = nn.Sequential( |
| nn.Linear(d_in, d_model), |
| nn.LeakyReLU(), |
| nn.Linear(d_model, d_model), |
| nn.LeakyReLU(), |
| nn.Linear(d_model, 1), |
| ) |
| self.explicit_zero_prob = explicit_zero_prob |
| if explicit_zero_prob: |
| self.zero_logit = nn.Sequential( |
| nn.Linear(d_in, d_model), |
| nn.LeakyReLU(), |
| nn.Linear(d_model, d_model), |
| nn.LeakyReLU(), |
| nn.Linear(d_model, 1), |
| ) |
|
|
| def forward(self, x: Tensor) -> Dict[str, Tensor]: |
| """x is the output of the transformer, (batch, seq_len, d_model)""" |
| pred_value = self.fc(x).squeeze(-1) |
|
|
| if not self.explicit_zero_prob: |
| return dict(pred=pred_value) |
| zero_logits = self.zero_logit(x).squeeze(-1) |
| zero_probs = torch.sigmoid(zero_logits) |
| return dict(pred=pred_value, zero_probs=zero_probs) |
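
# Shape sketch (illustrative):
#
#   dec = ExprDecoder(d_model=16, explicit_zero_prob=True)
#   h = torch.randn(2, 5, 16)                   # transformer output
#   out = dec(h)                                # out["pred"], out["zero_probs"]: (2, 5)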
| |
| |
| |
| |
| |
|
|
|
|
| class ClsDecoder(nn.Module): |
| """ |
| Decoder for classification task. |
| """ |
|
|
| def __init__( |
| self, |
| d_model: int, |
| n_cls: int, |
| nlayers: int = 3, |
| activation: callable = nn.ReLU, |
| ): |
| super().__init__() |
| |
| self._decoder = nn.ModuleList() |
| for i in range(nlayers - 1): |
| self._decoder.append(nn.Linear(d_model, d_model)) |
| self._decoder.append(activation()) |
| self._decoder.append(nn.LayerNorm(d_model)) |
| self.out_layer = nn.Linear(d_model, n_cls) |
|
|
| def forward(self, x: Tensor) -> Tensor: |
| """ |
| Args: |
| x: Tensor, shape [batch_size, embsize] |
| """ |
| for layer in self._decoder: |
| x = layer(x) |
| return self.out_layer(x) |
|
|
|
|
| class MVCDecoder(nn.Module): |
| """ |
| Decoder for the masked value prediction for cell embeddings. |
| """ |
|
|
| def __init__( |
| self, |
| d_model: int, |
| arch_style: str = "inner product", |
| query_activation: nn.Module = nn.Sigmoid, |
| hidden_activation: nn.Module = nn.PReLU, |
| explicit_zero_prob: bool = False, |
| use_batch_labels: bool = False, |
| ) -> None: |
| """ |
| Args: |
| d_model (:obj:`int`): dimension of the gene embedding. |
| arch_style (:obj:`str`): architecture style of the decoder, choice from |
| 1. "inner product" or 2. "concat query" or 3. "sum query". |
| query_activation (:obj:`nn.Module`): activation function for the query |
| vectors. |
| hidden_activation (:obj:`nn.Module`): activation function for the hidden |
| layers. |
| """ |
| super().__init__() |
| d_in = d_model * 2 if use_batch_labels else d_model |
| if arch_style in ["inner product", "inner product, detach"]: |
| self.gene2query = nn.Linear(d_model, d_model) |
| self.query_activation = query_activation() |
| self.W = nn.Linear(d_model, d_in, bias=False) |
| if explicit_zero_prob: |
| self.W_zero_logit = nn.Linear(d_model, d_in) |
| elif arch_style == "concat query": |
| self.gene2query = nn.Linear(d_model, 64) |
| self.query_activation = query_activation() |
| self.fc1 = nn.Linear(d_model + 64, 64) |
| self.hidden_activation = hidden_activation() |
| self.fc2 = nn.Linear(64, 1) |
| elif arch_style == "sum query": |
| self.gene2query = nn.Linear(d_model, d_model) |
| self.query_activation = query_activation() |
| self.fc1 = nn.Linear(d_model, 64) |
| self.hidden_activation = hidden_activation() |
| self.fc2 = nn.Linear(64, 1) |
| else: |
| raise ValueError(f"Unknown arch_style: {arch_style}") |
|
|
| self.arch_style = arch_style |
| self.do_detach = arch_style.endswith("detach") |
| self.explicit_zero_prob = explicit_zero_prob |
|
|
| def forward( |
| self, cell_emb: Tensor, gene_embs: Tensor |
| ) -> Union[Tensor, Dict[str, Tensor]]: |
| """ |
| Args: |
| cell_emb: Tensor, shape (batch, embsize=d_model) |
| gene_embs: Tensor, shape (batch, seq_len, embsize=d_model) |
| """ |
| gene_embs = gene_embs.detach() if self.do_detach else gene_embs |
| if self.arch_style in ["inner product", "inner product, detach"]: |
| query_vecs = self.query_activation(self.gene2query(gene_embs)) |
| cell_emb = cell_emb.unsqueeze(2) |
| |
| pred_value = torch.bmm(self.W(query_vecs), cell_emb).squeeze(2) |
| if not self.explicit_zero_prob: |
| return dict(pred=pred_value) |
| |
| zero_logits = torch.bmm(self.W_zero_logit(query_vecs), cell_emb).squeeze(2) |
| zero_probs = torch.sigmoid(zero_logits) |
| return dict(pred=pred_value, zero_probs=zero_probs) |
| elif self.arch_style == "concat query": |
| query_vecs = self.query_activation(self.gene2query(gene_embs)) |
| |
| cell_emb = cell_emb.unsqueeze(1).expand(-1, gene_embs.shape[1], -1) |
|
|
| h = self.hidden_activation( |
| self.fc1(torch.cat([cell_emb, query_vecs], dim=2)) |
| ) |
| if self.explicit_zero_prob: |
| raise NotImplementedError |
| return self.fc2(h).squeeze(2) |
| elif self.arch_style == "sum query": |
| query_vecs = self.query_activation(self.gene2query(gene_embs)) |
| cell_emb = cell_emb.unsqueeze(1) |
|
|
| h = self.hidden_activation(self.fc1(cell_emb + query_vecs)) |
| if self.explicit_zero_prob: |
| raise NotImplementedError |
| return self.fc2(h).squeeze(2) |
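

# Shape sketch (illustrative) for the default "inner product" style:
#
#   mvc = MVCDecoder(d_model=16)
#   cell_emb = torch.randn(2, 16)               # (batch, d_model)
#   gene_embs = torch.randn(2, 5, 16)           # (batch, seq_len, d_model)
#   pred = mvc(cell_emb, gene_embs)["pred"]     # (2, 5)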
|
|
|
|
| class AdversarialDiscriminator(nn.Module): |
| """ |
| Discriminator for the adversarial training for batch correction. |
| """ |
|
|
| def __init__( |
| self, |
| d_model: int, |
| n_cls: int, |
| nlayers: int = 3, |
| activation: callable = nn.LeakyReLU, |
| reverse_grad: bool = False, |
| ): |
| super().__init__() |
| |
| self._decoder = nn.ModuleList() |
| for i in range(nlayers - 1): |
| self._decoder.append(nn.Linear(d_model, d_model)) |
| self._decoder.append(activation()) |
| self._decoder.append(nn.LayerNorm(d_model)) |
| self.out_layer = nn.Linear(d_model, n_cls) |
| self.reverse_grad = reverse_grad |
|
|
| def forward(self, x: Tensor) -> Tensor: |
| """ |
| Args: |
| x: Tensor, shape [batch_size, embsize] |
| """ |
| if self.reverse_grad: |
| x = grad_reverse(x, lambd=1.0) |
| for layer in self._decoder: |
| x = layer(x) |
| return self.out_layer(x) |
| |
| class GradReverse(Function): |
| @staticmethod |
| def forward(ctx, x: torch.Tensor, lambd: float) -> torch.Tensor: |
| ctx.lambd = lambd |
| return x.view_as(x) |
|
|
| @staticmethod |
    def backward(ctx, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
        return grad_output.neg() * ctx.lambd, None
|
|
|
|
| def grad_reverse(x: torch.Tensor, lambd: float = 1.0) -> torch.Tensor: |
| return GradReverse.apply(x, lambd) |
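

# Sanity-check sketch (illustrative): grad_reverse is the identity in the
# forward pass and scales gradients by -lambd in the backward pass.
#
#   x = torch.ones(3, requires_grad=True)
#   grad_reverse(x, lambd=2.0).sum().backward()
#   # x.grad is now tensor([-2., -2., -2.])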
|
|
try:
    # flash-attn v2 exposes the functional attention API used by
    # FlashMultiheadAttention above.
    from flash_attn import flash_attn_func

    flash_attn_available = True
except ImportError:
    warnings.warn("flash_attn is not installed")
    flash_attn_available = False