import math
from typing import Dict, Optional

import torch
import torch.nn as nn
from torch import Tensor

from .blocks import CrossAttentionTransformerLayer


class GeneadaLN(nn.Module):
    """Adaptive LayerNorm (adaLN) modulation of value embeddings, conditioned
    on gene embeddings."""

    def __init__(self, hidden_size: int, dropout: float = 0.1):
        super().__init__()
        # NOTE: dropout is accepted for interface consistency but currently unused.
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 3 * hidden_size, bias=True),
        )
        self.norm = nn.LayerNorm(hidden_size)

    def forward(self, gene_emb: Tensor, value_emb: Tensor) -> Tensor:
        # Project the conditioning gene embedding into shift/gate/scale terms.
        shift, gate, scale = self.adaLN_modulation(gene_emb).chunk(3, dim=-1)
        # Gated residual update of the normalized value embedding.
        x = value_emb + gate * (self.norm(value_emb) * scale + shift)
        return x
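
# A minimal usage sketch (hypothetical shapes; both inputs must share
# hidden_size as their last dimension):
#
#   layer = GeneadaLN(hidden_size=512)
#   gene_emb = torch.randn(8, 1200, 512)   # conditioning gene embeddings
#   value_emb = torch.randn(8, 1200, 512)  # value embeddings to modulate
#   out = layer(gene_emb, value_emb)       # (8, 1200, 512)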


class ContinuousValueEncoder(nn.Module):
    """Encode real-valued numbers into d_model-dimensional vectors with a
    small MLP projection."""

    def __init__(self, d_model: int, dropout: float = 0.1, max_value: int = 512):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.linear1 = nn.Linear(1, d_model)
        self.activation = nn.ReLU()
        self.linear2 = nn.Linear(d_model, d_model)
        self.norm = nn.LayerNorm(d_model)
        self.max_value = max_value

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x: Tensor, shape [batch_size, seq_len]

        Returns:
            Tensor, shape [batch_size, seq_len, d_model]
        """
        # TODO: test using an actual embedding layer if the input is categorical
        # Expand the last dimension so each scalar becomes a length-1 feature.
        x = x.unsqueeze(-1)
        # Clip values to (-inf, max_value] to bound the MLP input range.
        x = torch.clamp(x, max=self.max_value)
        x = self.activation(self.linear1(x))
        x = self.linear2(x)
        x = self.norm(x)
        return self.dropout(x)
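
# A minimal usage sketch (hypothetical values; inputs above max_value are
# clamped before encoding):
#
#   enc = ContinuousValueEncoder(d_model=512, max_value=512)
#   values = torch.rand(8, 1200) * 1000.0  # (batch, seq_len) raw expression values
#   emb = enc(values)                      # (8, 1200, 512)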


class GeneEncoder(nn.Module):
    """Embed gene (token) ids, optionally letting gene tokens cross-attend to
    perturbation embeddings."""

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        nhead: int = 8,
        use_perturbation_interaction: bool = False,
        dropout: float = 0.1,
        mask_path: Optional[str] = None,
    ):
        super().__init__()
        self.embedding = nn.Embedding(
            num_embeddings, embedding_dim, padding_idx=padding_idx
        )
        self.enc_norm = nn.LayerNorm(embedding_dim)
        self.use_perturbation_interaction = use_perturbation_interaction
        if use_perturbation_interaction:
            if mask_path is None:
                raise ValueError(
                    "mask_path is required when use_perturbation_interaction=True"
                )
            self.data_name = mask_path.split("/")[-2]
            self.perturbation_interaction = CrossAttentionTransformerLayer(
                embedding_dim, nhead, mlp_ratio=4.0, dropout=dropout
            )
            self.mask_padded = torch.load(mask_path)
            self.mask_num = self.mask_padded.shape[0]

    def forward(self, x: Tensor) -> Tensor:
        if self.use_perturbation_interaction:
            # NOTE: perturbations and genes share the same name/id space, so
            # gene ids index the interaction mask directly. Indexing with x[0]
            # assumes every sample in the batch uses the same gene order.
            if self.mask_padded.device != x.device:
                self.mask_padded = self.mask_padded.to(x.device)
            mask = self.mask_padded[x[0]]
        x = self.embedding(x)  # (batch, seq_len, embsize)
        x = self.enc_norm(x)
        if self.use_perturbation_interaction:
            # Embed all perturbation ids and let gene tokens cross-attend to them.
            memory_id = torch.arange(self.mask_num, device=x.device)
            memory_emb = self.embedding(memory_id)
            memory_emb = self.enc_norm(memory_emb).expand(x.shape[0], -1, -1)
            x = self.perturbation_interaction(x, memory_emb, mask)[0]
        return x
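
# A minimal usage sketch of the plain embedding path (hypothetical vocabulary
# size; the perturbation-interaction path additionally needs an interaction
# mask tensor loadable via torch.load from mask_path):
#
#   enc = GeneEncoder(num_embeddings=60000, embedding_dim=512, padding_idx=0)
#   gene_ids = torch.randint(1, 60000, (8, 1200))  # (batch, seq_len)
#   gene_emb = enc(gene_ids)                       # (8, 1200, 512)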


class BatchLabelEncoder(nn.Module):
    """Embed categorical batch labels into embedding_dim-dimensional vectors."""

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
    ):
        super().__init__()
        self.embedding = nn.Embedding(
            num_embeddings, embedding_dim, padding_idx=padding_idx
        )
        self.enc_norm = nn.LayerNorm(embedding_dim)

    def forward(self, x: Tensor) -> Tensor:
        x = self.embedding(x)  # (batch, embsize)
        x = self.enc_norm(x)
        return x
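
# A minimal usage sketch (hypothetical number of experimental batches):
#
#   enc = BatchLabelEncoder(num_embeddings=16, embedding_dim=512)
#   batch_labels = torch.randint(0, 16, (8,))  # (batch,)
#   batch_emb = enc(batch_labels)              # (8, 512)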


class ExprDecoder(nn.Module):
    """Decode transformer outputs into per-token scalar expression predictions."""

    def __init__(
        self,
        d_model: int,
        explicit_zero_prob: bool = False,
        use_batch_labels: bool = False,
    ):
        super().__init__()
        # When batch labels are used, their embedding is concatenated to the input.
        d_in = d_model * 2 if use_batch_labels else d_model
        self.fc = nn.Sequential(
            nn.Linear(d_in, d_model),
            nn.LeakyReLU(),
            nn.Linear(d_model, d_model),
            nn.LeakyReLU(),
            nn.Linear(d_model, 1),
        )
        self.explicit_zero_prob = explicit_zero_prob
        if explicit_zero_prob:
            # Separate head predicting the probability that the value is zero.
            self.zero_logit = nn.Sequential(
                nn.Linear(d_in, d_model),
                nn.LeakyReLU(),
                nn.Linear(d_model, d_model),
                nn.LeakyReLU(),
                nn.Linear(d_model, 1),
            )

    def forward(self, x: Tensor) -> Dict[str, Tensor]:
        """x is the output of the transformer, (batch, seq_len, d_model)."""
        pred_value = self.fc(x).squeeze(-1)  # (batch, seq_len)
        if not self.explicit_zero_prob:
            return dict(pred=pred_value)
        zero_logits = self.zero_logit(x).squeeze(-1)  # (batch, seq_len)
        zero_probs = torch.sigmoid(zero_logits)
        return dict(pred=pred_value, zero_probs=zero_probs)

    # TODO: the return above currently serves training only. Since the decoder
    # is not used at test time for the integration task, the eval/inference
    # logic is not implemented yet. Remember to implement it when the decoder
    # is used in any test setting; inference will need to sample from a
    # Bernoulli distribution parameterized by zero_probs.
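
# A minimal usage sketch (hypothetical shapes):
#
#   dec = ExprDecoder(d_model=512, explicit_zero_prob=True)
#   h = torch.randn(8, 1200, 512)  # transformer output
#   out = dec(h)
#   out["pred"]        # (8, 1200) predicted expression values
#   out["zero_probs"]  # (8, 1200) per-token probability of a zero value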


class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """

    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        :param t: a 1-D Tensor of N indices, one per batch element.
                  These may be fractional.
        :param dim: the dimension of the output.
        :param max_period: controls the minimum frequency of the embeddings.
        :return: an (N, D) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
        half = dim // 2
        # Geometric progression of frequencies from 1 down to 1/max_period.
        freqs = torch.exp(
            -math.log(max_period)
            * torch.arange(start=0, end=half, dtype=torch.float32)
            / half
        ).to(device=t.device)
        args = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            # Zero-pad to the requested dimension when dim is odd.
            embedding = torch.cat(
                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1
            )
        return embedding

    def forward(self, t):
        t_freq = self.timestep_embedding(t, self.frequency_embedding_size)
        t_emb = self.mlp(t_freq)
        return t_emb
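
# A minimal usage sketch (hypothetical diffusion timesteps):
#
#   embedder = TimestepEmbedder(hidden_size=512)
#   t = torch.randint(0, 1000, (8,)).float()  # one timestep per sample
#   t_emb = embedder(t)                       # (8, 512)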