"""
New layers for CCFM: LatentEmbedder and LatentDecoder.
Analogous to LatentForcing's dino_embedder (BottleneckPatchEmbed) and
final_layer_dino + dh_blocks_dino.
"""
from typing import Optional

import torch
import torch.nn as nn
class LatentEmbedder(nn.Module):
    """Project per-gene scGPT features into the backbone's model space.

    Maps (B, G, scgpt_dim) -> (B, G, d_model) via a two-layer bottleneck
    MLP (scgpt_dim -> bottleneck_dim -> d_model), analogous to
    LatentForcing's dino_embedder (BottleneckPatchEmbed with kernel=1).
    """

    def __init__(self, scgpt_dim: int = 512, bottleneck_dim: int = 128, d_model: int = 128):
        super().__init__()
        # Kept as a single Sequential so checkpoint state-dict keys
        # ("proj.0.*", "proj.2.*") remain stable.
        layers = [
            nn.Linear(scgpt_dim, bottleneck_dim),
            nn.GELU(),
            nn.Linear(bottleneck_dim, d_model),
        ]
        self.proj = nn.Sequential(*layers)

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        """Embed z of shape (B, G, scgpt_dim); returns (B, G, d_model)."""
        return self.proj(z)
class LatentDecoderBlock(nn.Module):
    """A transformer-style block with AdaLN conditioning for the decoder head.

    Analogous to LatentForcing's dh_blocks_dino: a conditioning vector c
    produces per-sample shift/scale/gate terms that modulate the pre-norm
    activations of both the self-attention and MLP sub-layers.
    """

    def __init__(self, hidden_size: int, num_heads: int = 4, mlp_ratio: float = 4.0,
                 hidden_size_c: Optional[int] = None):
        super().__init__()
        # Conditioning width defaults to the block width when not given.
        hidden_size_c = hidden_size_c or hidden_size
        # elementwise_affine=False: scale/shift come from adaLN_modulation.
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        mlp_hidden = int(hidden_size * mlp_ratio)
        self.mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden),
            nn.GELU(),
            nn.Linear(mlp_hidden, hidden_size),
        )
        # Emits 6 modulation tensors: (shift, scale, gate) for MSA and MLP.
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size_c, 6 * hidden_size, bias=True),
        )

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        """
        x: (B, G, hidden_size)
        c: (B, hidden_size_c) — conditioning vector
        Returns: (B, G, hidden_size)
        """
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
            self.adaLN_modulation(c).chunk(6, dim=1)
        )
        # Self-attention with AdaLN. need_weights=False skips computing the
        # attention-weight matrix we would otherwise discard (enables the
        # fused fast path in recent PyTorch); the output tensor is the same.
        h = self.norm1(x)
        h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
        h, _ = self.attn(h, h, h, need_weights=False)
        x = x + gate_msa.unsqueeze(1) * h
        # MLP with AdaLN
        h = self.norm2(x)
        h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
        h = self.mlp(h)
        x = x + gate_mlp.unsqueeze(1) * h
        return x
class LatentDecoder(nn.Module):
    """Decode backbone output (B, G, d_model) back to (B, G, scgpt_dim).

    Analogous to LatentForcing's final_layer_dino + dh_blocks_dino.
    Structure: linear pre-projection -> dh_depth AdaLN decoder blocks ->
    LayerNorm + two-layer MLP head that expands to scgpt_dim.
    """

    def __init__(self, d_model: int = 128, scgpt_dim: int = 512,
                 dh_depth: int = 2, num_heads: int = 4,
                 hidden_size_c: Optional[int] = None):
        super().__init__()
        # Conditioning width defaults to d_model when not given.
        hidden_size_c = hidden_size_c or d_model
        self.dh_proj = nn.Linear(d_model, d_model)
        # range(dh_depth) is already empty for dh_depth <= 0, so no
        # special-case branch is needed to get an empty ModuleList.
        self.dh_blocks = nn.ModuleList([
            LatentDecoderBlock(d_model, num_heads=num_heads, hidden_size_c=hidden_size_c)
            for _ in range(dh_depth)
        ])
        self.final = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, scgpt_dim),
        )

    def forward(self, x: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        """
        x: (B, G, d_model) — backbone output
        c: (B, hidden_size_c) — conditioning vector (width d_model by default)
        Returns: (B, G, scgpt_dim)
        """
        h = self.dh_proj(x)
        for block in self.dh_blocks:
            h = block(h, c)
        return self.final(h)
|