# Provenance: lfj-code/transfer/code/CCFM/src/model/model.py
# Uploaded via huggingface_hub by ethan1115 (revision 0161e74, verified).
"""
CascadedFlowModel — Main backbone for Cascaded Conditioned Flow Matching (CCFM).
Architecture mirrors LatentForcing's model_cot.py:
1. Dual-stream embedding (expression + latent) with element-wise addition
2. Conditioning via AdaLN: c = t_expr_emb + t_latent_emb + pert_emb
- True conditioning signals: control expression + perturbation_id (available at inference)
- scGPT latent features are an auxiliary generation target (like DINO in LatentForcing),
NOT a conditioning signal — they are generated from noise at inference time.
3. Shared backbone (reusing scDFM's DiffPerceiver/Perceiver blocks)
4. Separate decoder heads: ExprDecoder (reused) + LatentDecoder (new)
"""
import torch
import torch.nn as nn
from torch import Tensor
from typing import Optional, Tuple
from .layers import LatentEmbedder, LatentDecoder
from .._scdfm_imports import (
GeneadaLN,
ContinuousValueEncoder,
GeneEncoder,
BatchLabelEncoder,
TimestepEmbedder,
ExprDecoder,
DifferentialTransformerBlock,
PerceiverBlock,
DiffPerceiverBlock,
)
class CascadedFlowModel(nn.Module):
    """Cascaded Flow Model for single-cell perturbation prediction.

    Two flows are predicted jointly from a shared backbone:
    an expression flow (gene-level velocities) and a latent flow
    (scGPT-feature velocities).  Conditioning is applied via AdaLN with
    ``c = t_expr_emb + t_latent_emb + pert_emb``.

    Inputs (see :meth:`forward`):
        gene_id:         (B, G)  gene token IDs
        cell_1:          (B, G)  source (control) expression
        x_t:             (B, G)  noised target expression (expression flow)
        z_t:             (B, G, scgpt_dim) noised scGPT features (latent flow)
        t_expr:          (B,)    expression flow timestep
        t_latent:        (B,)    latent flow timestep
        perturbation_id: (B, 2)  perturbation token IDs

    Outputs:
        pred_v_expr:   (B, G)  predicted expression velocity
        pred_v_latent: (B, G, scgpt_dim) predicted latent velocity
    """

    def __init__(
        self,
        ntoken: int = 6000,
        d_model: int = 128,
        nhead: int = 8,
        d_hid: int = 512,   # NOTE(review): unused — fusion blocks size their MLPs via mlp_ratio; kept for config compatibility
        nlayers: int = 4,
        dropout: float = 0.1,
        fusion_method: str = "differential_perceiver",
        perturbation_function: str = "crisper",
        use_perturbation_interaction: bool = True,
        mask_path: Optional[str] = None,
        # Latent-specific
        scgpt_dim: int = 512,
        bottleneck_dim: int = 128,
        dh_depth: int = 2,
    ):
        super().__init__()
        self.d_model = d_model
        self.fusion_method = fusion_method
        self.perturbation_function = perturbation_function

        # === Timestep embedders (separate for expr and latent, like model_cot.py) ===
        self.t_expr_embedder = TimestepEmbedder(d_model)
        self.t_latent_embedder = TimestepEmbedder(d_model)

        # === Perturbation embedder ===
        self.perturbation_embedder = BatchLabelEncoder(ntoken, d_model)

        # === Expression stream (reused from scDFM) ===
        # encoder_1 embeds the noisy target x_t; encoder_2 embeds the control cell_1.
        self.value_encoder_1 = ContinuousValueEncoder(d_model, dropout)
        self.value_encoder_2 = ContinuousValueEncoder(d_model, dropout)
        self.encoder = GeneEncoder(
            ntoken, d_model,
            use_perturbation_interaction=use_perturbation_interaction,
            mask_path=mask_path,
        )
        self.use_perturbation_interaction = use_perturbation_interaction
        # Fuses the two value-embedding streams back down to d_model.
        self.fusion_layer = nn.Sequential(
            nn.Linear(2 * d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model),
        )

        # === Latent stream embedder (new, analogous to dino_embedder) ===
        self.latent_embedder = LatentEmbedder(scgpt_dim, bottleneck_dim, d_model)

        # === Shared backbone blocks ===
        if fusion_method == "differential_transformer":
            self.blocks = nn.ModuleList([
                DifferentialTransformerBlock(d_model, nhead, i, mlp_ratio=4.0)
                for i in range(nlayers)
            ])
        elif fusion_method == "differential_perceiver":
            self.blocks = nn.ModuleList([
                DiffPerceiverBlock(d_model, nhead, i, mlp_ratio=4.0)
                for i in range(nlayers)
            ])
        elif fusion_method == "perceiver":
            self.blocks = nn.ModuleList([
                # Use the configured dropout (previously hard-coded to 0.1,
                # inconsistent with the other dropout sites in this class).
                PerceiverBlock(d_model, d_model, heads=nhead, mlp_ratio=4.0, dropout=dropout)
                for _ in range(nlayers)
            ])
        else:
            raise ValueError(f"Invalid fusion method: {fusion_method}")

        # === Per-layer gene AdaLN + adapter ===
        self.gene_adaLN = nn.ModuleList([
            GeneadaLN(d_model, dropout) for _ in range(nlayers)
        ])
        # Each adapter re-injects the perturbation embedding (concatenated
        # per-token) before the corresponding backbone block.
        self.adapter_layer = nn.ModuleList([
            nn.Sequential(
                nn.Linear(2 * d_model, d_model),
                nn.LeakyReLU(),
                nn.Dropout(dropout),
                nn.Linear(d_model, d_model),
                nn.LeakyReLU(),
            )
            for _ in range(nlayers)
        ])

        # === Expression decoder head (reused from scDFM) ===
        self.final_layer = ExprDecoder(d_model, explicit_zero_prob=False, use_batch_labels=True)

        # === Latent decoder head (new, analogous to dh_blocks_dino + final_layer_dino) ===
        self.latent_decoder = LatentDecoder(
            d_model=d_model,
            scgpt_dim=scgpt_dim,
            dh_depth=dh_depth,
            num_heads=max(nhead // 2, 1),
            hidden_size_c=d_model,
        )

        self.initialize_weights()

    def initialize_weights(self) -> None:
        """Xavier-uniform init for all Linear weights; zero for their biases."""
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

    def get_perturbation_emb(
        self,
        perturbation_id: Optional[Tensor] = None,
        perturbation_emb: Optional[Tensor] = None,
        cell_1: Optional[Tensor] = None,
    ) -> Tensor:
        """Get perturbation embedding, replicating scDFM logic.

        Exactly one of ``perturbation_id`` / ``perturbation_emb`` must be given.

        Args:
            perturbation_id: (B, 2) perturbation token IDs; embedded via the
                gene encoder for "crisper" perturbations, otherwise via the
                dedicated perturbation embedder, then mean-pooled to (B, d).
            perturbation_emb: precomputed embedding of shape (d,) or (B, d)
                (or (1, d), broadcast over the batch); normalized with the
                embedder's enc_norm.
            cell_1: (B, G) control expression; required with
                ``perturbation_emb`` to supply device/dtype/batch size.

        Returns:
            (B, d) perturbation embedding.

        Raises:
            ValueError: if both or neither of the two perturbation inputs are
                provided, or if ``perturbation_emb`` is given without
                ``cell_1``.
        """
        # Explicit raise instead of assert: asserts are stripped under -O.
        if perturbation_id is not None and perturbation_emb is not None:
            raise ValueError("Provide only one of perturbation_id / perturbation_emb")
        if perturbation_id is not None:
            if self.perturbation_function == "crisper":
                perturbation_emb = self.encoder(perturbation_id)
            else:
                perturbation_emb = self.perturbation_embedder(perturbation_id)
            perturbation_emb = perturbation_emb.mean(1)  # (B, d)
        elif perturbation_emb is not None:
            if cell_1 is None:
                raise ValueError("cell_1 is required when passing a precomputed perturbation_emb")
            perturbation_emb = perturbation_emb.to(cell_1.device, dtype=cell_1.dtype)
            if perturbation_emb.dim() == 1:
                perturbation_emb = perturbation_emb.unsqueeze(0)
            if perturbation_emb.size(0) == 1:
                perturbation_emb = perturbation_emb.expand(cell_1.shape[0], -1).contiguous()
            perturbation_emb = self.perturbation_embedder.enc_norm(perturbation_emb)
        else:
            # Previously fell through and returned None, crashing later in
            # forward() with an opaque TypeError; fail fast instead.
            raise ValueError("One of perturbation_id / perturbation_emb must be provided")
        return perturbation_emb

    def forward(
        self,
        gene_id: Tensor,                            # (B, G)
        cell_1: Tensor,                             # (B, G) source expression
        x_t: Tensor,                                # (B, G) noised expression
        z_t: Tensor,                                # (B, G, scgpt_dim) noised latent features
        t_expr: Tensor,                             # (B,)
        t_latent: Tensor,                           # (B,)
        perturbation_id: Optional[Tensor] = None,   # (B, 2)
    ) -> Tuple[Tensor, Tensor]:
        """Predict expression and latent velocities for one flow step.

        Returns:
            (pred_v_expr, pred_v_latent): shapes (B, G) and (B, G, scgpt_dim).
        """
        # Broadcast scalar timesteps across the batch.
        if t_expr.dim() == 0:
            t_expr = t_expr.repeat(cell_1.size(0))
        if t_latent.dim() == 0:
            t_latent = t_latent.repeat(cell_1.size(0))

        # === 1. Expression stream embedding (aligned with scDFM) ===
        gene_emb = self.encoder(gene_id)           # (B, G, d)
        val_emb_1 = self.value_encoder_1(x_t)      # (B, G, d) encoder_1 = noisy target (same role as scDFM)
        val_emb_2 = self.value_encoder_2(cell_1)   # (B, G, d) encoder_2 = control (same role as scDFM)
        expr_tokens = self.fusion_layer(torch.cat([val_emb_1, val_emb_2], dim=-1)) + gene_emb  # (B, G, d)

        # === 2. Latent stream embedding (new, analogous to dino_embedder) ===
        latent_tokens = self.latent_embedder(z_t)  # (B, G, d)

        # === 3. Element-wise addition (model_cot.py line 414) ===
        x = expr_tokens + latent_tokens            # (B, G, d)

        # === 4. Conditioning vector (model_cot.py line 409) ===
        t_expr_emb = self.t_expr_embedder(t_expr)        # (B, d)
        t_latent_emb = self.t_latent_embedder(t_latent)  # (B, d)
        pert_emb = self.get_perturbation_emb(perturbation_id, cell_1=cell_1)  # (B, d)
        c = t_expr_emb + t_latent_emb + pert_emb         # (B, d)

        # === 5. Shared backbone (reused scDFM blocks) ===
        for i, block in enumerate(self.blocks):
            x = self.gene_adaLN[i](gene_emb, x)
            # Re-inject the perturbation signal per token before each block.
            pert_exp = pert_emb[:, None, :].expand(-1, x.size(1), -1)
            x = torch.cat([x, pert_exp], dim=-1)
            x = self.adapter_layer[i](x)
            x = block(x, val_emb_2, c)

        # === 6a. Expression decoder head (reused) ===
        x_with_pert = torch.cat([x, pert_emb[:, None, :].expand(-1, x.size(1), -1)], dim=-1)
        pred_v_expr = self.final_layer(x_with_pert)["pred"]  # (B, G)

        # === 6b. Latent decoder head (new, analogous to dh_blocks_dino) ===
        pred_v_latent = self.latent_decoder(x, c)  # (B, G, scgpt_dim)

        return pred_v_expr, pred_v_latent