"""
CascadedFlowModel — Main backbone for Cascaded Conditioned Flow Matching (CCFM).

Architecture mirrors LatentForcing's model_cot.py:
1. Dual-stream embedding (expression + latent) merged by element-wise addition.
2. Conditioning via AdaLN: c = t_expr_emb + t_latent_emb + pert_emb.
   - True conditioning signals: control expression + perturbation_id (available at inference).
   - scGPT latent features are an auxiliary generation target (like DINO in LatentForcing),
     NOT a conditioning signal — they are generated from noise at inference time.
3. Shared backbone (reusing scDFM's DiffPerceiver/Perceiver blocks).
4. Separate decoder heads: ExprDecoder (reused) + LatentDecoder (new).
"""
|
|
| import torch |
| import torch.nn as nn |
| from torch import Tensor |
| from typing import Optional, Tuple |
|
|
| from .layers import LatentEmbedder, LatentDecoder |
| from .._scdfm_imports import ( |
| GeneadaLN, |
| ContinuousValueEncoder, |
| GeneEncoder, |
| BatchLabelEncoder, |
| TimestepEmbedder, |
| ExprDecoder, |
| DifferentialTransformerBlock, |
| PerceiverBlock, |
| DiffPerceiverBlock, |
| ) |
|
|
|
|
class CascadedFlowModel(nn.Module):
    """
    Cascaded Flow Model for single-cell perturbation prediction.

    Two coupled flows share a single backbone:

    * an *expression* flow over per-gene expression values, and
    * a *latent* flow over per-gene scGPT features.

    Conditioning is applied via AdaLN with ``c = t_expr_emb + t_latent_emb
    + pert_emb``.  The scGPT latents are an auxiliary generation target, not
    a conditioning signal — at inference they are generated from noise.

    Inputs:
        gene_id: (B, G) gene token IDs
        cell_1: (B, G) source (control) expression
        x_t: (B, G) noised target expression (expression flow)
        z_t: (B, G, scgpt_dim) noised scGPT features (latent flow)
        t_expr: (B,) expression flow timestep
        t_latent: (B,) latent flow timestep
        perturbation_id: (B, 2) perturbation token IDs

    Outputs:
        pred_v_expr: (B, G) predicted expression velocity
        pred_v_latent: (B, G, scgpt_dim) predicted latent velocity
    """

    def __init__(
        self,
        ntoken: int = 6000,
        d_model: int = 128,
        nhead: int = 8,
        d_hid: int = 512,
        nlayers: int = 4,
        dropout: float = 0.1,
        fusion_method: str = "differential_perceiver",
        perturbation_function: str = "crisper",
        use_perturbation_interaction: bool = True,
        mask_path: Optional[str] = None,
        scgpt_dim: int = 512,
        bottleneck_dim: int = 128,
        dh_depth: int = 2,
    ):
        """
        Args:
            ntoken: vocabulary size for gene / perturbation token IDs.
            d_model: backbone embedding width.
            nhead: number of attention heads in the backbone blocks.
            d_hid: accepted for config compatibility; not used directly here.
            nlayers: number of backbone blocks (and per-layer adapters).
            dropout: dropout rate shared by the value encoders, AdaLN
                layers, adapters, and perceiver blocks.
            fusion_method: one of "differential_transformer",
                "differential_perceiver", or "perceiver".
            perturbation_function: "crisper" routes perturbation IDs through
                the gene encoder; any other value uses the label encoder.
            use_perturbation_interaction: forwarded to ``GeneEncoder``.
            mask_path: optional path forwarded to ``GeneEncoder``
                (was annotated ``str``; ``None`` is the actual default).
            scgpt_dim: width of the scGPT latent features.
            bottleneck_dim: bottleneck width of the latent embedder.
            dh_depth: depth of the latent decoder head.

        Raises:
            ValueError: if ``fusion_method`` is not one of the three
                supported values.
        """
        super().__init__()
        self.d_model = d_model
        self.fusion_method = fusion_method
        self.perturbation_function = perturbation_function

        # Separate timestep embedders: the two flows run on independent clocks.
        self.t_expr_embedder = TimestepEmbedder(d_model)
        self.t_latent_embedder = TimestepEmbedder(d_model)

        # Perturbation conditioning (label-encoder path; see get_perturbation_emb).
        self.perturbation_embedder = BatchLabelEncoder(ntoken, d_model)

        # Expression stream: x_t values and control-cell values are encoded
        # separately, fused, then offset by the gene-identity embedding.
        self.value_encoder_1 = ContinuousValueEncoder(d_model, dropout)
        self.value_encoder_2 = ContinuousValueEncoder(d_model, dropout)
        self.encoder = GeneEncoder(
            ntoken, d_model,
            use_perturbation_interaction=use_perturbation_interaction,
            mask_path=mask_path,
        )
        self.use_perturbation_interaction = use_perturbation_interaction
        self.fusion_layer = nn.Sequential(
            nn.Linear(2 * d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model),
        )

        # Latent stream: projects (B, G, scgpt_dim) down to d_model tokens.
        self.latent_embedder = LatentEmbedder(scgpt_dim, bottleneck_dim, d_model)

        # Shared backbone.
        if fusion_method == "differential_transformer":
            self.blocks = nn.ModuleList([
                DifferentialTransformerBlock(d_model, nhead, i, mlp_ratio=4.0)
                for i in range(nlayers)
            ])
        elif fusion_method == "differential_perceiver":
            self.blocks = nn.ModuleList([
                DiffPerceiverBlock(d_model, nhead, i, mlp_ratio=4.0)
                for i in range(nlayers)
            ])
        elif fusion_method == "perceiver":
            # FIX: propagate the configured dropout instead of hard-coding 0.1
            # (behavior is unchanged at the default dropout=0.1).
            self.blocks = nn.ModuleList([
                PerceiverBlock(d_model, d_model, heads=nhead, mlp_ratio=4.0, dropout=dropout)
                for _ in range(nlayers)
            ])
        else:
            raise ValueError(f"Invalid fusion method: {fusion_method}")

        # Per-layer gene-conditioned AdaLN plus a perturbation adapter that
        # folds the perturbation embedding back into every token.
        self.gene_adaLN = nn.ModuleList([
            GeneadaLN(d_model, dropout) for _ in range(nlayers)
        ])
        self.adapter_layer = nn.ModuleList([
            nn.Sequential(
                nn.Linear(2 * d_model, d_model),
                nn.LeakyReLU(),
                nn.Dropout(dropout),
                nn.Linear(d_model, d_model),
                nn.LeakyReLU(),
            )
            for _ in range(nlayers)
        ])

        # Decoder heads: reused expression head + new latent head.
        self.final_layer = ExprDecoder(d_model, explicit_zero_prob=False, use_batch_labels=True)
        self.latent_decoder = LatentDecoder(
            d_model=d_model,
            scgpt_dim=scgpt_dim,
            dh_depth=dh_depth,
            num_heads=max(nhead // 2, 1),
            hidden_size_c=d_model,
        )

        self.initialize_weights()

    def initialize_weights(self):
        """Xavier-uniform init for every ``nn.Linear``, with zero bias.

        NOTE(review): ``self.apply`` recurses into all submodules, so this also
        overwrites any custom initialization those modules performed in their
        own ``__init__`` — presumably intentional, mirroring the upstream
        DiT-style setup; confirm against scDFM.
        """
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

    def get_perturbation_emb(
        self,
        perturbation_id: Optional[Tensor] = None,
        perturbation_emb: Optional[Tensor] = None,
        cell_1: Optional[Tensor] = None,
    ) -> Tensor:
        """Return a (B, d_model) perturbation embedding, replicating scDFM logic.

        Exactly one of ``perturbation_id`` / ``perturbation_emb`` should be
        given.  ``cell_1`` is only consulted in the precomputed-embedding path,
        to supply the batch size, device, and dtype.

        Args:
            perturbation_id: (B, 2) perturbation token IDs, or None.
            perturbation_emb: precomputed (d_model,) or (B', d_model)
                embedding, or None.
            cell_1: reference batch tensor for the precomputed path.

        Raises:
            ValueError: if neither ``perturbation_id`` nor
                ``perturbation_emb`` is provided.
        """
        assert perturbation_emb is None or perturbation_id is None
        if perturbation_id is not None:
            if self.perturbation_function == "crisper":
                # CRISPR-style perturbations reuse the gene-identity encoder.
                perturbation_emb = self.encoder(perturbation_id)
            else:
                perturbation_emb = self.perturbation_embedder(perturbation_id)
            # (B, 2, d) -> (B, d): average the two perturbation tokens.
            perturbation_emb = perturbation_emb.mean(1)
        elif perturbation_emb is not None:
            # Precomputed-embedding path: align device/dtype with the batch,
            # broadcast a single embedding across the batch, then normalize
            # with the label encoder's norm layer.
            perturbation_emb = perturbation_emb.to(cell_1.device, dtype=cell_1.dtype)
            if perturbation_emb.dim() == 1:
                perturbation_emb = perturbation_emb.unsqueeze(0)
            if perturbation_emb.size(0) == 1:
                perturbation_emb = perturbation_emb.expand(cell_1.shape[0], -1).contiguous()
            perturbation_emb = self.perturbation_embedder.enc_norm(perturbation_emb)
        else:
            # FIX: fail loudly instead of returning None, which previously
            # surfaced later as an opaque TypeError in forward().
            raise ValueError(
                "Either perturbation_id or perturbation_emb must be provided."
            )
        return perturbation_emb

    def forward(
        self,
        gene_id: Tensor,
        cell_1: Tensor,
        x_t: Tensor,
        z_t: Tensor,
        t_expr: Tensor,
        t_latent: Tensor,
        perturbation_id: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor]:
        """Predict the expression and latent flow velocities.

        See the class docstring for input/output shapes.
        """
        # Broadcast scalar (0-dim) timesteps across the batch.
        if t_expr.dim() == 0:
            t_expr = t_expr.repeat(cell_1.size(0))
        if t_latent.dim() == 0:
            t_latent = t_latent.repeat(cell_1.size(0))

        # Expression stream: fuse noised-target and control value embeddings,
        # then add the gene-identity embedding.
        gene_emb = self.encoder(gene_id)
        val_emb_1 = self.value_encoder_1(x_t)
        val_emb_2 = self.value_encoder_2(cell_1)
        expr_tokens = self.fusion_layer(torch.cat([val_emb_1, val_emb_2], dim=-1)) + gene_emb

        # Latent stream.
        latent_tokens = self.latent_embedder(z_t)

        # Dual-stream merge by element-wise addition.
        x = expr_tokens + latent_tokens

        # AdaLN conditioning vector: both flow timesteps + perturbation.
        t_expr_emb = self.t_expr_embedder(t_expr)
        t_latent_emb = self.t_latent_embedder(t_latent)
        pert_emb = self.get_perturbation_emb(perturbation_id, cell_1=cell_1)
        c = t_expr_emb + t_latent_emb + pert_emb

        # Backbone: per layer, gene-conditioned AdaLN -> perturbation adapter
        # (concat + MLP back to d_model) -> fusion block conditioned on c.
        for i, block in enumerate(self.blocks):
            x = self.gene_adaLN[i](gene_emb, x)
            pert_exp = pert_emb[:, None, :].expand(-1, x.size(1), -1)
            x = torch.cat([x, pert_exp], dim=-1)
            x = self.adapter_layer[i](x)
            x = block(x, val_emb_2, c)

        # Expression head sees [tokens ; perturbation]; latent head sees (x, c).
        x_with_pert = torch.cat([x, pert_emb[:, None, :].expand(-1, x.size(1), -1)], dim=-1)
        pred_v_expr = self.final_layer(x_with_pert)["pred"]
        pred_v_latent = self.latent_decoder(x, c)

        return pred_v_expr, pred_v_latent
|
|