# lfj-code / transfer / code / CCFM / src / denoiser.py
# Uploaded by ethan1115 via huggingface_hub (commit 0161e74, verified)
"""
CascadedDenoiser — Wraps CascadedFlowModel + FrozenScGPTExtractor.
Implements cascaded time-step sampling (from LatentForcing denoiser_cot.py:121-132)
and two-stage cascaded generation (denoiser_cot.py:224-247).
"""
import torch
import torch.nn as nn
import torchdiffeq
from ._scdfm_imports import AffineProbPath, CondOTScheduler, make_lognorm_poisson_noise
from .model.model import CascadedFlowModel
from .data.scgpt_extractor import FrozenScGPTExtractor
# Shared flow-matching probability path: conditional-OT affine interpolation
# between noise (x_0) and data (x_1). Reused by both the expression flow and
# the latent flow in CascadedDenoiser.train_step.
flow_path = AffineProbPath(scheduler=CondOTScheduler())
def pairwise_sq_dists(X, Y):
    """Return the matrix of squared Euclidean distances between rows of X and rows of Y."""
    euclidean = torch.cdist(X, Y, p=2)
    return euclidean.pow(2)
@torch.no_grad()
def median_sigmas(X, scales=(0.5, 1.0, 2.0, 4.0)):
    """
    Median-heuristic RBF bandwidths for X at several scales.

    The squared-distance median is taken over off-diagonal pairs only
    (self-distances are always 0 and would bias it), floored at 1e-12
    for numerical safety, scaled, then square-rooted into sigmas.
    Returns a plain list of Python floats.
    """
    sq_dists = torch.cdist(X, X, p=2) ** 2
    off_diag = ~torch.eye(sq_dists.size(0), dtype=bool, device=sq_dists.device)
    med = torch.median(sq_dists[off_diag]).clamp_min(1e-12)
    sigmas = torch.sqrt(torch.tensor(scales, device=X.device) * med)
    return [float(v) for v in sigmas]
def mmd2_unbiased_multi_sigma(X, Y, sigmas):
    """
    Squared MMD between samples X and Y under a mixture of Gaussian kernels.

    Within-set terms drop the kernel diagonal and normalize by m*(m-1)
    (U-statistic form); the cross term is a plain mean. One estimate is
    computed per bandwidth in `sigmas` and the estimates are averaged.
    Returns a scalar tensor (may be slightly negative, as is normal for
    the unbiased estimator).
    """
    def squared_dists(A, B):
        return torch.cdist(A, B, p=2) ** 2

    n_x, n_y = X.size(0), Y.size(0)
    d_xx = squared_dists(X, X)
    d_yy = squared_dists(Y, Y)
    d_xy = squared_dists(X, Y)
    estimates = []
    for sigma in sigmas:
        inv_bw = 1.0 / (2.0 * (sigma ** 2) + 1e-12)
        k_xx = torch.exp(-inv_bw * d_xx)
        k_yy = torch.exp(-inv_bw * d_yy)
        k_xy = torch.exp(-inv_bw * d_xy)
        within_x = (k_xx.sum() - k_xx.diag().sum()) / (n_x * (n_x - 1) + 1e-12)
        within_y = (k_yy.sum() - k_yy.diag().sum()) / (n_y * (n_y - 1) + 1e-12)
        cross = k_xy.mean()
        estimates.append(within_x + within_y - 2.0 * cross)
    return torch.stack(estimates).mean()
class CascadedDenoiser(nn.Module):
    """
    Cascaded denoiser combining CascadedFlowModel with FrozenScGPTExtractor.

    Training: Cascaded time-step sampling (dino_first_cascaded mode) — each
    sample trains either the latent flow or the expression flow, never both.
    Inference: Two-stage cascaded generation (latent first, then expression).
    """
    def __init__(
        self,
        model: CascadedFlowModel,
        scgpt_extractor: FrozenScGPTExtractor,
        choose_latent_p: float = 0.4,
        latent_weight: float = 1.0,
        noise_type: str = "Gaussian",
        use_mmd_loss: bool = True,
        gamma: float = 0.5,
        poisson_alpha: float = 0.8,
        poisson_target_sum: float = 1e4,
        # Logit-normal time-step sampling
        t_sample_mode: str = "logit_normal",
        t_expr_mean: float = 0.0,
        t_expr_std: float = 1.0,
        t_latent_mean: float = 0.0,
        t_latent_std: float = 1.0,
        # Cascaded noise (LatentForcing dino_first_cascaded_noised)
        noise_beta: float = 0.25,
    ):
        """
        Args:
            model: Joint flow model; called with
                (gene_ids, source, x_expr, z_latent, t_expr, t_latent, pert_id)
                and returns (v_expr, v_latent) velocity predictions.
            scgpt_extractor: Frozen scGPT feature extractor producing latent
                targets of shape (B, G, scgpt_dim).
            choose_latent_p: Probability a training sample trains the latent flow.
            latent_weight: Multiplier on the latent-flow MSE loss.
            noise_type: "Gaussian" or "Poisson" source noise for the expression flow.
            use_mmd_loss: If True, add an MMD term on one-step-extrapolated x1.
            gamma: Weight of the MMD loss term.
            poisson_alpha: Forwarded to make_lognorm_poisson_noise (alpha).
            poisson_target_sum: Forwarded to make_lognorm_poisson_noise (per_cell_L).
            t_sample_mode: "logit_normal" for sigmoid(Normal) time sampling;
                anything else falls back to uniform U[0, 1].
            t_expr_mean: Logit-normal mean for the expression-flow time.
            t_expr_std: Logit-normal std for the expression-flow time.
            t_latent_mean: Logit-normal mean for the latent-flow time.
            t_latent_std: Logit-normal std for the latent-flow time.
            noise_beta: When training the expression flow, t_latent is drawn
                from U[1 - noise_beta, 1]; 0 recovers a perfectly clean latent.
        """
        super().__init__()
        self.model = model
        self.scgpt_extractor = scgpt_extractor
        self.choose_latent_p = choose_latent_p
        self.latent_weight = latent_weight
        self.noise_type = noise_type
        self.use_mmd_loss = use_mmd_loss
        self.gamma = gamma
        self.poisson_alpha = poisson_alpha
        self.poisson_target_sum = poisson_target_sum
        self.t_sample_mode = t_sample_mode
        self.t_expr_mean = t_expr_mean
        self.t_expr_std = t_expr_std
        self.t_latent_mean = t_latent_mean
        self.t_latent_std = t_latent_std
        self.noise_beta = noise_beta

    def sample_t(self, n: int, device: torch.device):
        """
        Cascaded time-step sampling — dino_first_cascaded mode.
        (LatentForcing denoiser_cot.py:121-132)
        Supports uniform or logit-normal sampling per flow.

        With probability choose_latent_p:
        - Train latent flow: t_latent sampled, t_expr = 0, loss_weight_expr = 0
        Otherwise:
        - Train expression flow: t_expr sampled, t_latent ~ U[1 - noise_beta, 1]
          (noise_beta = 0 recovers the clean-latent t_latent = 1 case),
          loss_weight_latent = 0

        Returns:
            (t_expr, t_latent, w_expr, w_latent): four (n,) tensors; w_* are
            mutually exclusive {0, 1} per-sample loss masks for the two flows.
        """
        if self.t_sample_mode == "logit_normal":
            # sigmoid(N(mean, std)) — logit-normal distribution on (0, 1)
            t_latent = torch.sigmoid(torch.randn(n, device=device) * self.t_latent_std + self.t_latent_mean)
            t_expr = torch.sigmoid(torch.randn(n, device=device) * self.t_expr_std + self.t_expr_mean)
        else:
            t_latent = torch.rand(n, device=device)
            t_expr = torch.rand(n, device=device)
        choose_latent_mask = torch.rand(n, device=device) < self.choose_latent_p
        # When training expr flow: t_latent ~ U[1-noise_beta, 1] (LatentForcing dino_first_cascaded_noised)
        # noise_beta=0 recovers original behavior (t_latent=1, no noise)
        t_latent_expr = torch.rand_like(t_latent) * self.noise_beta + (1.0 - self.noise_beta)
        t_latent = torch.where(choose_latent_mask, t_latent, t_latent_expr)
        t_expr = torch.where(choose_latent_mask, torch.zeros_like(t_expr), t_expr)
        w_expr = (~choose_latent_mask).float()
        w_latent = choose_latent_mask.float()
        return t_expr, t_latent, w_expr, w_latent

    def _make_expr_noise(self, source: torch.Tensor) -> torch.Tensor:
        """
        Create the x_0 noise sample for the expression flow.

        Gaussian: standard normal, same shape as `source`.
        Poisson: lognorm-Poisson noise built from `source` (treated as
        log-space target — see make_lognorm_poisson_noise).

        Raises:
            ValueError: if self.noise_type is neither "Gaussian" nor "Poisson".
        """
        if self.noise_type == "Gaussian":
            return torch.randn_like(source)
        elif self.noise_type == "Poisson":
            return make_lognorm_poisson_noise(
                target_log=source,
                alpha=self.poisson_alpha,
                per_cell_L=self.poisson_target_sum,
            )
        else:
            raise ValueError(f"Unknown noise_type: {self.noise_type}")

    def train_step(
        self,
        source: torch.Tensor,  # (B, G_full)
        target: torch.Tensor,  # (B, G_full)
        perturbation_id: torch.Tensor,  # (B, 2)
        gene_ids: torch.Tensor,  # (G_full,)
        infer_top_gene: int = 1000,
        cached_z_target: torch.Tensor = None,  # (B, G_sub, scgpt_dim) pre-extracted
        cached_gene_ids: torch.Tensor = None,  # (G_sub,) gene indices used for cache
    ) -> dict:
        """
        Single training step with cascaded time sampling.

        Returns:
            dict with:
              "loss":        scalar tensor to backprop (expr + latent_weight*latent + gamma*MMD),
              "loss_expr":   detached expression-flow MSE,
              "loss_latent": detached latent-flow MSE,
              "loss_mmd":    detached MMD term (0 when disabled or no expr samples).
        """
        B = source.shape[0]
        device = source.device
        # Random gene subset (same as scDFM); the cache, when given, must have
        # been extracted over the same cached_gene_ids subset.
        if cached_gene_ids is not None:
            input_gene_ids = cached_gene_ids
        else:
            input_gene_ids = torch.randperm(source.shape[-1], device=device)[:infer_top_gene]
        source_sub = source[:, input_gene_ids]
        target_sub = target[:, input_gene_ids]
        gene_input = gene_ids[input_gene_ids].unsqueeze(0).expand(B, -1)
        # 1. scGPT features: use cache if available, otherwise extract on-the-fly
        if cached_z_target is not None:
            z_target = cached_z_target
        else:
            z_target = self.scgpt_extractor.extract(target_sub, gene_indices=input_gene_ids)  # (B, G, scgpt_dim)
        # 2. Cascaded time sampling
        t_expr, t_latent, w_expr, w_latent = self.sample_t(B, device)
        # 3. Expression flow path: interpolate noise -> target at t_expr
        noise_expr = self._make_expr_noise(source_sub)
        path_expr = flow_path.sample(t=t_expr, x_0=noise_expr, x_1=target_sub)
        # 4. Latent flow path
        # AffineProbPath.sample expects t as (B,) and broadcasts internally.
        # But z_target is (B, G, scgpt_dim) — we need to flatten to 2D, sample, then reshape.
        B_l, G_l, D_l = z_target.shape
        noise_latent = torch.randn_like(z_target)
        z_target_flat = z_target.reshape(B_l, G_l * D_l)
        noise_latent_flat = noise_latent.reshape(B_l, G_l * D_l)
        path_latent_flat = flow_path.sample(t=t_latent, x_0=noise_latent_flat, x_1=z_target_flat)
        # Wrap path_latent with reshaped tensors (duck-typed stand-in exposing
        # only the .x_t / .dx_t attributes consumed below)
        class _LatentPath:
            pass
        path_latent = _LatentPath()
        path_latent.x_t = path_latent_flat.x_t.reshape(B_l, G_l, D_l)
        path_latent.dx_t = path_latent_flat.dx_t.reshape(B_l, G_l, D_l)
        # 5. Model forward — predicts velocities for both flows jointly
        pred_v_expr, pred_v_latent = self.model(
            gene_input, source_sub, path_expr.x_t, path_latent.x_t,
            t_expr, t_latent, perturbation_id,
        )
        # 6. Losses — per-sample MSE first, then weight by cascaded mask
        # (aligns with LatentForcing: eliminates dimension mismatch, decouples choose_latent_p from loss weight)
        loss_expr_per_sample = ((pred_v_expr - path_expr.dx_t) ** 2).mean(dim=-1)  # (B,)
        loss_expr = (loss_expr_per_sample * w_expr).sum() / w_expr.sum().clamp(min=1)
        loss_latent_per_sample = ((pred_v_latent - path_latent.dx_t) ** 2).mean(dim=(-1, -2))  # (B,)
        loss_latent = (loss_latent_per_sample * w_latent).sum() / w_latent.sum().clamp(min=1)
        loss = loss_expr + self.latent_weight * loss_latent
        # Optional MMD loss on expression (same as scDFM) — computed only over
        # the samples currently training the expression flow
        _mmd_loss = torch.tensor(0.0, device=device)
        if self.use_mmd_loss and w_expr.sum() > 0:
            expr_mask = w_expr > 0
            if expr_mask.any():
                # One-step extrapolation to t=1: x1_hat = x_t + v * (1 - t)
                x1_hat = (
                    path_expr.x_t[expr_mask]
                    + pred_v_expr[expr_mask] * (1 - t_expr[expr_mask]).unsqueeze(-1)
                )
                sigmas = median_sigmas(target_sub[expr_mask], scales=(0.5, 1.0, 2.0, 4.0))
                _mmd_loss = mmd2_unbiased_multi_sigma(x1_hat, target_sub[expr_mask], sigmas)
        loss = loss + _mmd_loss * self.gamma
        return {
            "loss": loss,
            "loss_expr": loss_expr.detach(),
            "loss_latent": loss_latent.detach(),
            "loss_mmd": _mmd_loss.detach(),
        }

    @torch.no_grad()
    def generate(
        self,
        source: torch.Tensor,  # (B, G)
        perturbation_id: torch.Tensor,  # (B, 2)
        gene_ids: torch.Tensor,  # (B, G) or (G,)
        latent_steps: int = 20,
        expr_steps: int = 20,
        method: str = "rk4",
    ) -> torch.Tensor:
        """
        Two-stage cascaded generation.
        method="euler": Single-loop joint Euler steps (LatentForcing style).
        method="rk4": Two-stage serial ODE with torchdiffeq RK4 (scDFM style, higher accuracy).
        Any other `method` value falls through to the euler branch.
        Returns: (B, G) generated expression values, clamped to be non-negative.
        """
        B, G = source.shape
        device = source.device
        scgpt_dim = self.scgpt_extractor.scgpt_d_model
        if gene_ids.dim() == 1:
            gene_ids = gene_ids.unsqueeze(0).expand(B, -1)
        # === Initialize both noise states ===
        z_t = torch.randn(B, G, scgpt_dim, device=device)
        x_t = self._make_expr_noise(source)
        if method == "rk4":
            # === Stage 1: Latent generation (t_latent: 0->1, t_expr=0) ===
            t_zero = torch.zeros(B, device=device)
            t_one = torch.ones(B, device=device)
            def latent_vf(t, z):
                # x_t stays fixed at its noise init while t_expr is held at 0
                v_expr, v_latent = self.model(
                    gene_ids, source, x_t, z,
                    t_zero, t.expand(B), perturbation_id,
                )
                return v_latent
            z_t = torchdiffeq.odeint(
                latent_vf, z_t,
                torch.linspace(0, 1, latent_steps + 1, device=device),
                method="rk4", atol=1e-4, rtol=1e-4,
            )[-1]
            # === Stage 2: Expression generation (t_expr: 0->1, t_latent=1) ===
            def expr_vf(t, x):
                # z_t is now the fully-generated latent (captured by closure)
                v_expr, v_latent = self.model(
                    gene_ids, source, x, z_t,
                    t.expand(B), t_one, perturbation_id,
                )
                return v_expr
            x_t = torchdiffeq.odeint(
                expr_vf, x_t,
                torch.linspace(0, 1, expr_steps + 1, device=device),
                method="rk4", atol=1e-4, rtol=1e-4,
            )[-1]
        else:  # euler — joint loop (LatentForcing style)
            # Schedules: latent ramps 0->1 over the first latent_steps steps,
            # then holds at 1 while the expression time ramps 0->1.
            t_latent_schedule = torch.cat([
                torch.linspace(0, 1, latent_steps + 1, device=device),
                torch.ones(expr_steps, device=device),
            ])
            t_expr_schedule = torch.cat([
                torch.zeros(latent_steps + 1, device=device),
                torch.linspace(0, 1, expr_steps + 1, device=device)[1:],
            ])
            for i in range(latent_steps + expr_steps):
                t_lat = t_latent_schedule[i]
                t_lat_next = t_latent_schedule[i + 1]
                t_exp = t_expr_schedule[i]
                t_exp_next = t_expr_schedule[i + 1]
                v_expr, v_latent = self.model(
                    gene_ids, source, x_t, z_t,
                    t_exp.expand(B), t_lat.expand(B), perturbation_id,
                )
                # Explicit Euler update for both states; step size is zero for
                # whichever flow's schedule is currently flat
                x_t = x_t + (t_exp_next - t_exp) * v_expr
                z_t = z_t + (t_lat_next - t_lat) * v_latent
        return torch.clamp(x_t, min=0)