| """ |
| Diagnostic: load trained checkpoint, run forward on a few batches, |
| print expression vs latent prediction MSE and velocity distributions. |
| |
| Uses synthetic latent features (same distribution as normalized scGPT features) |
| to avoid loading the 44GB cache file. |
| |
| Usage (login node, CPU): |
| cd /home/hp250092/ku50001222/qian/aivc/lfj/transfer/code/CCFM |
| python scripts/diagnose_trained_model.py |
| """ |
|
|
| import sys |
| import os |
|
|
| _PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) |
| sys.path.insert(0, _PROJECT_ROOT) |
|
|
| import _bootstrap_scdfm |
|
|
| import torch |
| import numpy as np |
| from torch.utils.data import DataLoader |
|
|
| from src.data.data import get_data_classes |
| from src._scdfm_imports import AffineProbPath, CondOTScheduler, process_vocab |
| from src.utils import GeneVocab |
| from src.model.model import CascadedFlowModel |
|
|
| _REPO_ROOT = os.path.dirname(_PROJECT_ROOT) |
|
|
|
|
def describe(name, t):
    """Print a one-line summary (mean/std/min/max/median) of tensor ``t``.

    ``name`` is a label left-padded into a fixed 35-char column so that
    successive calls line up in the diagnostic output.
    """
    vals = t.float().flatten()
    mean, std = vals.mean(), vals.std()
    lo, hi, med = vals.min(), vals.max(), vals.median()
    print(
        f" {name:35s} | mean={mean:.4f} std={std:.4f} "
        f"min={lo:.4f} max={hi:.4f} median={med:.4f}"
    )
|
|
|
|
def main():
    """Run the diagnostic: load a trained CascadedFlowModel checkpoint, push a
    few training batches through it, and print expression-vs-latent velocity
    prediction MSE plus value distributions.

    The latent (scGPT) features are replaced by synthetic N(0, 1.1) samples so
    the 44GB feature cache does not have to be loaded (see module docstring).
    Everything runs on CPU.
    """
    device = torch.device("cpu")
    data_name = "norman"
    n_top_genes = 5000        # genes kept by HVG selection during preprocessing
    infer_top_gene = 1000     # random gene subset fed to the model per batch
    batch_size = 48
    scgpt_dim = 512           # per-gene latent feature dimension
    ckpt_path = os.path.join(
        _PROJECT_ROOT,
        "result/ccfm-norman-f1-topk30-negTrue-d128-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-online",
        "iteration_100000/checkpoint.pt",
    )

    # --- Data: rebuild the perturbation dataset the same way training does. ---
    Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
    scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
    data_manager = Data(scdfm_data_path)
    data_manager.load_data(data_name)

    # If var_names are Ensembl IDs, switch to gene symbols so vocab.encode()
    # below can look them up (assumes the vocab is keyed by symbol — TODO confirm).
    if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
        data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
        data_manager.adata.var_names_make_unique()

    data_manager.process_data(
        n_top_genes=n_top_genes, split_method="additive",
        fold=1, use_negative_edge=True, k=30,
    )
    train_sampler, _, _ = data_manager.load_flow_data(batch_size=batch_size)
    train_dataset = PerturbationDataset(train_sampler, batch_size)
    # batch_size=1 because PerturbationDataset already yields full batches;
    # squeeze(0) below strips the extra DataLoader dimension.
    dataloader = DataLoader(train_dataset, batch_size=1, shuffle=False, num_workers=0)

    # --- Vocab: process_vocab appears to resolve paths relative to the scDFM
    # repo root, so chdir there temporarily and restore the cwd afterwards.
    orig_cwd = os.getcwd()
    os.chdir(os.path.join(_REPO_ROOT, "scDFM"))
    # Minimal stand-in for the training config object: only the attributes
    # process_vocab reads.
    _fc = type("_FakeConfig", (), {
        "perturbation_function": "crisper",
        "data_name": data_name,
        "n_top_genes": n_top_genes,
    })()
    vocab = process_vocab(data_manager, _fc)
    os.chdir(orig_cwd)

    # Token ids for every gene in the processed AnnData, in var_names order.
    gene_ids = vocab.encode(list(data_manager.adata.var_names))
    gene_ids = torch.tensor(gene_ids, dtype=torch.long, device=device)

    # Precomputed GRN mask matching the training run's fold/topk/split settings.
    mask_path = os.path.join(
        data_manager.data_path, data_manager.data_name,
        "mask_fold_1topk_30additive_negative_edge.pt",
    )

    # --- Model: hyperparameters must match the checkpointed training run. ---
    model = CascadedFlowModel(
        ntoken=len(vocab), d_model=128, nhead=8, d_hid=512, nlayers=4,
        fusion_method="differential_perceiver",
        perturbation_function="crisper",
        mask_path=mask_path,
        scgpt_dim=scgpt_dim, bottleneck_dim=128, dh_depth=2,
    )

    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects — fine for a trusted local checkpoint, but worth confirming.
    ckpt = torch.load(ckpt_path, map_location="cpu")
    model.load_state_dict(ckpt["model_state_dict"])
    model.eval()
    print(f"Loaded checkpoint: {ckpt_path}")
    print(f"  iteration: {ckpt.get('iteration', '?')}")

    # Conditional-OT flow path used to build (x_t, dx_t) supervision pairs.
    flow_path = AffineProbPath(scheduler=CondOTScheduler())
    # perturbation_dict maps name -> id; invert it to recover names from ids.
    inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}

    n_batches = 5
    all_mse_expr = []
    all_mse_latent = []

    print(f"\n{'='*90}")
    print(f"Running {n_batches} batches with trained model")
    print(f"Note: using synthetic latent (N(0,1.1)) to match normalized scGPT distribution")
    print(f"{'='*90}")

    for i, batch_data in enumerate(dataloader):
        if i >= n_batches:
            break

        # Strip the DataLoader's batch dim (batch_size=1, see above).
        source = batch_data["src_cell_data"].squeeze(0)
        target = batch_data["tgt_cell_data"].squeeze(0)

        # Re-encode perturbation ids through the vocab: dataset ids are from
        # perturbation_dict, but the model expects vocab token ids.
        perturbation_id = batch_data["condition_id"].squeeze(0).to(device)
        perturbation_name = [
            inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
        ]
        perturbation_id = torch.tensor(
            vocab.encode(perturbation_name), dtype=torch.long, device=device
        )
        perturbation_id = perturbation_id.repeat(source.shape[0], 1)

        # Random gene subset, mirroring the training-time subsampling.
        input_gene_ids = torch.randperm(source.shape[-1])[:infer_top_gene]
        source_sub = source[:, input_gene_ids]
        target_sub = target[:, input_gene_ids]
        gene_input = gene_ids[input_gene_ids].unsqueeze(0).expand(source.shape[0], -1)

        B = source_sub.shape[0]

        # Synthetic latent target: N(0, 1.1) stand-in for normalized scGPT
        # features (std 1.1 chosen to match their empirical spread — see header).
        z_target = torch.randn(B, infer_top_gene, scgpt_dim) * 1.1

        # Sigmoid of a normal gives t in (0, 1), biased toward the middle.
        t = torch.sigmoid(torch.randn(B))

        # Expression path: interpolate noise -> target, yielding x_t and the
        # ground-truth velocity dx_t at time t.
        noise_expr = torch.randn_like(source_sub)
        path_expr = flow_path.sample(t=t, x_0=noise_expr, x_1=target_sub)

        # Latent path: flatten to (B, G*D) since the path sampler presumably
        # expects 2-D inputs — TODO confirm; reshape back afterwards.
        noise_latent = torch.randn_like(z_target)
        z_target_flat = z_target.reshape(B, -1)
        noise_latent_flat = noise_latent.reshape(B, -1)
        path_latent_flat = flow_path.sample(t=t, x_0=noise_latent_flat, x_1=z_target_flat)

        z_t = path_latent_flat.x_t.reshape(B, infer_top_gene, scgpt_dim)
        dx_t_latent = path_latent_flat.dx_t.reshape(B, infer_top_gene, scgpt_dim)

        # Forward pass: model predicts both velocity fields (expr, latent);
        # the same t is passed for both streams.
        with torch.no_grad():
            pred_v_expr, pred_v_latent = model(
                gene_input, source_sub, path_expr.x_t, z_t,
                t, t, perturbation_id,
            )

        # Per-element squared errors and their batch means.
        err_expr = (pred_v_expr - path_expr.dx_t) ** 2
        err_latent = (pred_v_latent - dx_t_latent) ** 2
        mse_expr = err_expr.mean().item()
        mse_latent = err_latent.mean().item()
        all_mse_expr.append(mse_expr)
        all_mse_latent.append(mse_latent)

        print(f"\n--- Batch {i} ---")
        print(f"[Velocity Targets]")
        describe("dx_t_expr (ground truth)", path_expr.dx_t)
        describe("dx_t_latent (ground truth)", dx_t_latent)

        print(f"[Model Predictions]")
        describe("pred_v_expr", pred_v_expr)
        describe("pred_v_latent", pred_v_latent)

        print(f"[Prediction Error]")
        describe("error_expr (pred - gt)^2", err_expr)
        describe("error_latent (pred - gt)^2", err_latent)

        print(f"[MSE Summary]")
        print(f"  MSE_expr   = {mse_expr:.4f}")
        print(f"  MSE_latent = {mse_latent:.4f}")
        # max(..., 1e-8) guards against division by a (near-)zero latent MSE.
        print(f"  ratio expr/latent = {mse_expr / max(mse_latent, 1e-8):.2f}x")

    # --- Summary across all evaluated batches. ---
    avg_expr = np.mean(all_mse_expr)
    avg_latent = np.mean(all_mse_latent)
    print(f"\n{'='*90}")
    print(f"OVERALL AVERAGE ({n_batches} batches):")
    print(f"  avg MSE_expr   = {avg_expr:.4f}")
    print(f"  avg MSE_latent = {avg_latent:.4f}")
    print(f"  ratio expr/latent = {avg_expr / max(avg_latent, 1e-8):.2f}x")
    print(f"{'='*90}")
|
|
|
|
# Script entry point: run the diagnostic when executed directly.
if __name__ == "__main__":
    main()
|
|