Upload folder using huggingface_hub
Browse files. This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +59 -0
- GRN/GRN_latent_design.md +247 -0
- GRN/PCA1/_bootstrap_scdfm.py +101 -0
- GRN/PCA1/pca_extractor.py +63 -0
- GRN/PCA1/run_job.sh +69 -0
- GRN/PCA1/run_pca1.py +461 -0
- GRN/RegFM_design.md +768 -0
- GRN/SB/_bootstrap_scdfm.py +101 -0
- GRN/SB/config/__init__.py +0 -0
- GRN/SB/config/config_sb.py +103 -0
- GRN/SB/run_a1_baseline.sh +37 -0
- GRN/SB/run_eval_rk4.sh +65 -0
- GRN/SB/run_sb.sh +39 -0
- GRN/SB/run_sb_a6.sh +42 -0
- GRN/SB/run_sb_sa6.sh +42 -0
- GRN/SB/run_sb_source_anchored.sh +44 -0
- GRN/SB/scripts/run_sb.py +366 -0
- GRN/SB/src/__init__.py +0 -0
- GRN/SB/src/_scdfm_imports.py +50 -0
- GRN/SB/src/data/__init__.py +0 -0
- GRN/SB/src/data/data.py +112 -0
- GRN/SB/src/denoiser.py +297 -0
- GRN/SB/src/model/__init__.py +0 -0
- GRN/SB/src/model/layers.py +111 -0
- GRN/SB/src/model/model.py +218 -0
- GRN/SB/src/ot_anisotropic.py +109 -0
- GRN/SB/src/utils.py +14 -0
- GRN/baseline/baseline_5418102.out +3 -0
- GRN/baseline/baseline_d128_5527533.out +3 -0
- GRN/baseline/d128/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_100000/checkpoint.pt +3 -0
- GRN/baseline/d128/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/loss_curve.csv +0 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/agg_results.csv +10 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/checkpoint.pt +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/pred.h5ad +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/real.h5ad +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/results.csv +40 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/loss_curve.csv +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/agg_results.csv +10 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/pred.h5ad +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/real.h5ad +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/results.csv +40 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/agg_results.csv +10 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/checkpoint.pt +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/distributional_results.csv +40 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/pred.h5ad +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/real.h5ad +3 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/results.csv +40 -0
- GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/loss_curve.csv +3 -0
- GRN/dim1_ablation/run_dim1.py +461 -0
- GRN/dim1_ablation/run_eval_iter60000.sh +73 -0
.gitattributes
CHANGED
|
@@ -77,3 +77,62 @@ transfer/code/ori_scDFM/logs/ccfm_baseline_5404238.out filter=lfs diff=lfs merge
|
|
| 77 |
transfer/code/scDFM/data/norman/go.csv filter=lfs diff=lfs merge=lfs -text
|
| 78 |
transfer/code/scDFM/data/norman/perturb_processed.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 79 |
transfer/code/scDFM/data/norman.h5ad filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
transfer/code/scDFM/data/norman/go.csv filter=lfs diff=lfs merge=lfs -text
|
| 78 |
transfer/code/scDFM/data/norman/perturb_processed.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 79 |
transfer/code/scDFM/data/norman.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 80 |
+
GRN/baseline/baseline_5418102.out filter=lfs diff=lfs merge=lfs -text
|
| 81 |
+
GRN/baseline/baseline_d128_5527533.out filter=lfs diff=lfs merge=lfs -text
|
| 82 |
+
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 83 |
+
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 84 |
+
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 85 |
+
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 86 |
+
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 87 |
+
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 88 |
+
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 89 |
+
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 90 |
+
GRN/result/SB/A1_baseline/eval_only/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 91 |
+
GRN/result/SB/A1_baseline/eval_only/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 92 |
+
GRN/result/SB/A1_baseline/iteration_195000/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 93 |
+
GRN/result/SB/A1_baseline/iteration_195000/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 94 |
+
GRN/result/SB/A1_baseline/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 95 |
+
GRN/result/SB/A5_full_asb_sde/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 96 |
+
GRN/result/SB/A6_dsm_aniso/eval_only/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 97 |
+
GRN/result/SB/A6_dsm_aniso/eval_only/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 98 |
+
GRN/result/SB/A6_dsm_aniso/iteration_195000/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 99 |
+
GRN/result/SB/A6_dsm_aniso/iteration_195000/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 100 |
+
GRN/result/SB/A6_dsm_aniso/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 101 |
+
GRN/result/SB/SA1_source_anchored_ode/iteration_195000/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 102 |
+
GRN/result/SB/SA1_source_anchored_ode/iteration_195000/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 103 |
+
GRN/result/SB/SA1_source_anchored_ode/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 104 |
+
GRN/result/SB/SA6_source_anchored_sde/iteration_195000/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 105 |
+
GRN/result/SB/SA6_source_anchored_sde/iteration_195000/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 106 |
+
GRN/result/SB/SA6_source_anchored_sde/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 107 |
+
GRN/result/att_only/fall_att_only/iteration_0/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 108 |
+
GRN/result/att_only/fall_att_only/iteration_0/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 109 |
+
GRN/result/att_svd/svd_baseline/eval_only/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 110 |
+
GRN/result/att_svd/svd_baseline/eval_only/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 111 |
+
GRN/result/dense4/dense4-norman-f1-topk30-negTrue-d128-ld4-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4/eval_only/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 112 |
+
GRN/result/dense4/dense4-norman-f1-topk30-negTrue-d128-ld4-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4/eval_only/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 113 |
+
GRN/result/dim1_ablation/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-online-attn_L11/eval_only/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 114 |
+
GRN/result/dim1_ablation/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-online-attn_L11/eval_only/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 115 |
+
GRN/result/dim1_ablation/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-online-attn_L11/iteration_195000/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 116 |
+
GRN/result/dim1_ablation/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-online-attn_L11/iteration_195000/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 117 |
+
GRN/result/dim1_ablation/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-online-attn_L11/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 118 |
+
GRN/result/evl/dense4_35k/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 119 |
+
GRN/result/evl/dense4_35k/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 120 |
+
GRN/result/evl/dim1_ablation_145k/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 121 |
+
GRN/result/evl/dim1_ablation_145k/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 122 |
+
GRN/result/evl/dim1_ablation_155k/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 123 |
+
GRN/result/evl/dim1_ablation_155k/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 124 |
+
GRN/result/evl/scalar_15k/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 125 |
+
GRN/result/evl/scalar_15k/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 126 |
+
GRN/result/evl/scalar_bs48_30k/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 127 |
+
GRN/result/evl/scalar_bs48_30k/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 128 |
+
GRN/result/evl/scalar_bs48_75k/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 129 |
+
GRN/result/evl/scalar_bs48_75k/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 130 |
+
GRN/result/evl/scalar_dtk100_30k/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 131 |
+
GRN/result/evl/scalar_dtk100_30k/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 132 |
+
GRN/result/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-online-attn_L11/loss_curve.csv filter=lfs diff=lfs merge=lfs -text
|
| 133 |
+
GRN/result/scalar/scalar-norman-f1-topk30-negTrue-d128-ld1-lr5e-05-lw1.0-lp0.4-agg_signed_l2-dtk100-ema0.9999-ln-wu2000-rk4/eval_only/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 134 |
+
GRN/result/scalar/scalar-norman-f1-topk30-negTrue-d128-ld1-lr5e-05-lw1.0-lp0.4-agg_signed_l2-dtk100-ema0.9999-ln-wu2000-rk4/eval_only/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 135 |
+
GRN/result/scalar/scalar_bs48/eval_only/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 136 |
+
GRN/result/scalar/scalar_bs48/eval_only/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 137 |
+
GRN/result/topk30_emb/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-cached_sparse-sparse_tk30_L11/iteration_0/pred.h5ad filter=lfs diff=lfs merge=lfs -text
|
| 138 |
+
GRN/result/topk30_emb/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-cached_sparse-sparse_tk30_L11/iteration_0/real.h5ad filter=lfs diff=lfs merge=lfs -text
|
GRN/GRN_latent_design.md
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# GRN-CCFM:基于 Attention-Delta 的扰动预测模型
|
| 2 |
+
|
| 3 |
+
## Context
|
| 4 |
+
|
| 5 |
+
CCFM 当前两个核心问题:(1) 维度不对等——512 维 latent 压缩到 128 维 backbone;(2) scGPT encoder output 编码绝对状态,非扰动变化。此外还有一个实际 bug:scDFM/scGPT 词表未对齐,缺失基因在 latent 路径上处理不正确。
|
| 6 |
+
|
| 7 |
+
三个问题合并为一个方案,代码从 CCFM 复制到 `GRN/grn_ccfm/` 独立开发。
|
| 8 |
+
|
| 9 |
+
## 用户决策
|
| 10 |
+
|
| 11 |
+
- 保留 LatentForcing 两阶段 cascaded 范式
|
| 12 |
+
- 维度对齐:**d_model=512 + 加法融合**(同 LatentForcing,不用 concat)
|
| 13 |
+
- 特征替换:**Attention-Delta**(`Δ_attn @ gene_emb`)
|
| 14 |
+
- scGPT 保留为 backbone
|
| 15 |
+
- 修复缺失基因处理
|
| 16 |
+
|
| 17 |
+
---
|
| 18 |
+
|
| 19 |
+
## 项目结构
|
| 20 |
+
|
| 21 |
+
```
|
| 22 |
+
GRN/grn_ccfm/ # 新建子文件夹
|
| 23 |
+
├── _bootstrap_scdfm.py # 复制自 CCFM
|
| 24 |
+
├── config/
|
| 25 |
+
│ └── config_cascaded.py # 【修改】d_model=512 + 新参数
|
| 26 |
+
├── src/
|
| 27 |
+
│ ├── _scdfm_imports.py # 复制
|
| 28 |
+
│ ├── utils.py # 复制
|
| 29 |
+
│ ├── model/
|
| 30 |
+
│ │ ├── model.py # 【小改】无结构变化,d_model=512 自动适配
|
| 31 |
+
│ │ └── layers.py # 【小改】LatentEmbedder 简化
|
| 32 |
+
│ ├── data/
|
| 33 |
+
│ │ ├── data.py # 复制
|
| 34 |
+
│ │ ├── scgpt_extractor.py # 【核心】新增 attention-delta + missing mask
|
| 35 |
+
│ │ └── scgpt_cache.py # 复制
|
| 36 |
+
│ └── denoiser.py # 【修改】feature_mode 路由 + 缺失基因处理
|
| 37 |
+
├── scripts/
|
| 38 |
+
│ └── run_cascaded.py # 【修改】传新参数
|
| 39 |
+
└── run_grn.sh # 【新增】GPU 提交脚本
|
| 40 |
+
```
|
| 41 |
+
|
| 42 |
+
复制命令:
|
| 43 |
+
```bash
|
| 44 |
+
CCFM=/home/hp250092/ku50001222/qian/aivc/lfj/transfer/code/CCFM
|
| 45 |
+
GRN=/home/hp250092/ku50001222/qian/aivc/lfj/GRN/grn_ccfm
|
| 46 |
+
mkdir -p $GRN/{config,src/model,src/data,scripts}
|
| 47 |
+
cp $CCFM/_bootstrap_scdfm.py $GRN/
|
| 48 |
+
cp $CCFM/config/config_cascaded.py $GRN/config/
|
| 49 |
+
cp $CCFM/src/{_scdfm_imports.py,utils.py,__init__.py} $GRN/src/
|
| 50 |
+
cp $CCFM/src/model/{model.py,layers.py,__init__.py} $GRN/src/model/
|
| 51 |
+
cp $CCFM/src/data/{data.py,scgpt_extractor.py,scgpt_cache.py,__init__.py} $GRN/src/data/
|
| 52 |
+
cp $CCFM/src/denoiser.py $GRN/src/
|
| 53 |
+
cp $CCFM/scripts/run_cascaded.py $GRN/scripts/
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
---
|
| 57 |
+
|
| 58 |
+
## Part 1:d_model=512 + 加法融合
|
| 59 |
+
|
| 60 |
+
d_model 从 128 增到 512 后,latent 512→512 **无压缩**,expression 1→512 **充足容量**。加法融合与 LatentForcing 完全一致(pixel_emb + dino_emb in 1024-dim space)。model.py 的 forward() **不改结构**。
|
| 61 |
+
|
| 62 |
+
### 改动 1:`config/config_cascaded.py`
|
| 63 |
+
|
| 64 |
+
```python
|
| 65 |
+
d_model: int = 512 # 128 → 512
|
| 66 |
+
d_hid: int = 2048 # d_model * 4
|
| 67 |
+
bottleneck_dim: int = 512 # 匹配 d_model
|
| 68 |
+
|
| 69 |
+
# 新增
|
| 70 |
+
feature_mode: str = "encoder"
|
| 71 |
+
attn_layer: int = 11
|
| 72 |
+
attn_use_rank_norm: bool = True
|
| 73 |
+
attn_multi_layer: str = ""
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
### 改动 2:`src/model/layers.py`
|
| 77 |
+
|
| 78 |
+
LatentEmbedder:scgpt_dim == d_model 时简化为 LayerNorm + Linear。
|
| 79 |
+
|
| 80 |
+
### 改动 3:`src/model/model.py`
|
| 81 |
+
|
| 82 |
+
**无结构改动**。forward() 保持 `x = expr_tokens + latent_tokens`。所有层随 d_model=512 自动适配:
|
| 83 |
+
- GeneadaLN:参数化 by hidden_size ✓
|
| 84 |
+
- ContinuousValueEncoder:1→d_model MLP ✓
|
| 85 |
+
- ExprDecoder(use_batch_labels=True):接受 2*d_model 输入 ✓
|
| 86 |
+
- DiffPerceiverBlock:参数化 by d_model ✓
|
| 87 |
+
|
| 88 |
+
---
|
| 89 |
+
|
| 90 |
+
## Part 2:Attention-Delta 特征
|
| 91 |
+
|
| 92 |
+
### 计算流程
|
| 93 |
+
|
| 94 |
+
```
|
| 95 |
+
输入: control_expr (B, G), target_expr (B, G), gene_ids (G_sel,)
|
| 96 |
+
|
| 97 |
+
Step 1: gene_emb = scGPT.encoder(gene_ids) → (G_sel, 512) [查表,静态]
|
| 98 |
+
Step 2: hidden_ctrl = scGPT_layers_0_to_10(ctrl) → (B, S, 512)
|
| 99 |
+
hidden_tgt = scGPT_layers_0_to_10(tgt) → (B, S, 512)
|
| 100 |
+
Step 3: attn_ctrl = Q_ctrl @ K_ctrl^T (rank norm, avg heads) → (B, S, S)
|
| 101 |
+
attn_tgt = Q_tgt @ K_tgt^T (rank norm, avg heads) → (B, S, S)
|
| 102 |
+
Step 4: Δ_attn = attn_tgt - attn_ctrl, 去 CLS → (B, G_sel, G_sel)
|
| 103 |
+
Step 5: features = Δ_attn @ gene_emb → (B, G_sel, 512)
|
| 104 |
+
Step 6: scatter 到 G 位 + 归一化 → (B, G, 512)
|
| 105 |
+
```
|
| 106 |
+
|
| 107 |
+
### 改动 4:`src/data/scgpt_extractor.py`
|
| 108 |
+
|
| 109 |
+
保留 `extract()` 不变。新增:
|
| 110 |
+
|
| 111 |
+
- `_prepare_gene_selection(gene_indices, device)` → 共享的基因子集逻辑
|
| 112 |
+
- `_forward_to_layer(src, values, mask, target_layer)` → scGPT 前 L 层
|
| 113 |
+
- `_compute_attention(hidden, layer_idx, use_rank_norm)` → Q/K via `in_proj_weight` + rank norm
|
| 114 |
+
- `extract_attention_delta(control_expr, target_expr, ...)` → 核心方法,输出 (B, G, 512)
|
| 115 |
+
- **`get_missing_gene_mask(gene_indices)`** → 返回布尔 mask,True = 缺失基因
|
| 116 |
+
|
| 117 |
+
Q/K 提取细节:CCFM 设 `use_fast_transformer=False`,`self_attn` 是标准 `nn.MultiheadAttention`,Q/K/V 权重在 `in_proj_weight` 中按 `[W_q; W_k; W_v]` 排列。`_load_pretrained_safe()` 已处理 Wqkv→in_proj_weight 映射。
|
| 118 |
+
|
| 119 |
+
### 改动 5:`src/denoiser.py`
|
| 120 |
+
|
| 121 |
+
feature_mode 路由 + 缺失基因处理(见 Part 3)。
|
| 122 |
+
|
| 123 |
+
### 改动 6:`scripts/run_cascaded.py`
|
| 124 |
+
|
| 125 |
+
传入新参数。attention_delta 模式下 cache 不可用。
|
| 126 |
+
|
| 127 |
+
---
|
| 128 |
+
|
| 129 |
+
## Part 3:缺失基因修复
|
| 130 |
+
|
| 131 |
+
### 问题
|
| 132 |
+
|
| 133 |
+
scDFM 有 5000 HVG,部分不在 scGPT vocab 中。当前:scGPT 特征为零,但 latent 噪声/归一化/loss/推理 未适配。
|
| 134 |
+
|
| 135 |
+
### 修复:`missing_mask` 贯穿 latent 全路径
|
| 136 |
+
|
| 137 |
+
在 `scgpt_extractor` 中获取 mask:
|
| 138 |
+
|
| 139 |
+
```python
|
| 140 |
+
def get_missing_gene_mask(self, gene_indices=None):
|
| 141 |
+
"""返回 (G,) bool tensor, True = 该基因不在 scGPT vocab"""
|
| 142 |
+
hvg_ids = self.hvg_to_scgpt_id[gene_indices] if gene_indices is not None else self.hvg_to_scgpt_id
|
| 143 |
+
return hvg_ids < 0
|
| 144 |
+
```
|
| 145 |
+
|
| 146 |
+
#### 训练路径:`denoiser.train_step()` 中 4 处使用 mask
|
| 147 |
+
|
| 148 |
+
```python
|
| 149 |
+
missing = self.scgpt_extractor.get_missing_gene_mask(input_gene_ids) # (G_sub,)
|
| 150 |
+
|
| 151 |
+
# ① z_target 已经是零(extractor scatter 保证),无需改
|
| 152 |
+
|
| 153 |
+
# ② Latent 噪声置零
|
| 154 |
+
noise_latent = torch.randn_like(z_target)
|
| 155 |
+
noise_latent[:, missing, :] = 0.0 # 缺失基因无噪声
|
| 156 |
+
|
| 157 |
+
# ③ 归一化跳过缺失基因
|
| 158 |
+
# 已有逻辑: nonzero_mask = output.abs().sum(-1) > 0 → 天然跳过零值 ✓
|
| 159 |
+
|
| 160 |
+
# ④ Latent loss mask 缺失基因
|
| 161 |
+
loss_latent_per_gene = ((pred_v_latent - path_latent.dx_t) ** 2).mean(dim=-1) # (B, G)
|
| 162 |
+
loss_latent_per_gene[:, missing] = 0.0
|
| 163 |
+
n_valid = (~missing).sum().clamp(min=1)
|
| 164 |
+
loss_latent_per_sample = loss_latent_per_gene.sum(dim=-1) / n_valid
|
| 165 |
+
```
|
| 166 |
+
|
| 167 |
+
#### 推理路径:`denoiser.generate()` 中 3 处使用 mask
|
| 168 |
+
|
| 169 |
+
`generate()` 不调用 extractor,但需要 missing_mask。通过 `get_missing_gene_mask()` 获取全量基因的 mask(推理时用全部 5000 基因,不做随机子集):
|
| 170 |
+
|
| 171 |
+
```python
|
| 172 |
+
@torch.no_grad()
|
| 173 |
+
def generate(self, source, perturbation_id, gene_ids, ...):
|
| 174 |
+
B, G = source.shape
|
| 175 |
+
|
| 176 |
+
# 获取全量 missing mask(推理用全部基因,gene_indices=None)
|
| 177 |
+
missing = self.scgpt_extractor.get_missing_gene_mask() # (G_full,)
|
| 178 |
+
|
| 179 |
+
# ⑤ 初始 latent 噪声置零
|
| 180 |
+
z_t = torch.randn(B, G, scgpt_dim, device=device)
|
| 181 |
+
z_t[:, missing, :] = 0.0
|
| 182 |
+
|
| 183 |
+
# ⑥ ODE 积分过程中,每步后强制置零(防止数值漂移)
|
| 184 |
+
# RK4 模式:在 latent_vf 返回前
|
| 185 |
+
def latent_vf(t, z):
|
| 186 |
+
v_expr, v_latent = self.model(...)
|
| 187 |
+
v_latent[:, missing, :] = 0.0 # 缺失基因速度为零
|
| 188 |
+
return v_latent
|
| 189 |
+
|
| 190 |
+
# ⑦ Euler 模式:每步更新后
|
| 191 |
+
z_t[:, missing, :] = 0.0 # 每步 Euler 更新后置零
|
| 192 |
+
```
|
| 193 |
+
|
| 194 |
+
**Expression flow 完全不受影响**——expression 不依赖 scGPT vocab。
|
| 195 |
+
|
| 196 |
+
---
|
| 197 |
+
|
| 198 |
+
## 实施顺序
|
| 199 |
+
|
| 200 |
+
1. 从 CCFM 复制文件到 `GRN/grn_ccfm/`
|
| 201 |
+
2. `config/config_cascaded.py` — d_model=512 + 新字段
|
| 202 |
+
3. `src/model/layers.py` — LatentEmbedder 适配
|
| 203 |
+
4. `src/data/scgpt_extractor.py` — attention-delta + get_missing_gene_mask
|
| 204 |
+
5. `src/denoiser.py` — feature_mode 路由 + missing mask 训练 4 处 + 推理 3 处
|
| 205 |
+
6. `scripts/run_cascaded.py` — 传参
|
| 206 |
+
7. `run_grn.sh` — GPU 提交脚本
|
| 207 |
+
|
| 208 |
+
## 验证
|
| 209 |
+
|
| 210 |
+
```bash
|
| 211 |
+
# scDFM baseline(已有 GRN/baseline/)
|
| 212 |
+
|
| 213 |
+
# GRN-CCFM: encoder 特征(隔离维度+vocab修复效果)
|
| 214 |
+
pjsub run_grn.sh # --feature-mode encoder
|
| 215 |
+
|
| 216 |
+
# GRN-CCFM: attention-delta(完整方案)
|
| 217 |
+
pjsub run_grn.sh # --feature-mode attention_delta
|
| 218 |
+
```
|
| 219 |
+
|
| 220 |
+
- Shape 验证:attention-delta 输出 (B, G, 512)
|
| 221 |
+
- Sanity: control==target 时 Δ_attn≈0
|
| 222 |
+
- Missing mask: 缺失基因 latent noise/loss/velocity 均为零
|
| 223 |
+
- cell-eval 对比三个实验
|
| 224 |
+
|
| 225 |
+
---
|
| 226 |
+
|
| 227 |
+
## 后续优化方向(不在本次实施范围)
|
| 228 |
+
|
| 229 |
+
### 1. 聚合基底:gene_emb → z_ctrl
|
| 230 |
+
当前 `Δ_attn @ gene_emb`(静态身份向量)。可改为 `Δ_attn @ z_ctrl`(上下文相关,编码身份+表达状态)。z_ctrl 在 attention 提取时已算出,几乎免费。更丰富但可能更不稳定。
|
| 231 |
+
|
| 232 |
+
### 2. MLP 增强版融合
|
| 233 |
+
```python
|
| 234 |
+
z_change = MLP(concat(Δ_attn @ z_ctrl, Δ_attn @ z_tgt, z_tgt - z_ctrl))
|
| 235 |
+
```
|
| 236 |
+
引入可学习参数,可端到端训练。增加复杂度但可能提升表现。
|
| 237 |
+
|
| 238 |
+
### 3. 多层 attention 计算优化
|
| 239 |
+
用 `attn_multi_layer="9,10,11"` 时,共享 0→8 层计算,只对 9/10/11 层分别续接。
|
| 240 |
+
|
| 241 |
+
### 4. LR / 训练超参适配
|
| 242 |
+
d_model 从 128→512,参数量 ~15 倍。可能需要:更多 warmup(2000→4000)、略低 LR(5e-5→3e-5)。初次实验先用原值观察 loss 曲线。
|
| 243 |
+
|
| 244 |
+
### 5. 横向对比备忘
|
| 245 |
+
- **vs LatentForcing**:未使用 Bottleneck 两阶段投影(LF 用 Conv2d 做空间降采样,我们不需要)
|
| 246 |
+
- **vs scGPT Tutorial**:单细胞 attention 比群体平均更嘈杂,但 flow matching + 矩阵乘聚合有降噪效果
|
| 247 |
+
- **vs scDFM**:所有 block(DiffPerceiverBlock, GeneadaLN, ExprDecoder)均参数化 by d_model,已验证 512 维兼容
|
GRN/PCA1/_bootstrap_scdfm.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Bootstrap scDFM imports by creating missing __init__.py files and loading
|
| 3 |
+
its modules under a 'scdfm_src' prefix in sys.modules.
|
| 4 |
+
|
| 5 |
+
This module MUST be imported before any CCFM src imports.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import sys
|
| 9 |
+
import os
|
| 10 |
+
import types
|
| 11 |
+
|
| 12 |
+
_SCDFM_ROOT = os.path.normpath(
|
| 13 |
+
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "transfer", "code", "scDFM")
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
# Directories in scDFM that need __init__.py to be proper packages
|
| 17 |
+
_DIRS_NEEDING_INIT = [
|
| 18 |
+
"src",
|
| 19 |
+
"src/models",
|
| 20 |
+
"src/models/origin",
|
| 21 |
+
"src/data_process",
|
| 22 |
+
"src/tokenizer",
|
| 23 |
+
"src/script",
|
| 24 |
+
"src/models/perturbation",
|
| 25 |
+
]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _ensure_init_files():
|
| 29 |
+
"""Create missing __init__.py files in scDFM so it can be imported as packages."""
|
| 30 |
+
created = []
|
| 31 |
+
for d in _DIRS_NEEDING_INIT:
|
| 32 |
+
init_path = os.path.join(_SCDFM_ROOT, d, "__init__.py")
|
| 33 |
+
if not os.path.exists(init_path):
|
| 34 |
+
with open(init_path, "w") as f:
|
| 35 |
+
f.write("# Auto-created by CCFM bootstrap\n")
|
| 36 |
+
created.append(init_path)
|
| 37 |
+
return created
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def bootstrap():
|
| 41 |
+
"""Load scDFM's src package as 'scdfm_src' in sys.modules."""
|
| 42 |
+
if "scdfm_src" in sys.modules:
|
| 43 |
+
return # Already bootstrapped
|
| 44 |
+
|
| 45 |
+
# Create missing __init__.py files
|
| 46 |
+
_ensure_init_files()
|
| 47 |
+
|
| 48 |
+
# Save CCFM's src modules
|
| 49 |
+
saved = {}
|
| 50 |
+
for key in list(sys.modules.keys()):
|
| 51 |
+
if key == "src" or key.startswith("src."):
|
| 52 |
+
saved[key] = sys.modules.pop(key)
|
| 53 |
+
|
| 54 |
+
# Add scDFM root to path
|
| 55 |
+
sys.path.insert(0, _SCDFM_ROOT)
|
| 56 |
+
|
| 57 |
+
try:
|
| 58 |
+
# Import scDFM modules (their relative imports work now)
|
| 59 |
+
import src as scdfm_src_pkg
|
| 60 |
+
import src.models
|
| 61 |
+
import src.models.origin
|
| 62 |
+
import src.models.origin.blocks
|
| 63 |
+
import src.models.origin.layers
|
| 64 |
+
import src.models.origin.model
|
| 65 |
+
import src.flow_matching
|
| 66 |
+
import src.flow_matching.path
|
| 67 |
+
import src.flow_matching.path.path
|
| 68 |
+
import src.flow_matching.path.path_sample
|
| 69 |
+
import src.flow_matching.path.affine
|
| 70 |
+
import src.flow_matching.path.scheduler
|
| 71 |
+
import src.flow_matching.path.scheduler.scheduler
|
| 72 |
+
# Skip src.flow_matching.ot (requires 'ot' package, not needed for CCFM)
|
| 73 |
+
import src.utils
|
| 74 |
+
import src.utils.utils
|
| 75 |
+
import src.tokenizer
|
| 76 |
+
import src.tokenizer.gene_tokenizer
|
| 77 |
+
# Skip src.data_process (has heavy deps like bs4, rdkit)
|
| 78 |
+
# We handle data loading separately in CCFM
|
| 79 |
+
|
| 80 |
+
# Re-register all under scdfm_src.* prefix
|
| 81 |
+
for key in list(sys.modules.keys()):
|
| 82 |
+
if key == "src" or key.startswith("src."):
|
| 83 |
+
new_key = "scdfm_" + key
|
| 84 |
+
sys.modules[new_key] = sys.modules[key]
|
| 85 |
+
|
| 86 |
+
finally:
|
| 87 |
+
# Remove scDFM's src.* entries
|
| 88 |
+
for key in list(sys.modules.keys()):
|
| 89 |
+
if (key == "src" or key.startswith("src.")) and not key.startswith("scdfm_"):
|
| 90 |
+
del sys.modules[key]
|
| 91 |
+
|
| 92 |
+
# Restore CCFM's src modules
|
| 93 |
+
for key, mod in saved.items():
|
| 94 |
+
sys.modules[key] = mod
|
| 95 |
+
|
| 96 |
+
# Remove scDFM from front of path
|
| 97 |
+
if _SCDFM_ROOT in sys.path:
|
| 98 |
+
sys.path.remove(_SCDFM_ROOT)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
bootstrap()
|
GRN/PCA1/pca_extractor.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
PCAScGPTExtractor — Projects FrozenScGPTExtractor output onto
|
| 3 |
+
the first n_dims principal components of scGPT gene embeddings.
|
| 4 |
+
|
| 5 |
+
Instead of slicing the first n_dims (arbitrary), PCA captures the
|
| 6 |
+
dominant variation direction in gene embedding space:
|
| 7 |
+
gene_proj = PCA(gene_emb, n_dims) # (G, 1)
|
| 8 |
+
features = delta_attn @ gene_proj # (B, G, 1)
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
import torch.nn as nn
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class PCAScGPTExtractor(nn.Module):
|
| 16 |
+
|
| 17 |
+
def __init__(self, base_extractor, n_dims: int = 1):
|
| 18 |
+
super().__init__()
|
| 19 |
+
self.base = base_extractor
|
| 20 |
+
self.n_dims = n_dims
|
| 21 |
+
self.scgpt_d_model = n_dims
|
| 22 |
+
self.n_hvg = base_extractor.n_hvg
|
| 23 |
+
self._pca_V = None # (512, n_dims), computed lazily
|
| 24 |
+
|
| 25 |
+
@torch.no_grad()
|
| 26 |
+
def _ensure_pca(self, device):
|
| 27 |
+
if self._pca_V is not None:
|
| 28 |
+
return
|
| 29 |
+
valid_ids = self.base.hvg_to_scgpt_id[self.base.hvg_to_scgpt_id >= 0]
|
| 30 |
+
gene_emb = self.base.scgpt_model.encoder(
|
| 31 |
+
valid_ids.unsqueeze(0).to(device)
|
| 32 |
+
).squeeze(0) # (G_valid, 512)
|
| 33 |
+
|
| 34 |
+
centered = gene_emb - gene_emb.mean(dim=0)
|
| 35 |
+
U, S, V = torch.pca_lowrank(centered, q=max(self.n_dims, 6))
|
| 36 |
+
self._pca_V = V[:, :self.n_dims].to(device) # (512, n_dims)
|
| 37 |
+
|
| 38 |
+
explained = (S[:self.n_dims] ** 2).sum() / (centered ** 2).sum()
|
| 39 |
+
print(f"[PCA] gene_emb: {self.base.scgpt_d_model}D -> {self.n_dims}D, "
|
| 40 |
+
f"explained variance: {explained:.4f}")
|
| 41 |
+
|
| 42 |
+
def extract(self, expression_values, gene_indices=None):
|
| 43 |
+
z = self.base.extract(expression_values, gene_indices) # (B, G, 512)
|
| 44 |
+
self._ensure_pca(z.device)
|
| 45 |
+
return torch.matmul(z, self._pca_V) # (B, G, n_dims)
|
| 46 |
+
|
| 47 |
+
def extract_attention_delta(self, control_expr, target_expr,
|
| 48 |
+
gene_indices=None, attn_layer=11,
|
| 49 |
+
use_rank_norm=True, multi_layer=""):
|
| 50 |
+
z = self.base.extract_attention_delta(
|
| 51 |
+
control_expr, target_expr, gene_indices,
|
| 52 |
+
attn_layer, use_rank_norm, multi_layer,
|
| 53 |
+
) # (B, G, 512)
|
| 54 |
+
self._ensure_pca(z.device)
|
| 55 |
+
return torch.matmul(z, self._pca_V) # (B, G, n_dims)
|
| 56 |
+
|
| 57 |
+
def get_missing_gene_mask(self, gene_indices=None):
|
| 58 |
+
return self.base.get_missing_gene_mask(gene_indices)
|
| 59 |
+
|
| 60 |
+
def train(self, mode=True):
|
| 61 |
+
super().train(mode)
|
| 62 |
+
self.base.train(mode)
|
| 63 |
+
return self
|
GRN/PCA1/run_job.sh
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
#PJM -L rscgrp=b-batch
|
| 3 |
+
#PJM -L gpu=1
|
| 4 |
+
#PJM -L elapse=4:00:00
|
| 5 |
+
#PJM -N grn_pca1
|
| 6 |
+
#PJM -j
|
| 7 |
+
#PJM -o /home/hp250092/ku50001222/qian/aivc/lfj/GRN/PCA1/logs/pca1_%j.out
|
| 8 |
+
|
| 9 |
+
module load cuda/12.2.2
|
| 10 |
+
module load cudnn/8.9.7
|
| 11 |
+
module load gcc-toolset/12
|
| 12 |
+
|
| 13 |
+
source /home/hp250092/ku50001222/qian/aivc/lfj/stack_env/bin/activate
|
| 14 |
+
|
| 15 |
+
cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/PCA1
|
| 16 |
+
|
| 17 |
+
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256
|
| 18 |
+
|
| 19 |
+
echo "=========================================="
|
| 20 |
+
echo "Job ID: $PJM_JOBID"
|
| 21 |
+
echo "Job Name: $PJM_JOBNAME"
|
| 22 |
+
echo "Start: $(date)"
|
| 23 |
+
echo "Node: $(hostname)"
|
| 24 |
+
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
|
| 25 |
+
echo "Ablation: PCA1 — project attention_delta @ gene_emb onto PC1"
|
| 26 |
+
echo "=========================================="
|
| 27 |
+
|
| 28 |
+
accelerate launch --num_processes=1 run_pca1.py \
|
| 29 |
+
--data-name norman \
|
| 30 |
+
--d-model 512 \
|
| 31 |
+
--d-hid 2048 \
|
| 32 |
+
--nhead 8 \
|
| 33 |
+
--nlayers 4 \
|
| 34 |
+
--batch-size 48 \
|
| 35 |
+
--lr 5e-5 \
|
| 36 |
+
--steps 50000 \
|
| 37 |
+
--fusion-method differential_perceiver \
|
| 38 |
+
--perturbation-function crisper \
|
| 39 |
+
--noise-type Gaussian \
|
| 40 |
+
--infer-top-gene 1000 \
|
| 41 |
+
--n-top-genes 5000 \
|
| 42 |
+
--use-mmd-loss \
|
| 43 |
+
--gamma 0.5 \
|
| 44 |
+
--split-method additive \
|
| 45 |
+
--fold 1 \
|
| 46 |
+
--scgpt-dim 1 \
|
| 47 |
+
--bottleneck-dim 512 \
|
| 48 |
+
--latent-weight 1.0 \
|
| 49 |
+
--choose-latent-p 0.4 \
|
| 50 |
+
--dh-depth 2 \
|
| 51 |
+
--print-every 5000 \
|
| 52 |
+
--topk 30 \
|
| 53 |
+
--use-negative-edge \
|
| 54 |
+
--ema-decay 0.9999 \
|
| 55 |
+
--t-sample-mode logit_normal \
|
| 56 |
+
--t-expr-mean 0.0 \
|
| 57 |
+
--t-expr-std 1.0 \
|
| 58 |
+
--t-latent-mean 0.0 \
|
| 59 |
+
--t-latent-std 1.0 \
|
| 60 |
+
--warmup-steps 2000 \
|
| 61 |
+
--ode-method rk4 \
|
| 62 |
+
--feature-mode attention_delta \
|
| 63 |
+
--attn-layer 11 \
|
| 64 |
+
--attn-use-rank-norm \
|
| 65 |
+
--result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/PCA1
|
| 66 |
+
|
| 67 |
+
echo "=========================================="
|
| 68 |
+
echo "Finished: $(date)"
|
| 69 |
+
echo "=========================================="
|
GRN/PCA1/run_pca1.py
ADDED
|
@@ -0,0 +1,461 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Training and evaluation entry point for CCFM (Cascaded Conditioned Flow Matching).
|
| 3 |
+
PCA1 ablation: projects scGPT features onto first principal component of gene embeddings.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
# Set up paths — grn_ccfm/ is the CCFM project root (one level up from PCA1/)
|
| 10 |
+
_ABLATION_DIR = os.path.dirname(os.path.abspath(__file__))
|
| 11 |
+
_PROJECT_ROOT = os.path.join(_ABLATION_DIR, "..", "grn_ccfm")
|
| 12 |
+
_PROJECT_ROOT = os.path.normpath(_PROJECT_ROOT)
|
| 13 |
+
sys.path.insert(0, _PROJECT_ROOT)
|
| 14 |
+
sys.path.insert(0, _ABLATION_DIR) # for pca_extractor
|
| 15 |
+
|
| 16 |
+
# Bootstrap scDFM imports (must happen before any CCFM src imports)
|
| 17 |
+
import _bootstrap_scdfm # noqa: F401
|
| 18 |
+
|
| 19 |
+
import copy
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn as nn
|
| 22 |
+
import tyro
|
| 23 |
+
import tqdm
|
| 24 |
+
import numpy as np
|
| 25 |
+
import pandas as pd
|
| 26 |
+
import anndata as ad
|
| 27 |
+
import scanpy as sc
|
| 28 |
+
from torch.utils.data import DataLoader
|
| 29 |
+
from tqdm import trange
|
| 30 |
+
from accelerate import Accelerator, DistributedDataParallelKwargs
|
| 31 |
+
from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR, SequentialLR
|
| 32 |
+
|
| 33 |
+
from config.config_cascaded import CascadedFlowConfig as Config
|
| 34 |
+
from src.data.data import get_data_classes
|
| 35 |
+
from src.model.model import CascadedFlowModel
|
| 36 |
+
from src.data.scgpt_extractor import FrozenScGPTExtractor
|
| 37 |
+
from src.data.scgpt_cache import ScGPTFeatureCache
|
| 38 |
+
from src.denoiser import CascadedDenoiser
|
| 39 |
+
from src.utils import (
|
| 40 |
+
save_checkpoint,
|
| 41 |
+
load_checkpoint,
|
| 42 |
+
pick_eval_score,
|
| 43 |
+
process_vocab,
|
| 44 |
+
set_requires_grad_for_p_only,
|
| 45 |
+
GeneVocab,
|
| 46 |
+
)
|
| 47 |
+
from pca_extractor import PCAScGPTExtractor
|
| 48 |
+
|
| 49 |
+
from cell_eval import MetricsEvaluator
|
| 50 |
+
|
| 51 |
+
# Resolve scDFM directory paths
|
| 52 |
+
_REPO_ROOT = os.path.normpath(os.path.join(_PROJECT_ROOT, "..", "..", "transfer", "code")) # transfer/code/
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@torch.inference_mode()
|
| 56 |
+
def test(data_sampler, denoiser, accelerator, config, vocab, data_manager,
|
| 57 |
+
batch_size=128, path_dir="./"):
|
| 58 |
+
"""Evaluate: generate predictions and compute cell-eval metrics."""
|
| 59 |
+
device = accelerator.device
|
| 60 |
+
gene_ids_test = vocab.encode(list(data_sampler.adata.var_names))
|
| 61 |
+
gene_ids_test = torch.tensor(gene_ids_test, dtype=torch.long, device=device)
|
| 62 |
+
|
| 63 |
+
perturbation_name_list = data_sampler._perturbation_covariates
|
| 64 |
+
control_data = data_sampler.get_control_data()
|
| 65 |
+
inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}
|
| 66 |
+
|
| 67 |
+
all_pred_expressions = [control_data["src_cell_data"]]
|
| 68 |
+
obs_perturbation_name_pred = ["control"] * control_data["src_cell_data"].shape[0]
|
| 69 |
+
all_target_expressions = [control_data["src_cell_data"]]
|
| 70 |
+
obs_perturbation_name_real = ["control"] * control_data["src_cell_data"].shape[0]
|
| 71 |
+
|
| 72 |
+
print("perturbation_name_list:", len(perturbation_name_list))
|
| 73 |
+
for perturbation_name in perturbation_name_list:
|
| 74 |
+
perturbation_data = data_sampler.get_perturbation_data(perturbation_name)
|
| 75 |
+
target = perturbation_data["tgt_cell_data"]
|
| 76 |
+
perturbation_id = perturbation_data["condition_id"]
|
| 77 |
+
source = control_data["src_cell_data"].to(device)
|
| 78 |
+
perturbation_id = perturbation_id.to(device)
|
| 79 |
+
|
| 80 |
+
if config.perturbation_function == "crisper":
|
| 81 |
+
perturbation_name_crisper = [
|
| 82 |
+
inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
|
| 83 |
+
]
|
| 84 |
+
perturbation_id = torch.tensor(
|
| 85 |
+
vocab.encode(perturbation_name_crisper), dtype=torch.long, device=device
|
| 86 |
+
)
|
| 87 |
+
perturbation_id = perturbation_id.repeat(source.shape[0], 1)
|
| 88 |
+
|
| 89 |
+
idx = torch.randperm(source.shape[0])
|
| 90 |
+
source = source[idx]
|
| 91 |
+
N = 128
|
| 92 |
+
source = source[:N]
|
| 93 |
+
|
| 94 |
+
pred_expressions = []
|
| 95 |
+
for i in trange(0, N, batch_size, desc=perturbation_name):
|
| 96 |
+
batch_source = source[i : i + batch_size]
|
| 97 |
+
batch_pert_id = perturbation_id[0].repeat(batch_source.shape[0], 1).to(device)
|
| 98 |
+
|
| 99 |
+
# Get the underlying model for generation
|
| 100 |
+
model = denoiser.module if hasattr(denoiser, "module") else denoiser
|
| 101 |
+
|
| 102 |
+
pred = model.generate(
|
| 103 |
+
batch_source,
|
| 104 |
+
batch_pert_id,
|
| 105 |
+
gene_ids_test,
|
| 106 |
+
latent_steps=config.latent_steps,
|
| 107 |
+
expr_steps=config.expr_steps,
|
| 108 |
+
method=config.ode_method,
|
| 109 |
+
)
|
| 110 |
+
pred_expressions.append(pred)
|
| 111 |
+
|
| 112 |
+
pred_expressions = torch.cat(pred_expressions, dim=0).cpu().numpy()
|
| 113 |
+
all_pred_expressions.append(pred_expressions)
|
| 114 |
+
all_target_expressions.append(target)
|
| 115 |
+
obs_perturbation_name_pred.extend([perturbation_name] * pred_expressions.shape[0])
|
| 116 |
+
obs_perturbation_name_real.extend([perturbation_name] * target.shape[0])
|
| 117 |
+
|
| 118 |
+
all_pred_expressions = np.concatenate(all_pred_expressions, axis=0)
|
| 119 |
+
all_target_expressions = np.concatenate(all_target_expressions, axis=0)
|
| 120 |
+
obs_pred = pd.DataFrame({"perturbation": obs_perturbation_name_pred})
|
| 121 |
+
obs_real = pd.DataFrame({"perturbation": obs_perturbation_name_real})
|
| 122 |
+
pred_adata = ad.AnnData(X=all_pred_expressions, obs=obs_pred)
|
| 123 |
+
real_adata = ad.AnnData(X=all_target_expressions, obs=obs_real)
|
| 124 |
+
|
| 125 |
+
eval_score = None
|
| 126 |
+
if accelerator.is_main_process:
|
| 127 |
+
evaluator = MetricsEvaluator(
|
| 128 |
+
adata_pred=pred_adata,
|
| 129 |
+
adata_real=real_adata,
|
| 130 |
+
control_pert="control",
|
| 131 |
+
pert_col="perturbation",
|
| 132 |
+
num_threads=32,
|
| 133 |
+
)
|
| 134 |
+
results, agg_results = evaluator.compute()
|
| 135 |
+
results.write_csv(os.path.join(path_dir, "results.csv"))
|
| 136 |
+
agg_results.write_csv(os.path.join(path_dir, "agg_results.csv"))
|
| 137 |
+
pred_adata.write_h5ad(os.path.join(path_dir, "pred.h5ad"))
|
| 138 |
+
real_adata.write_h5ad(os.path.join(path_dir, "real.h5ad"))
|
| 139 |
+
eval_score = pick_eval_score(agg_results, "mse")
|
| 140 |
+
print(f"Current evaluation score: {eval_score:.4f}")
|
| 141 |
+
|
| 142 |
+
return eval_score
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
if __name__ == "__main__":
|
| 146 |
+
config = tyro.cli(Config)
|
| 147 |
+
|
| 148 |
+
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
|
| 149 |
+
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
|
| 150 |
+
if accelerator.is_main_process:
|
| 151 |
+
print(config)
|
| 152 |
+
save_path = config.make_path()
|
| 153 |
+
os.makedirs(save_path, exist_ok=True)
|
| 154 |
+
device = accelerator.device
|
| 155 |
+
|
| 156 |
+
# === Data loading (reuse scDFM) ===
|
| 157 |
+
Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
|
| 158 |
+
|
| 159 |
+
scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
|
| 160 |
+
data_manager = Data(scdfm_data_path)
|
| 161 |
+
data_manager.load_data(config.data_name)
|
| 162 |
+
|
| 163 |
+
# Convert var_names from Ensembl IDs to gene symbols if needed.
|
| 164 |
+
# scDFM vocab and perturbation encoding both expect gene symbols as var_names.
|
| 165 |
+
if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
|
| 166 |
+
data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
|
| 167 |
+
data_manager.adata.var_names_make_unique()
|
| 168 |
+
if accelerator.is_main_process:
|
| 169 |
+
print(f"Converted var_names to gene symbols, sample: {list(data_manager.adata.var_names[:5])}")
|
| 170 |
+
|
| 171 |
+
data_manager.process_data(
|
| 172 |
+
n_top_genes=config.n_top_genes,
|
| 173 |
+
split_method=config.split_method,
|
| 174 |
+
fold=config.fold,
|
| 175 |
+
use_negative_edge=config.use_negative_edge,
|
| 176 |
+
k=config.topk,
|
| 177 |
+
)
|
| 178 |
+
train_sampler, valid_sampler, _ = data_manager.load_flow_data(batch_size=config.batch_size)
|
| 179 |
+
|
| 180 |
+
train_dataset = PerturbationDataset(train_sampler, config.batch_size)
|
| 181 |
+
dataloader = DataLoader(
|
| 182 |
+
train_dataset, batch_size=1, shuffle=False,
|
| 183 |
+
num_workers=8, pin_memory=True, persistent_workers=True,
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
# === Build mask path ===
|
| 187 |
+
if config.use_negative_edge:
|
| 188 |
+
mask_path = os.path.join(
|
| 189 |
+
data_manager.data_path, data_manager.data_name,
|
| 190 |
+
f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}_negative_edge.pt",
|
| 191 |
+
)
|
| 192 |
+
else:
|
| 193 |
+
mask_path = os.path.join(
|
| 194 |
+
data_manager.data_path, data_manager.data_name,
|
| 195 |
+
f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}.pt",
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
# === Vocab ===
|
| 199 |
+
orig_cwd = os.getcwd()
|
| 200 |
+
os.chdir(os.path.join(_REPO_ROOT, "scDFM"))
|
| 201 |
+
vocab = process_vocab(data_manager, config)
|
| 202 |
+
os.chdir(orig_cwd)
|
| 203 |
+
|
| 204 |
+
# Vocab is built from var_names (may be Ensembl IDs or gene symbols)
|
| 205 |
+
gene_ids = vocab.encode(list(data_manager.adata.var_names))
|
| 206 |
+
gene_ids = torch.tensor(gene_ids, dtype=torch.long, device=device)
|
| 207 |
+
|
| 208 |
+
# === Build CascadedFlowModel ===
|
| 209 |
+
vf = CascadedFlowModel(
|
| 210 |
+
ntoken=len(vocab),
|
| 211 |
+
d_model=config.d_model,
|
| 212 |
+
nhead=config.nhead,
|
| 213 |
+
d_hid=config.d_hid,
|
| 214 |
+
nlayers=config.nlayers,
|
| 215 |
+
fusion_method=config.fusion_method,
|
| 216 |
+
perturbation_function=config.perturbation_function,
|
| 217 |
+
mask_path=mask_path,
|
| 218 |
+
scgpt_dim=config.scgpt_dim,
|
| 219 |
+
bottleneck_dim=config.bottleneck_dim,
|
| 220 |
+
dh_depth=config.dh_depth,
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
# === Build FrozenScGPTExtractor ===
|
| 224 |
+
# var_names have been converted to gene symbols above, matching scGPT vocab.
|
| 225 |
+
hvg_gene_names = list(data_manager.adata.var_names)
|
| 226 |
+
scgpt_model_dir = os.path.join(
|
| 227 |
+
os.path.dirname(_REPO_ROOT), # transfer/
|
| 228 |
+
config.scgpt_model_dir.replace("transfer/", ""),
|
| 229 |
+
)
|
| 230 |
+
scgpt_extractor = FrozenScGPTExtractor(
|
| 231 |
+
model_dir=scgpt_model_dir,
|
| 232 |
+
hvg_gene_names=hvg_gene_names,
|
| 233 |
+
device=device,
|
| 234 |
+
max_seq_len=config.scgpt_max_seq_len,
|
| 235 |
+
target_std=config.target_std,
|
| 236 |
+
warmup_batches=config.warmup_batches,
|
| 237 |
+
)
|
| 238 |
+
scgpt_extractor = scgpt_extractor.to(device)
|
| 239 |
+
|
| 240 |
+
# === PCA1: project scGPT features onto first principal component ===
|
| 241 |
+
if config.scgpt_dim < scgpt_extractor.scgpt_d_model:
|
| 242 |
+
print(f"[PCA1] Projecting scGPT features: {scgpt_extractor.scgpt_d_model}D -> {config.scgpt_dim}D (PCA)")
|
| 243 |
+
scgpt_extractor = PCAScGPTExtractor(scgpt_extractor, n_dims=config.scgpt_dim)
|
| 244 |
+
|
| 245 |
+
# === Build CascadedDenoiser ===
|
| 246 |
+
denoiser = CascadedDenoiser(
|
| 247 |
+
model=vf,
|
| 248 |
+
scgpt_extractor=scgpt_extractor,
|
| 249 |
+
choose_latent_p=config.choose_latent_p,
|
| 250 |
+
latent_weight=config.latent_weight,
|
| 251 |
+
noise_type=config.noise_type,
|
| 252 |
+
use_mmd_loss=config.use_mmd_loss,
|
| 253 |
+
gamma=config.gamma,
|
| 254 |
+
poisson_alpha=config.poisson_alpha,
|
| 255 |
+
poisson_target_sum=config.poisson_target_sum,
|
| 256 |
+
t_sample_mode=config.t_sample_mode,
|
| 257 |
+
t_expr_mean=config.t_expr_mean,
|
| 258 |
+
t_expr_std=config.t_expr_std,
|
| 259 |
+
t_latent_mean=config.t_latent_mean,
|
| 260 |
+
t_latent_std=config.t_latent_std,
|
| 261 |
+
noise_beta=config.noise_beta,
|
| 262 |
+
feature_mode=config.feature_mode,
|
| 263 |
+
attn_layer=config.attn_layer,
|
| 264 |
+
attn_use_rank_norm=config.attn_use_rank_norm,
|
| 265 |
+
attn_multi_layer=config.attn_multi_layer,
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
# === Load scGPT cache if configured ===
|
| 269 |
+
scgpt_cache = None
|
| 270 |
+
if config.scgpt_cache_path and config.feature_mode == "attention_delta":
|
| 271 |
+
if accelerator.is_main_process:
|
| 272 |
+
print("WARNING: scGPT cache is not compatible with attention_delta mode. Ignoring cache.")
|
| 273 |
+
config.scgpt_cache_path = ""
|
| 274 |
+
if config.scgpt_cache_path:
|
| 275 |
+
scgpt_cache = ScGPTFeatureCache(
|
| 276 |
+
config.scgpt_cache_path,
|
| 277 |
+
target_std=config.target_std,
|
| 278 |
+
)
|
| 279 |
+
if accelerator.is_main_process:
|
| 280 |
+
print(f"Using pre-extracted scGPT cache: {config.scgpt_cache_path}")
|
| 281 |
+
print(f" Cache shape: {scgpt_cache.features.shape}, cells: {len(scgpt_cache.name_to_idx)}")
|
| 282 |
+
|
| 283 |
+
# === EMA model (on same device as training model) ===
|
| 284 |
+
ema_model = copy.deepcopy(vf).to(device)
|
| 285 |
+
ema_model.eval()
|
| 286 |
+
ema_model.requires_grad_(False)
|
| 287 |
+
|
| 288 |
+
# === Optimizer & Scheduler (with warmup) ===
|
| 289 |
+
save_path = config.make_path()
|
| 290 |
+
optimizer = torch.optim.Adam(vf.parameters(), lr=config.lr)
|
| 291 |
+
warmup_scheduler = LinearLR(
|
| 292 |
+
optimizer, start_factor=1e-3, end_factor=1.0, total_iters=config.warmup_steps,
|
| 293 |
+
)
|
| 294 |
+
cosine_scheduler = CosineAnnealingLR(
|
| 295 |
+
optimizer, T_max=max(config.steps - config.warmup_steps, 1), eta_min=config.eta_min,
|
| 296 |
+
)
|
| 297 |
+
scheduler = SequentialLR(
|
| 298 |
+
optimizer, [warmup_scheduler, cosine_scheduler], milestones=[config.warmup_steps],
|
| 299 |
+
)
|
| 300 |
+
|
| 301 |
+
start_iteration = 0
|
| 302 |
+
if config.checkpoint_path != "":
|
| 303 |
+
start_iteration, _ = load_checkpoint(config.checkpoint_path, vf, optimizer, scheduler)
|
| 304 |
+
# Sync EMA with loaded weights
|
| 305 |
+
ema_model.load_state_dict(vf.state_dict())
|
| 306 |
+
|
| 307 |
+
# === Prepare with accelerator ===
|
| 308 |
+
denoiser = accelerator.prepare(denoiser)
|
| 309 |
+
optimizer, scheduler, dataloader = accelerator.prepare(optimizer, scheduler, dataloader)
|
| 310 |
+
|
| 311 |
+
inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}
|
| 312 |
+
|
| 313 |
+
# === Test-only mode ===
|
| 314 |
+
if config.test_only:
|
| 315 |
+
eval_path = os.path.join(save_path, "eval_only")
|
| 316 |
+
os.makedirs(eval_path, exist_ok=True)
|
| 317 |
+
if accelerator.is_main_process:
|
| 318 |
+
print(f"Test-only mode. Saving results to {eval_path}")
|
| 319 |
+
eval_score = test(
|
| 320 |
+
valid_sampler, denoiser, accelerator, config, vocab, data_manager,
|
| 321 |
+
batch_size=config.batch_size, path_dir=eval_path,
|
| 322 |
+
)
|
| 323 |
+
if accelerator.is_main_process and eval_score is not None:
|
| 324 |
+
print(f"Final evaluation score: {eval_score:.4f}")
|
| 325 |
+
sys.exit(0)
|
| 326 |
+
|
| 327 |
+
# === Loss logging (CSV + TensorBoard) ===
|
| 328 |
+
import csv
|
| 329 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 330 |
+
if accelerator.is_main_process:
|
| 331 |
+
os.makedirs(save_path, exist_ok=True)
|
| 332 |
+
csv_path = os.path.join(save_path, 'loss_curve.csv')
|
| 333 |
+
if start_iteration > 0 and os.path.exists(csv_path):
|
| 334 |
+
csv_file = open(csv_path, 'a', newline='')
|
| 335 |
+
csv_writer = csv.writer(csv_file)
|
| 336 |
+
else:
|
| 337 |
+
csv_file = open(csv_path, 'w', newline='')
|
| 338 |
+
csv_writer = csv.writer(csv_file)
|
| 339 |
+
csv_writer.writerow(['iteration', 'loss', 'loss_expr', 'loss_latent', 'loss_mmd', 'lr'])
|
| 340 |
+
tb_writer = SummaryWriter(log_dir=os.path.join(save_path, 'tb_logs'))
|
| 341 |
+
|
| 342 |
+
# === Training loop ===
|
| 343 |
+
pbar = tqdm.tqdm(total=config.steps, initial=start_iteration)
|
| 344 |
+
iteration = start_iteration
|
| 345 |
+
|
| 346 |
+
while iteration < config.steps:
|
| 347 |
+
for batch_data in dataloader:
|
| 348 |
+
source = batch_data["src_cell_data"].squeeze(0)
|
| 349 |
+
target = batch_data["tgt_cell_data"].squeeze(0)
|
| 350 |
+
perturbation_id = batch_data["condition_id"].squeeze(0).to(device)
|
| 351 |
+
|
| 352 |
+
if config.perturbation_function == "crisper":
|
| 353 |
+
perturbation_name = [
|
| 354 |
+
inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
|
| 355 |
+
]
|
| 356 |
+
perturbation_id = torch.tensor(
|
| 357 |
+
vocab.encode(perturbation_name), dtype=torch.long, device=device
|
| 358 |
+
)
|
| 359 |
+
perturbation_id = perturbation_id.repeat(source.shape[0], 1)
|
| 360 |
+
|
| 361 |
+
# Get the underlying denoiser for train_step
|
| 362 |
+
base_denoiser = denoiser.module if hasattr(denoiser, "module") else denoiser
|
| 363 |
+
base_denoiser.model.train()
|
| 364 |
+
|
| 365 |
+
if scgpt_cache is not None:
|
| 366 |
+
# Cache mode: sample gene subset here, look up pre-extracted features
|
| 367 |
+
# DataLoader collate wraps strings in tuples; unwrap them
|
| 368 |
+
tgt_cell_names = [n[0] if isinstance(n, (tuple, list)) else n for n in batch_data["tgt_cell_id"]]
|
| 369 |
+
input_gene_ids = torch.randperm(source.shape[-1], device=device)[:config.infer_top_gene]
|
| 370 |
+
cached_z_target = scgpt_cache.lookup(tgt_cell_names, input_gene_ids, device=device)
|
| 371 |
+
loss_dict = base_denoiser.train_step(
|
| 372 |
+
source, target, perturbation_id, gene_ids,
|
| 373 |
+
infer_top_gene=config.infer_top_gene,
|
| 374 |
+
cached_z_target=cached_z_target,
|
| 375 |
+
cached_gene_ids=input_gene_ids,
|
| 376 |
+
)
|
| 377 |
+
else:
|
| 378 |
+
loss_dict = base_denoiser.train_step(
|
| 379 |
+
source, target, perturbation_id, gene_ids,
|
| 380 |
+
infer_top_gene=config.infer_top_gene,
|
| 381 |
+
)
|
| 382 |
+
|
| 383 |
+
loss = loss_dict["loss"]
|
| 384 |
+
optimizer.zero_grad(set_to_none=True)
|
| 385 |
+
accelerator.backward(loss)
|
| 386 |
+
optimizer.step()
|
| 387 |
+
scheduler.step()
|
| 388 |
+
|
| 389 |
+
# === EMA update ===
|
| 390 |
+
with torch.no_grad():
|
| 391 |
+
decay = config.ema_decay
|
| 392 |
+
for ema_p, model_p in zip(ema_model.parameters(), vf.parameters()):
|
| 393 |
+
ema_p.lerp_(model_p.data, 1 - decay)
|
| 394 |
+
|
| 395 |
+
if iteration % config.print_every == 0:
|
| 396 |
+
save_path_ = os.path.join(save_path, f"iteration_{iteration}")
|
| 397 |
+
os.makedirs(save_path_, exist_ok=True)
|
| 398 |
+
if accelerator.is_main_process:
|
| 399 |
+
print(f"Saving iteration {iteration} checkpoint...")
|
| 400 |
+
# Save EMA model (used for inference) and training state
|
| 401 |
+
save_checkpoint(
|
| 402 |
+
model=ema_model,
|
| 403 |
+
optimizer=optimizer,
|
| 404 |
+
scheduler=scheduler,
|
| 405 |
+
iteration=iteration,
|
| 406 |
+
eval_score=None,
|
| 407 |
+
save_path=save_path_,
|
| 408 |
+
is_best=False,
|
| 409 |
+
)
|
| 410 |
+
# Evaluate with EMA weights
|
| 411 |
+
# Only evaluate at the start and the last checkpoint
|
| 412 |
+
if iteration == 0 or iteration + config.print_every >= config.steps:
|
| 413 |
+
# Swap EMA weights into denoiser for evaluation
|
| 414 |
+
orig_state = copy.deepcopy(vf.state_dict())
|
| 415 |
+
vf.load_state_dict(ema_model.state_dict())
|
| 416 |
+
|
| 417 |
+
eval_score = test(
|
| 418 |
+
valid_sampler, denoiser, accelerator, config, vocab, data_manager,
|
| 419 |
+
batch_size=config.batch_size, path_dir=save_path_,
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
# Restore training weights
|
| 423 |
+
vf.load_state_dict(orig_state)
|
| 424 |
+
|
| 425 |
+
if accelerator.is_main_process and eval_score is not None:
|
| 426 |
+
tb_writer.add_scalar('eval/score', eval_score, iteration)
|
| 427 |
+
|
| 428 |
+
# --- Per-iteration loss logging ---
|
| 429 |
+
if accelerator.is_main_process:
|
| 430 |
+
current_lr = scheduler.get_last_lr()[0]
|
| 431 |
+
csv_writer.writerow([
|
| 432 |
+
iteration, loss.item(),
|
| 433 |
+
loss_dict["loss_expr"].item(),
|
| 434 |
+
loss_dict["loss_latent"].item(),
|
| 435 |
+
loss_dict["loss_mmd"].item(),
|
| 436 |
+
current_lr,
|
| 437 |
+
])
|
| 438 |
+
if iteration % 100 == 0:
|
| 439 |
+
csv_file.flush()
|
| 440 |
+
tb_writer.add_scalar('loss/train', loss.item(), iteration)
|
| 441 |
+
tb_writer.add_scalar('loss/expr', loss_dict["loss_expr"].item(), iteration)
|
| 442 |
+
tb_writer.add_scalar('loss/latent', loss_dict["loss_latent"].item(), iteration)
|
| 443 |
+
tb_writer.add_scalar('loss/mmd', loss_dict["loss_mmd"].item(), iteration)
|
| 444 |
+
tb_writer.add_scalar('lr', current_lr, iteration)
|
| 445 |
+
|
| 446 |
+
accelerator.wait_for_everyone()
|
| 447 |
+
|
| 448 |
+
pbar.update(1)
|
| 449 |
+
pbar.set_description(
|
| 450 |
+
f"loss: {loss.item():.4f} (expr: {loss_dict['loss_expr'].item():.4f}, "
|
| 451 |
+
f"latent: {loss_dict['loss_latent'].item():.4f}, "
|
| 452 |
+
f"mmd: {loss_dict['loss_mmd'].item():.4f}), iter: {iteration}"
|
| 453 |
+
)
|
| 454 |
+
iteration += 1
|
| 455 |
+
if iteration >= config.steps:
|
| 456 |
+
break
|
| 457 |
+
|
| 458 |
+
# === Close logging ===
|
| 459 |
+
if accelerator.is_main_process:
|
| 460 |
+
csv_file.close()
|
| 461 |
+
tb_writer.close()
|
GRN/RegFM_design.md
ADDED
|
@@ -0,0 +1,768 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Regulatory Flow Matching (RegFM): 设计文档
|
| 2 |
+
|
| 3 |
+
## 1. 问题背景
|
| 4 |
+
|
| 5 |
+
### 1.1 当前方法的局限
|
| 6 |
+
|
| 7 |
+
scDFM 是一个基于流匹配(Flow Matching)的单细胞扰动预测模型。它学习一个速度场 $v_\theta(x, t)$,将控制细胞的表达分布沿 ODE 轨迹传输到扰动后的表达分布。
|
| 8 |
+
|
| 9 |
+
**核心局限**:scDFM 将基因表达视为 **无结构的向量** ——速度场对每个基因的预测是独立的,不显式建模基因间的调控交互。但生物学告诉我们,扰动响应是通过基因调控网络(GRN)传播的:knockout gene A → 直接靶基因 B 变化 → 下游基因 C 变化。
|
| 10 |
+
|
| 11 |
+
### 1.2 已有尝试的失败分析
|
| 12 |
+
|
| 13 |
+
GRN 项目(grn_ccfm / grn_svd / grn_att_only)借鉴 LatentForcing 的双时间步级联方法,试图同时生成 delta_attention(GRN 变化)和基因表达。但所有变体都遇到了:
|
| 14 |
+
|
| 15 |
+
- **latent loss 收敛困难**:稳定在 ~1.0-2.0,无法有效训练
|
| 16 |
+
- **级联解耦**:训练时 40% step 只训 latent / 60% 只训 expression,推理时两阶段串行 ODE
|
| 17 |
+
- **表达生成未受益**:GRN 信息未能有效引导 expression flow
|
| 18 |
+
|
| 19 |
+
**根本原因**:级联方法要求模型「生成」GRN 变化,但这本身是一个极其困难的任务(稀疏 G×G 矩阵,0.6% 非零)。我们的目标不是生成 GRN,而是用 GRN 信息来提升表达预测。
|
| 20 |
+
|
| 21 |
+
### 1.3 核心洞察
|
| 22 |
+
|
| 23 |
+
delta_attention 是一种 **训练时特权信息**(Learning Using Privileged Information):训练时有(可从 source + target 细胞计算),推理时无(只有 source 细胞)。
|
| 24 |
+
|
| 25 |
+
与其训练一个 latent flow 来生成它,不如直接将其融入速度场的 **结构** 中。
|
| 26 |
+
|
| 27 |
+
---
|
| 28 |
+
|
| 29 |
+
## 2. 方法:Regulatory Flow Matching (RegFM)
|
| 30 |
+
|
| 31 |
+
### 2.1 核心思想
|
| 32 |
+
|
| 33 |
+
将速度场分解为两个语义明确的成分:
|
| 34 |
+
|
| 35 |
+
$$v_\theta(x, t) = \alpha_\theta \odot v_{reg}(x, t) + (1 - \alpha_\theta) \odot v_{int}(x, t)$$
|
| 36 |
+
|
| 37 |
+
- **$v_{reg}$(调控速度)**:由基因间交互关系驱动。通过一个可学习的调控交互矩阵 $R_\theta$ 聚合其他基因的信息来计算 gene j 的速度
|
| 38 |
+
- **$v_{int}$(内在速度)**:基因自身的自主动力学,不依赖其他基因的状态
|
| 39 |
+
- **$\alpha_\theta$(门控)**:逐基因、逐时间步的可学习混合比例
|
| 40 |
+
|
| 41 |
+
训练时,$R_\theta$ 与 delta_attention 对齐(软监督)。推理时,$R_\theta$ 由模型自主预测,不需要任何 GRN 输入。
|
| 42 |
+
|
| 43 |
+
### 2.2 数学形式
|
| 44 |
+
|
| 45 |
+
**标准流匹配回顾**:
|
| 46 |
+
|
| 47 |
+
给定 affine 概率路径 $x_t = (1-t) \cdot \epsilon + t \cdot x_{target}$,目标速度为 $v_{target} = x_{target} - \epsilon$。
|
| 48 |
+
|
| 49 |
+
标准训练目标:$\mathcal{L}_{vel} = \mathbb{E}_t \| v_\theta(x_t, t) - v_{target} \|^2$
|
| 50 |
+
|
| 51 |
+
**RegFM 的速度场分解**:
|
| 52 |
+
|
| 53 |
+
给定 backbone 隐状态 $h \in \mathbb{R}^{B \times G \times d}$:
|
| 54 |
+
|
| 55 |
+
1. **调控交互矩阵**:
|
| 56 |
+
$$R_\theta = \tanh\!\left(\text{zero\_diag}\!\left(\frac{Q_r \cdot K_r^\top}{\sqrt{d_r}}\right)\right) \in [-1, 1]^{B \times G \times G}$$
|
| 57 |
+
其中 $Q_r = W_q \cdot h$,$K_r = W_k \cdot h$,zero_diag 置零对角线防止自环泄漏,tanh 匹配 delta_attn 值域
|
| 58 |
+
|
| 59 |
+
2. **调控速度**:
|
| 60 |
+
$$v_{reg} = \text{Linear}(R_\theta \cdot V_r) \in \mathbb{R}^{B \times G}$$
|
| 61 |
+
其中 $V_r = W_v \cdot h \in \mathbb{R}^{B \times G \times d_r}$
|
| 62 |
+
|
| 63 |
+
3. **内在速度**:
|
| 64 |
+
$$v_{int} = \text{ExprDecoder}(h) \in \mathbb{R}^{B \times G}$$
|
| 65 |
+
|
| 66 |
+
4. **门控混合**(三路条件化:基因状态 × 扰动类型 × 流时间步):
|
| 67 |
+
$$\alpha = \sigma(\text{MLP}([h;\; \text{pert\_emb};\; \text{t\_emb}])) \in (0, 1)^{B \times G}$$
|
| 68 |
+
$$v = \alpha \odot v_{reg} + (1 - \alpha) \odot v_{int}$$
|
| 69 |
+
|
| 70 |
+
**训练目标**:
|
| 71 |
+
|
| 72 |
+
$$\mathcal{L} = \mathcal{L}_{vel} + \lambda \cdot \mathcal{L}_{reg} + \gamma \cdot \mathcal{L}_{mmd}$$
|
| 73 |
+
|
| 74 |
+
- $\mathcal{L}_{vel} = \| v - v_{target} \|^2$(标准流匹配)
|
| 75 |
+
- $\mathcal{L}_{reg}$(调控结构监督,详见 §2.4)
|
| 76 |
+
- $\mathcal{L}_{mmd}$(可选 MMD loss,沿用 scDFM)
|
| 77 |
+
|
| 78 |
+
---
|
| 79 |
+
|
| 80 |
+
## 3. 架构设计
|
| 81 |
+
|
| 82 |
+
### 3.1 整体结构
|
| 83 |
+
|
| 84 |
+
```
|
| 85 |
+
Input: source(B,G), x_t(B,G), t(B,), pert_id(B,2), gene_ids(G,)
|
| 86 |
+
│
|
| 87 |
+
▼
|
| 88 |
+
┌─────────────────────────────────────────────┐
|
| 89 |
+
│ scDFM Backbone (不改) │
|
| 90 |
+
│ │
|
| 91 |
+
│ gene_emb = GeneEncoder(gene_ids) │
|
| 92 |
+
│ val_emb_xt = ContinuousValueEncoder(x_t) │
|
| 93 |
+
│ val_emb_src = ContinuousValueEncoder(src) │
|
| 94 |
+
│ + gene_emb │
|
| 95 |
+
│ fused = FusionLayer(cat(val_emb_xt, │
|
| 96 |
+
│ val_emb_src)) │
|
| 97 |
+
│ + gene_emb │
|
| 98 |
+
│ │
|
| 99 |
+
│ t_emb = TimestepEmbedder(t) │
|
| 100 |
+
│ pert_emb = get_perturbation_emb(pert_id) │
|
| 101 |
+
│ │
|
| 102 |
+
│ h = DiffPerceiverBlocks(fused, t_emb, │
|
| 103 |
+
│ pert_emb, gene_emb) │
|
| 104 |
+
│ → h: (B, G, d_model=128) │
|
| 105 |
+
└──────────────┬──────────────────────────────┘
|
| 106 |
+
│
|
| 107 |
+
┌───────┼───────────┐
|
| 108 |
+
▼ │ ▼
|
| 109 |
+
┌──────────┐ │ ┌──────────────────────────┐
|
| 110 |
+
│ v_int │ │ │ RegulatoryHead (新增) │
|
| 111 |
+
│ │ │ │ │
|
| 112 |
+
│ ExprDec │ │ │ Q = W_q(h) (B,G,d_r) │
|
| 113 |
+
│ (原有) │ │ │ K = W_k(h) (B,G,d_r) │
|
| 114 |
+
│ │ │ │ V = W_v(h) (B,G,d_r) │
|
| 115 |
+
│ → (B,G) │ │ │ │
|
| 116 |
+
└────┬─────┘ │ │ R = Q·K^T/√d_r (B,G,G) │──→ L_reg
|
| 117 |
+
│ │ │ │
|
| 118 |
+
│ │ │ agg = R · V (B,G,d_r) │
|
| 119 |
+
│ │ │ v_reg = Linear(agg) (B,G) │
|
| 120 |
+
│ │ └────────────┬──────────────┘
|
| 121 |
+
│ │ │
|
| 122 |
+
│ ┌────┴──────────────┐ │
|
| 123 |
+
│ │ Gate (新增) │ │
|
| 124 |
+
│ │ 输入: h+pert+t_emb │ │
|
| 125 |
+
│ │ MLP(384→128→1) │ │
|
| 126 |
+
│ │ α=σ(MLP[h;p;t]) │ │
|
| 127 |
+
│ │ (B,G) │ │
|
| 128 |
+
│ └────┬───────────────┘ │
|
| 129 |
+
│ │ │
|
| 130 |
+
▼ ▼ ▼
|
| 131 |
+
┌─────────────────────────────┐
|
| 132 |
+
│ v = α ⊙ v_reg │
|
| 133 |
+
│ + (1-α) ⊙ v_int │
|
| 134 |
+
│ → (B, G) │
|
| 135 |
+
└─────────────────────────────┘
|
| 136 |
+
```
|
| 137 |
+
|
| 138 |
+
### 3.2 各模块详细规格
|
| 139 |
+
|
| 140 |
+
**Backbone(完全复用 scDFM,不改)**:
|
| 141 |
+
|
| 142 |
+
| 参数 | 值 | 来源 |
|
| 143 |
+
|------|------|------|
|
| 144 |
+
| d_model | 128 | 与 baseline 一致 |
|
| 145 |
+
| nlayers | 4 | differential_perceiver 默认 |
|
| 146 |
+
| nhead | 8 | scDFM 默认 |
|
| 147 |
+
| d_hid | 512 | scDFM 默认 |
|
| 148 |
+
| fusion_method | differential_perceiver | scDFM 默认 |
|
| 149 |
+
|
| 150 |
+
**RegulatoryHead(新增)**:
|
| 151 |
+
|
| 152 |
+
```python
|
| 153 |
+
class RegulatoryHead(nn.Module):
|
| 154 |
+
def __init__(self, d_model: int, d_r: int = 32):
|
| 155 |
+
super().__init__()
|
| 156 |
+
self.d_r = d_r
|
| 157 |
+
self.W_q = nn.Linear(d_model, d_r, bias=False)
|
| 158 |
+
self.W_k = nn.Linear(d_model, d_r, bias=False)
|
| 159 |
+
self.W_v = nn.Linear(d_model, d_r, bias=False)
|
| 160 |
+
self.out_proj = nn.Linear(d_r, 1)
|
| 161 |
+
self.scale = d_r ** -0.5
|
| 162 |
+
|
| 163 |
+
def forward(self, h):
|
| 164 |
+
"""
|
| 165 |
+
Args:
|
| 166 |
+
h: (B, G, d_model) backbone hidden states
|
| 167 |
+
Returns:
|
| 168 |
+
v_reg: (B, G) regulatory velocity
|
| 169 |
+
R: (B, G, G) predicted interaction matrix
|
| 170 |
+
"""
|
| 171 |
+
Q = self.W_q(h) # (B, G, d_r)
|
| 172 |
+
K = self.W_k(h) # (B, G, d_r)
|
| 173 |
+
V = self.W_v(h) # (B, G, d_r)
|
| 174 |
+
|
| 175 |
+
R = torch.bmm(Q, K.transpose(1, 2)) # (B, G, G)
|
| 176 |
+
R = R * self.scale
|
| 177 |
+
|
| 178 |
+
# 移除对角线:防止自环泄漏,确保 v_reg 只编码基因间交互
|
| 179 |
+
R = R - torch.diag_embed(R.diagonal(dim1=1, dim2=2))
|
| 180 |
+
|
| 181 |
+
# Tanh 约束到 [-1, 1]:匹配 delta_attn 的值域,稳定训练
|
| 182 |
+
R = torch.tanh(R)
|
| 183 |
+
|
| 184 |
+
agg = torch.bmm(R, V) # (B, G, d_r)
|
| 185 |
+
v_reg = self.out_proj(agg).squeeze(-1) # (B, G)
|
| 186 |
+
|
| 187 |
+
return v_reg, R
|
| 188 |
+
```
|
| 189 |
+
|
| 190 |
+
**关键设计**:
|
| 191 |
+
- **移除对角线**:若 R[j,j] 很大,v_reg_j ≈ R[j,j]·V_r[j],退化为另一个 v_int。GRN 描述的是基因**间**的调控,自环属于内在动力学(v_int 负责)
|
| 192 |
+
- **Tanh 约束**:(1) delta_attn ∈ [-1,1],R_θ 匹配此值域使 L_reg 的 MSE 尺度合理;(2) 防止训练初期 R_θ 数值爆炸导致 v_reg 不稳定;(3) R_θ ∈ [-1,1] 有直接的生物学可解释性(调控强度)。v_reg = Linear(tanh(R)·V) 中 out_proj 可自行学习缩放
|
| 193 |
+
|
| 194 |
+
参数量:`3 * d_model * d_r + (d_r + 1) = 3 * 128 * 32 + 33 = 12,321`(极轻量;out_proj 含 bias)
|
| 195 |
+
|
| 196 |
+
**Gate(新增)**:
|
| 197 |
+
|
| 198 |
+
```python
|
| 199 |
+
class VelocityGate(nn.Module):
|
| 200 |
+
def __init__(self, d_model: int):
|
| 201 |
+
super().__init__()
|
| 202 |
+
# 三路输入: h (基因状态) + pert_emb (扰动标识) + t_emb (时间步)
|
| 203 |
+
self.mlp = nn.Sequential(
|
| 204 |
+
nn.Linear(d_model * 3, d_model),
|
| 205 |
+
nn.SiLU(),
|
| 206 |
+
nn.Linear(d_model, 1),
|
| 207 |
+
)
|
| 208 |
+
# 末层初始化: bias=-3 → sigmoid(-3)≈0.05, 训练初期 v ≈ v_int
|
| 209 |
+
nn.init.zeros_(self.mlp[-1].weight)
|
| 210 |
+
nn.init.constant_(self.mlp[-1].bias, -3.0)
|
| 211 |
+
|
| 212 |
+
def forward(self, h, pert_emb, t_emb):
|
| 213 |
+
"""
|
| 214 |
+
Args:
|
| 215 |
+
h: (B, G, d_model) backbone hidden states
|
| 216 |
+
pert_emb: (B, d_model) perturbation embedding
|
| 217 |
+
t_emb: (B, d_model) timestep embedding
|
| 218 |
+
Returns:
|
| 219 |
+
alpha: (B, G) in (0, 1), 初始≈0.05
|
| 220 |
+
"""
|
| 221 |
+
pert_exp = pert_emb.unsqueeze(1).expand_as(h) # (B, G, d_model)
|
| 222 |
+
t_exp = t_emb.unsqueeze(1).expand_as(h) # (B, G, d_model)
|
| 223 |
+
x = torch.cat([h, pert_exp, t_exp], dim=-1) # (B, G, 3*d_model)
|
| 224 |
+
return torch.sigmoid(self.mlp(x).squeeze(-1))
|
| 226 |
+
```
|
| 227 |
+
|
| 228 |
+
参数量:`384 × 128 + 128 + 128 × 1 + 1 = 49,409`(MLP: 3·d_model → d_model → 1,与 §8.2 表格一致)
|
| 229 |
+
|
| 230 |
+
**ExprDecoder(复用,不改)**:
|
| 231 |
+
|
| 232 |
+
原有的 3 层 MLP:`d_model → d_model → d_model → 1`,LeakyReLU 激活。
|
| 233 |
+
|
| 234 |
+
输入 `(B, G, d_model)`(不使用 perturbation concat,即 `use_batch_labels=False`),输出 `(B, G)`。
|
| 235 |
+
|
| 236 |
+
### 3.3 与 scDFM model 的集成方式
|
| 237 |
+
|
| 238 |
+
在 scDFM 的 `model.forward()` 最后阶段,原始代码:
|
| 239 |
+
|
| 240 |
+
```python
|
| 241 |
+
# 原始 scDFM (model.py line ~240)
|
| 242 |
+
x = self.decoder(x) # ExprDecoder, returns dict
|
| 243 |
+
return x['pred'] # (B, G)
|
| 244 |
+
```
|
| 245 |
+
|
| 246 |
+
RegFM 修改为:
|
| 247 |
+
|
| 248 |
+
```python
|
| 249 |
+
# RegFM
|
| 250 |
+
v_int = self.decoder(x)['pred'] # (B, G) — 原有 ExprDecoder
|
| 251 |
+
v_reg, R = self.reg_head(x) # (B, G), (B, G, G) — 新增
|
| 252 |
+
alpha = self.gate(x, pert_emb, t_emb) # (B, G) — 新增, 三路条件化
|
| 253 |
+
v = alpha * v_reg + (1 - alpha) * v_int
|
| 254 |
+
return v, R # 训练时返回 R 用于 L_reg
|
| 255 |
+
```
|
| 256 |
+
|
| 257 |
+
推理时只需要 `v`,`R` 可选择性保存用于事后分析。
|
| 258 |
+
|
| 259 |
+
---
|
| 260 |
+
|
| 261 |
+
## 4. 损失函数
|
| 262 |
+
|
| 263 |
+
### 4.1 速度损失 $\mathcal{L}_{vel}$(标准流匹配)
|
| 264 |
+
|
| 265 |
+
$$\mathcal{L}_{vel} = \frac{1}{B \cdot G} \sum_{b,g} (v_{pred}^{(b,g)} - v_{target}^{(b,g)})^2$$
|
| 266 |
+
|
| 267 |
+
与 scDFM baseline 完全一致。
|
| 268 |
+
|
| 269 |
+
### 4.2 调控结构监督 $\mathcal{L}_{reg}$
|
| 270 |
+
|
| 271 |
+
delta_attention 是高度稀疏的(非零比例 ≈ delta_topk / G_sub,默认 delta_topk=100、G_sub=1000 时约 10%),需要特殊处理:
|
| 272 |
+
|
| 273 |
+
```python
|
| 274 |
+
def compute_reg_loss(R_pred, delta_attn, missing_mask=None, sparse_weight=0.01):
|
| 275 |
+
"""
|
| 276 |
+
Magnitude-weighted L_reg with diagonal exclusion and sparsity regularization.
|
| 277 |
+
|
| 278 |
+
Args:
|
| 279 |
+
R_pred: (B, G, G) predicted interaction matrix (diagonal already zeroed)
|
| 280 |
+
delta_attn: (B, G, G) ground truth delta attention (sparse, top-delta_topk per row, 默认 100)
|
| 281 |
+
missing_mask: (G,) bool, True = gene exists in scGPT vocab
|
| 282 |
+
sparse_weight: float, weight for zero-entry sparsity regularization
|
| 283 |
+
Returns:
|
| 284 |
+
loss: scalar
|
| 285 |
+
"""
|
| 286 |
+
B, G, _ = R_pred.shape
|
| 287 |
+
|
| 288 |
+
# 1. 排除对角线(自环不属于 GRN)
|
| 289 |
+
diag_mask = torch.eye(G, dtype=torch.bool, device=R_pred.device)
|
| 290 |
+
R_pred = R_pred.masked_fill(diag_mask.unsqueeze(0), 0.0)
|
| 291 |
+
delta_attn = delta_attn.masked_fill(diag_mask.unsqueeze(0), 0.0)
|
| 292 |
+
|
| 293 |
+
# 2. 处理 missing genes: 清零对应行列
|
| 294 |
+
if missing_mask is not None:
|
| 295 |
+
inv = ~missing_mask
|
| 296 |
+
R_pred = R_pred.clone()
|
| 297 |
+
R_pred[:, inv, :] = 0; R_pred[:, :, inv] = 0
|
| 298 |
+
delta_attn = delta_attn.clone()
|
| 299 |
+
delta_attn[:, inv, :] = 0; delta_attn[:, :, inv] = 0
|
| 300 |
+
|
| 301 |
+
# 3. 非零 entry: magnitude-weighted MSE
|
| 302 |
+
# 大 |δ_attn| 的调控边获得更大权重,防止弱交互梯度淹没强交互
|
| 303 |
+
mask_nz = (delta_attn != 0)
|
| 304 |
+
if mask_nz.any():
|
| 305 |
+
residual = (R_pred[mask_nz] - delta_attn[mask_nz]) ** 2
|
| 306 |
+
mag_weights = delta_attn[mask_nz].abs()
|
| 307 |
+
mag_weights = mag_weights / mag_weights.sum() # 归一化为概率分布
|
| 308 |
+
loss_nz = (mag_weights * residual).sum()
|
| 309 |
+
else:
|
| 310 |
+
loss_nz = 0.0
|
| 311 |
+
|
| 312 |
+
# 4. 零 entry: Hard Negative Mining 稀疏正则
|
| 313 |
+
# 只惩罚"模型猜得大但实际为 0"的假阳性边,
|
| 314 |
+
# 忽略已经正确接近零的 entry(避免梯度被大量近零值稀释)
|
| 315 |
+
mask_zero = ~mask_nz
|
| 316 |
+
if missing_mask is not None:
|
| 317 |
+
valid = missing_mask.unsqueeze(0).unsqueeze(2) & missing_mask.unsqueeze(0).unsqueeze(1)
|
| 318 |
+
mask_zero = mask_zero & valid
|
| 319 |
+
|
| 320 |
+
if mask_zero.any():
|
| 321 |
+
zero_preds = R_pred[mask_zero] # 所有零 entry 的预测值
|
| 322 |
+
n_hard = min(3 * mask_nz.sum().item(), len(zero_preds)) # 采样 3× 正样本数
|
| 323 |
+
n_hard = max(int(n_hard), 1)
|
| 324 |
+
_, hard_idx = zero_preds.abs().topk(n_hard) # 取 |R_pred| 最大的
|
| 325 |
+
loss_sparse = zero_preds[hard_idx].pow(2).mean()
|
| 326 |
+
else:
|
| 327 |
+
loss_sparse = 0.0
|
| 328 |
+
|
| 329 |
+
return loss_nz + sparse_weight * loss_sparse
|
| 330 |
+
```
|
| 331 |
+
|
| 332 |
+
**设计要点**:
|
| 333 |
+
- **Magnitude weighting**:|δ_attn|=0.8 的强调控边权重远大于 |δ_attn|=0.01 的弱交互,防止弱信号梯度淹没强信号
|
| 334 |
+
- **对角线排除**:与 RegulatoryHead 的 zero-diagonal 一致,R_pred 和 delta_attn 的对角线均置零
|
| 335 |
+
- **Hard Negative Mining**:零 entry 中只惩罚 top-K 假阳性(K = 3× 非零 entry 数),梯度集中在真正有问题的边上,不被大量近零值稀释
|
| 336 |
+
- **delta_topk 默认 100**:覆盖方差拐点附近的有意义交互边,magnitude weighting 自动抑制尾部噪声
|
| 337 |
+
|
| 338 |
+
### 4.3 MMD 损失 $\mathcal{L}_{mmd}$(沿用 scDFM,可选)
|
| 339 |
+
|
| 340 |
+
```python
|
| 341 |
+
# 从 v_pred 计算 x_1_hat
|
| 342 |
+
x1_hat = x_t + v_pred * (1 - t).unsqueeze(-1)
|
| 343 |
+
sigmas = median_sigmas(target, scales=(0.5, 1.0, 2.0, 4.0))
|
| 344 |
+
loss_mmd = mmd2_unbiased_multi_sigma(x1_hat, target, sigmas)
|
| 345 |
+
```
|
| 346 |
+
|
| 347 |
+
### 4.4 总损失
|
| 348 |
+
|
| 349 |
+
$$\mathcal{L} = \mathcal{L}_{vel} + \lambda_{reg} \cdot \mathcal{L}_{reg} + \gamma \cdot \mathcal{L}_{mmd}$$
|
| 350 |
+
|
| 351 |
+
**超参数建议**:
|
| 352 |
+
- $\lambda_{reg} = 0.1$(目标值,可调)
|
| 353 |
+
- $\gamma = 0.5$(沿用 scDFM baseline)
|
| 354 |
+
- delta_topk = 100(第 ~92 位附近方差较大,消融对比 {50, 100, 150})
|
| 355 |
+
|
| 356 |
+
**两层 Warmup 策略**(架构层 + loss 层联合保护):
|
| 357 |
+
|
| 358 |
+
| 层级 | 机制 | 效果 |
|
| 359 |
+
|------|------|------|
|
| 360 |
+
| 架构层 | Gate bias 初始化为 -3(α≈0.05) | v ≈ v_int,v_reg 噪声不干扰 L_vel |
|
| 361 |
+
| Loss 层 | λ_reg 两阶段调度 | backbone 梯度前 N 步完全来自 L_vel |
|
| 362 |
+
|
| 363 |
+
```
|
| 364 |
+
λ_reg 调度 (从零训练):
|
| 365 |
+
Phase 1: step [0, 3000) → λ_reg = 0 (backbone 专注学 flow)
|
| 366 |
+
Phase 2: step [3000, 5000) → λ_reg 线性 0→0.1 (逐步引入调控监督)
|
| 367 |
+
Phase 3: step [5000, ∞) → λ_reg = 0.1 (正常训练)
|
| 368 |
+
|
| 369 |
+
λ_reg 调度 (warm start from baseline):
|
| 370 |
+
Phase 1: step [0, 1000) → λ_reg = 0
|
| 371 |
+
Phase 2: step [1000, 2000) → λ_reg 线性 0→0.1
|
| 372 |
+
Phase 3: step [2000, ∞) → λ_reg = 0.1
|
| 373 |
+
```
|
| 374 |
+
|
| 375 |
+
两层保护的必要性:Gate bias 只保护 L_vel 不被 v_reg 噪声影响,但 L_reg 的梯度仍通过 R_θ=Q(h)·K(h)^T 流入 backbone。Phase 1 的 λ_reg=0 确保 backbone 早期梯度完全来自 L_vel。
|
| 376 |
+
|
| 377 |
+
---
|
| 378 |
+
|
| 379 |
+
## 5. 训练流程
|
| 380 |
+
|
| 381 |
+
### 5.1 算法伪代码
|
| 382 |
+
|
| 383 |
+
```
|
| 384 |
+
Algorithm: RegFM Training
|
| 385 |
+
────────────────────────────────────────────────────────
|
| 386 |
+
Input:
|
| 387 |
+
- scDFM backbone (可从 baseline checkpoint warm start)
|
| 388 |
+
- SparseRawDeltaCache (已有, 来自 GRN 项目)
|
| 389 |
+
- GRNDatasetWrapper (已有, 提供 delta_attention)
|
| 390 |
+
|
| 391 |
+
Initialize:
|
| 392 |
+
- 加载 scDFM backbone weights (可选 warm start)
|
| 393 |
+
- 随机初始化 RegulatoryHead + VelocityGate
|
| 394 |
+
- Adam optimizer, lr=5e-5
|
| 395 |
+
- LinearLR warmup (2000 steps) → CosineAnnealingLR
|
| 396 |
+
- EMA model copy (decay=0.9999)
|
| 397 |
+
|
| 398 |
+
For iter = 1 to 200,000:
|
| 399 |
+
1. Sample batch from GRNDatasetWrapper:
|
| 400 |
+
{source, target, delta_attn, gene_ids_sub, input_gene_ids, condition_id}
|
| 401 |
+
source, target: (B, G_sub)
|
| 402 |
+
delta_attn: (B, G_sub, G_sub)
|
| 403 |
+
|
| 404 |
+
2. Flow matching path:
|
| 405 |
+
t ~ LogitNormal(0, 1) or Uniform[0, 1]
|
| 406 |
+
ε ~ N(0, I)
|
| 407 |
+
x_t = (1-t)·ε + t·target
|
| 408 |
+
v_target = target - ε (CondOT affine path)
|
| 409 |
+
|
| 410 |
+
3. Forward:
|
| 411 |
+
h = Backbone(gene_ids_sub, x_t, t, source, condition_id)
|
| 412 |
+
v_int = ExprDecoder(h)
|
| 413 |
+
v_reg, R_pred = RegulatoryHead(h)
|
| 414 |
+
α = Gate(h, pert_emb, t_emb)
|
| 415 |
+
v_pred = α · v_reg + (1-α) · v_int
|
| 416 |
+
|
| 417 |
+
4. Loss:
|
| 418 |
+
L_vel = MSE(v_pred, v_target)
|
| 419 |
+
L_reg = compute_reg_loss(R_pred, delta_attn, missing_mask)
|
| 420 |
+
L_mmd = mmd_loss(x_t, v_pred, t, target) # 可选
|
| 421 |
+
|
| 422 |
+
# λ_reg 两阶段调度
|
| 423 |
+
if iter < lambda_reg_zero_steps:
|
| 424 |
+
λ_eff = 0.0
|
| 425 |
+
elif iter < lambda_reg_zero_steps + lambda_reg_ramp_steps:
|
| 426 |
+
λ_eff = lambda_reg * (iter - lambda_reg_zero_steps) / lambda_reg_ramp_steps
|
| 427 |
+
else:
|
| 428 |
+
λ_eff = lambda_reg
|
| 429 |
+
|
| 430 |
+
L = L_vel + λ_eff · L_reg + γ · L_mmd
|
| 431 |
+
|
| 432 |
+
5. Backward + optimizer.step() + scheduler.step()
|
| 433 |
+
6. EMA update
|
| 434 |
+
|
| 435 |
+
Every 5000 iters:
|
| 436 |
+
Evaluate on validation set (cell-eval metrics)
|
| 437 |
+
Save checkpoint
|
| 438 |
+
```
|
| 439 |
+
|
| 440 |
+
### 5.2 数据加载
|
| 441 |
+
|
| 442 |
+
完全复用已有的 GRNDatasetWrapper + SparseRawDeltaCache:
|
| 443 |
+
|
| 444 |
+
- `SparseRawDeltaCache`:从 HDF5 读取稀疏 delta_attention → 稠密 (B, G_sub, G_sub)
|
| 445 |
+
- `GRNDatasetWrapper`:在 DataLoader worker 中完成 gene subsetting + cache lookup
|
| 446 |
+
- 返回格式不变:`{src_cell_data, tgt_cell_data, z_target, gene_ids_sub, input_gene_ids, condition_id}`
|
| 447 |
+
|
| 448 |
+
**唯一改动**:将 `z_target` 改名为 `delta_attn` 以提高语义清晰度(可选,非必须)。
|
| 449 |
+
|
| 450 |
+
### 5.3 关于 warm start
|
| 451 |
+
|
| 452 |
+
推荐两阶段训练策略:
|
| 453 |
+
|
| 454 |
+
1. **阶段 1(可选)**:先用标准 scDFM 训练 backbone 到一个合理的 checkpoint(或直接使用已有的 baseline checkpoint)
|
| 455 |
+
2. **阶段 2**:加载 backbone weights,新增 RegulatoryHead + Gate,用 RegFM 的完整 loss 继续训练
|
| 456 |
+
|
| 457 |
+
这避免了 RegulatoryHead 随机初始化的噪声干扰 backbone 的早期训练。
|
| 458 |
+
|
| 459 |
+
也可以选择 **端到端从零训练**,此时建议对 $\lambda_{reg}$ 做 warmup(前 N 步设为 0 或很小值)。
|
| 460 |
+
|
| 461 |
+
---
|
| 462 |
+
|
| 463 |
+
## 6. 推理流程
|
| 464 |
+
|
| 465 |
+
### 6.1 算法伪代码
|
| 466 |
+
|
| 467 |
+
```
|
| 468 |
+
Algorithm: RegFM Inference
|
| 469 |
+
────────────────────────────────────────────────────────
|
| 470 |
+
与标准 scDFM 完全相同,无任何额外输入。
|
| 471 |
+
|
| 472 |
+
Input: source (B, G), perturbation_id (B, 2)
|
| 473 |
+
|
| 474 |
+
1. Random gene subset: input_gene_ids = randperm(G_full)[:infer_top_gene]
|
| 475 |
+
source_sub = source[:, input_gene_ids]
|
| 476 |
+
|
| 477 |
+
2. Initialize: ε ~ N(0, I) shape (B, G_sub)
|
| 478 |
+
|
| 479 |
+
3. ODE integration:
|
| 480 |
+
traj = torchdiffeq.odeint(
|
| 481 |
+
func = lambda t, x: model(gene_ids_sub, x, t, source_sub, pert_id)[0],
|
| 482 |
+
# 只取 v, 忽略 R ^^^^
|
| 483 |
+
y0 = ε,
|
| 484 |
+
t = linspace(0, 1, steps=20),
|
| 485 |
+
method = "rk4",
|
| 486 |
+
atol = 1e-4, rtol = 1e-4,  # 注: rk4 为固定步长求解器, torchdiffeq 对其忽略 atol/rtol
|
| 487 |
+
)
|
| 488 |
+
|
| 489 |
+
4. x_pred = clamp(traj[-1], min=0)
|
| 490 |
+
|
| 491 |
+
Optional: 保存 R 用于可解释性分析
|
| 492 |
+
在 ODE 的最后一个时间步(t=1)额外运行一次 forward,获取 R_final
|
| 493 |
+
```
|
| 494 |
+
|
| 495 |
+
### 6.2 推理不需要 delta_attention
|
| 496 |
+
|
| 497 |
+
这是 RegFM 相对于级联方案的核心优势:
|
| 498 |
+
|
| 499 |
+
- **级联方案**:推理需要两阶段 ODE(先 latent 20 steps + 后 expression 20 steps = 40 steps)
|
| 500 |
+
- **RegFM**:推理只需要单阶段 ODE(20 steps),与 scDFM baseline 完全一致
|
| 501 |
+
- 速度提升约 2x,且无 latent flow 收敛的前置依赖
|
| 502 |
+
|
| 503 |
+
---
|
| 504 |
+
|
| 505 |
+
## 7. 配置设计
|
| 506 |
+
|
| 507 |
+
### 7.1 RegFMConfig
|
| 508 |
+
|
| 509 |
+
基于现有 CascadedFlowConfig,移除级联相关参数,新增 RegFM 参数:
|
| 510 |
+
|
| 511 |
+
```python
|
| 512 |
+
@dataclass
|
| 513 |
+
class RegFMConfig:
|
| 514 |
+
# === Base (与 scDFM baseline 对齐) ===
|
| 515 |
+
model_type: str = "regfm"
|
| 516 |
+
batch_size: int = 48 # 与 baseline 一致 (级联用 96 是因为不需要 G×G latent)
|
| 517 |
+
ntoken: int = 512
|
| 518 |
+
d_model: int = 128
|
| 519 |
+
nhead: int = 8
|
| 520 |
+
nlayers: int = 4 # differential_perceiver 默认
|
| 521 |
+
d_hid: int = 512
|
| 522 |
+
lr: float = 5e-5
|
| 523 |
+
steps: int = 200000
|
| 524 |
+
eta_min: float = 1e-6
|
| 525 |
+
|
| 526 |
+
data_name: str = "norman"
|
| 527 |
+
perturbation_function: str = "crisper"
|
| 528 |
+
noise_type: str = "Gaussian"
|
| 529 |
+
fusion_method: str = "differential_perceiver"
|
| 530 |
+
infer_top_gene: int = 1000
|
| 531 |
+
n_top_genes: int = 5000
|
| 532 |
+
fold: int = 1
|
| 533 |
+
split_method: str = "additive"
|
| 534 |
+
use_negative_edge: bool = True
|
| 535 |
+
topk: int = 30
|
| 536 |
+
|
| 537 |
+
mode: str = "predict_y"
|
| 538 |
+
gamma: float = 0.5 # MMD loss weight
|
| 539 |
+
use_mmd_loss: bool = True
|
| 540 |
+
print_every: int = 5000
|
| 541 |
+
|
| 542 |
+
# === RegFM 特有参数 (新增) ===
|
| 543 |
+
d_r: int = 32 # regulatory head 投影维度
|
| 544 |
+
lambda_reg: float = 0.1 # L_reg 目标权重
|
| 545 |
+
lambda_reg_zero_steps: int = 3000 # Phase 1: λ_reg 严格为 0 的步数
|
| 546 |
+
lambda_reg_ramp_steps: int = 2000 # Phase 2: 线性增长到 lambda_reg 的步数
|
| 547 |
+
gate_init_bias: float = -3.0 # Gate bias 初始值, sigmoid(-3)≈0.05
|
| 548 |
+
sparse_reg_weight: float = 0.01 # 零 entry 稀疏正则权重
|
| 549 |
+
|
| 550 |
+
# === Sparse attention cache (复用) ===
|
| 551 |
+
sparse_cache_path: str = "/home/hp250092/ku50001222/qian/aivc/lfj/GRN/grn_ccfm/cache/norman_attn_L11_sparse.h5"
|
| 552 |
+
delta_topk: int = 100 # per-row top-K (第~92位附近方差较大, 消融对比 {50,100,150})
|
| 553 |
+
|
| 554 |
+
# === EMA ===
|
| 555 |
+
ema_decay: float = 0.9999
|
| 556 |
+
|
| 557 |
+
# === LR warmup ===
|
| 558 |
+
warmup_steps: int = 2000
|
| 559 |
+
|
| 560 |
+
# === Time sampling ===
|
| 561 |
+
t_sample_mode: str = "logit_normal"
|
| 562 |
+
t_mean: float = 0.0
|
| 563 |
+
t_std: float = 1.0
|
| 564 |
+
|
| 565 |
+
# === Inference ===
|
| 566 |
+
ode_steps: int = 20
|
| 567 |
+
ode_method: str = "rk4"
|
| 568 |
+
eval_batch_size: int = 128
|
| 569 |
+
|
| 570 |
+
# === Warm start (可选) ===
|
| 571 |
+
pretrained_backbone: str = "" # scDFM baseline checkpoint 路径
|
| 572 |
+
|
| 573 |
+
# === Paths ===
|
| 574 |
+
result_path: str = "/home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/regfm"
|
| 575 |
+
exp_name: str = ""
|
| 576 |
+
```
|
| 577 |
+
|
| 578 |
+
### 7.2 移除的参数(相比级联方案)
|
| 579 |
+
|
| 580 |
+
以下参数不再需要:
|
| 581 |
+
- `choose_latent_p`(无 latent flow)
|
| 582 |
+
- `latent_weight`(无 latent loss)
|
| 583 |
+
- `noise_beta`(无级联噪声)
|
| 584 |
+
- `t_latent_mean/std`(无 latent 时间步)
|
| 585 |
+
- `latent_steps`(无 latent ODE)
|
| 586 |
+
- `bilinear_head_dim`(无 BilinearLatentDecoder)
|
| 587 |
+
|
| 588 |
+
---
|
| 589 |
+
|
| 590 |
+
## 8. 显存分析
|
| 591 |
+
|
| 592 |
+
### 8.1 R_θ 的显存开销
|
| 593 |
+
|
| 594 |
+
关键张量 `R = Q · K^T`,shape (B, G_sub, G_sub):
|
| 595 |
+
|
| 596 |
+
```
|
| 597 |
+
B=48, G_sub=1000: R = 48 × 1000 × 1000 × 4 bytes = 192 MB
|
| 598 |
+
B=96, G_sub=1000: R = 96 × 1000 × 1000 × 4 bytes = 384 MB
|
| 599 |
+
```
|
| 600 |
+
|
| 601 |
+
**对比**:当前级联方案 grn_att_only 已经在处理 (B=96, G_sub=1000, G_sub=1000) 的 z_target 张量,同样是 384 MB。所以这不是新增的显存瓶颈。
|
| 602 |
+
|
| 603 |
+
**如果显存紧张,可选优化**:
|
| 604 |
+
- 降低 batch_size 到 48(与 baseline 一致)
|
| 605 |
+
- 用 mixed precision (fp16):R 显存减半至 96 MB (B=48)
|
| 606 |
+
- chunk 计算:分块计算 R · V,不需要完整存储 R
|
| 607 |
+
|
| 608 |
+
### 8.2 新增参数量
|
| 609 |
+
|
| 610 |
+
| 模块 | 参数量 |
|
| 611 |
+
|------|--------|
|
| 612 |
+
| RegulatoryHead (W_q, W_k, W_v, out_proj) | 3 × 128 × 32 + 32 × 1 + 1 = 12,321 |
|
| 613 |
+
| VelocityGate (MLP: 384→128→1) | 384 × 128 + 128 + 128 × 1 + 1 = 49,409 |
|
| 614 |
+
| **总新增** | **~62K** |
|
| 615 |
+
|
| 616 |
+
scDFM backbone 约 2-3M 参数(4 层 differential_perceiver),新增 ~2% 参数。可忽略不计。
|
| 617 |
+
|
| 618 |
+
---
|
| 619 |
+
|
| 620 |
+
## 9. 实验设计
|
| 621 |
+
|
| 622 |
+
### 9.1 主实验:与 baseline 和级联方案对比
|
| 623 |
+
|
| 624 |
+
| Method | 描述 | GRN 使用方式 |
|
| 625 |
+
|--------|------|-------------|
|
| 626 |
+
| scDFM (baseline) | 原始流匹配 | 无 |
|
| 627 |
+
| Cascaded (grn_att_only) | 级联双 ODE | 生成目标 |
|
| 628 |
+
| Cascaded (grn_svd) | 级联 + SVD 压缩 | 生成目标 |
|
| 629 |
+
| **RegFM (ours)** | 结构化速度分解 | 训练时监督 |
|
| 630 |
+
|
| 631 |
+
评估指标:cell-eval 全套指标(MSE, Pearson, DE Spearman, Direction Match, PR-AUC 等)
|
| 632 |
+
|
| 633 |
+
### 9.2 消融实验
|
| 634 |
+
|
| 635 |
+
| 实验 | 配置 | 验证 |
|
| 636 |
+
|------|------|------|
|
| 637 |
+
| A1: v_int only | RegFM 移除 v_reg(相当于 scDFM + L_reg 辅助 loss) | L_reg 通过 backbone gradient 的间接效果 |
|
| 638 |
+
| A2: v_reg only | 移除 v_int,α 恒=1 | 纯调控驱动的速度场效果 |
|
| 639 |
+
| A3: 无门控 | α 恒=0.5(固定等权混合) | 门控学习的价值 |
|
| 640 |
+
| A4: 无 L_reg | RegFM 架构但 λ_reg=0(R_θ 完全自由学习) | 结构分解本身的归纳偏置 vs 监督信号 |
|
| 641 |
+
| A5: λ_reg 扫描 | λ_reg ∈ {0.01, 0.05, 0.1, 0.5, 1.0} | 最优监督强度 |
|
| 642 |
+
|
| 643 |
+
### 9.3 交互信号消融(论文 story 的关键实验)
|
| 644 |
+
|
| 645 |
+
| R_supervision 信号 | 来源 | 预期 |
|
| 646 |
+
|-------------------|------|------|
|
| 647 |
+
| Random | 随机生成 | 负对照,应 ≈ A4 (无 L_reg) |
|
| 648 |
+
| Δ_attn (scGPT L11) | 预训练模型 | 主实验 |
|
| 649 |
+
| Co-expression Δ | 训练数据统计:Pearson corr(target) - Pearson corr(source) | 纯数据驱动信号 |
|
| 650 |
+
| Known GRN (TRRUST) | 生物数据库 | 先验知识,静态(不含扰动特异性) |
|
| 651 |
+
|
| 652 |
+
如果 Δ_attn > Random → 说明 scGPT attention 变化捕获了有意义的交互结构
|
| 653 |
+
如果 Known GRN ≈ Δ_attn → 说明两者互通
|
| 654 |
+
如果 Δ_attn + Known GRN > 单独任一 → 说明互补
|
| 655 |
+
|
| 656 |
+
### 9.4 可解释性分析
|
| 657 |
+
|
| 658 |
+
1. **R_θ 可视化**:选择特定扰动,可视化 R_θ 的 top entries 作为 heatmap,与已知 GRN 对比
|
| 659 |
+
2. **Gate α 分析**:
|
| 660 |
+
- 被 knockout 的基因的 α 分布(预期偏低——内在驱动)
|
| 661 |
+
- 下游靶基因的 α 分布(预期偏高——调控驱动)
|
| 662 |
+
- α 随 t 的变化(是否反映调控级联的时序?)
|
| 663 |
+
3. **R_θ 随 t 的演化**:提取不同 t 时间步的 R_θ,分析调控结构是否随时间变化
|
| 664 |
+
|
| 665 |
+
---
|
| 666 |
+
|
| 667 |
+
## 10. 文件结构
|
| 668 |
+
|
| 669 |
+
```
|
| 670 |
+
GRN/regfm/ # 新建子目录
|
| 671 |
+
├── _bootstrap_scdfm.py # 复用:scDFM 模块导入
|
| 672 |
+
├── config/
|
| 673 |
+
│ └── config_regfm.py # 新建:RegFMConfig
|
| 674 |
+
├── scripts/
|
| 675 |
+
│ └── run_regfm.py # 新建:主训练/推理脚本
|
| 676 |
+
├── src/
|
| 677 |
+
│ ├── __init__.py
|
| 678 |
+
│ ├── _scdfm_imports.py # 复用:scDFM 导入桥
|
| 679 |
+
│ ├── utils.py # 复用
|
| 680 |
+
│ ├── model/
|
| 681 |
+
│ │ ├── __init__.py
|
| 682 |
+
│ │ ├── model.py # 修改:RegFMModel (继承/包装 scDFM model)
|
| 683 |
+
│ │ └── layers.py # 新建:RegulatoryHead, VelocityGate
|
| 684 |
+
│ ├── denoiser.py # 新建:RegFMDenoiser (简化版, 无级联)
|
| 685 |
+
│ └── data/
|
| 686 |
+
│ ├── __init__.py
|
| 687 |
+
│ ├── data.py # 复用:GRNDatasetWrapper
|
| 688 |
+
│ └── sparse_raw_cache.py # 复用:SparseRawDeltaCache
|
| 689 |
+
└── run_regfm.sh # 新建:SLURM 提交脚本
|
| 690 |
+
```
|
| 691 |
+
|
| 692 |
+
### 10.1 复用清单
|
| 693 |
+
|
| 694 |
+
| 文件 | 来源 | 复用方式 |
|
| 695 |
+
|------|------|---------|
|
| 696 |
+
| `_bootstrap_scdfm.py` | grn_att_only | 直接复制 |
|
| 697 |
+
| `_scdfm_imports.py` | grn_att_only | 直接复制 |
|
| 698 |
+
| `utils.py` | grn_att_only | 直接复制 |
|
| 699 |
+
| `data/data.py` | grn_att_only | 直接复制(GRNDatasetWrapper) |
|
| 700 |
+
| `data/sparse_raw_cache.py` | grn_att_only | 直接复制(SparseRawDeltaCache) |
|
| 701 |
+
| scDFM backbone classes | ori_scDFM | 通过 _scdfm_imports 导入 |
|
| 702 |
+
| ExprDecoder | ori_scDFM | 通过 _scdfm_imports 导入 |
|
| 703 |
+
| AffineProbPath | ori_scDFM | 通过 _scdfm_imports 导入 |
|
| 704 |
+
| cell-eval MetricsEvaluator | cell-eval package | pip install |
|
| 705 |
+
|
| 706 |
+
### 10.2 新建文件清单
|
| 707 |
+
|
| 708 |
+
| 文件 | 内容 | 行数估计 |
|
| 709 |
+
|------|------|---------|
|
| 710 |
+
| `config/config_regfm.py` | RegFMConfig dataclass | ~80 行 |
|
| 711 |
+
| `src/model/layers.py` | RegulatoryHead + VelocityGate | ~60 行 |
|
| 712 |
+
| `src/model/model.py` | RegFMModel(包装 scDFM model + 新增 head) | ~80 行 |
|
| 713 |
+
| `src/denoiser.py` | RegFMDenoiser(train_step + generate) | ~150 行 |
|
| 714 |
+
| `scripts/run_regfm.py` | 主脚本(训练循环 + 评估) | ~300 行 |
|
| 715 |
+
| `run_regfm.sh` | SLURM 提交 | ~20 行 |
|
| 716 |
+
| **总计** | | **~690 行新代码** |
|
| 717 |
+
|
| 718 |
+
---
|
| 719 |
+
|
| 720 |
+
## 11. 风险和缓解
|
| 721 |
+
|
| 722 |
+
| 风险 | 缓解措施 |
|
| 723 |
+
|------|---------|
|
| 724 |
+
| R_θ 显存过大 (G=5000) | 训练用 G_sub=1000,推理同理。如需全基因:低秩分解 |
|
| 725 |
+
| L_reg 干扰 L_vel 的训练 | λ_reg warmup;消融实验 A4 验证 |
|
| 726 |
+
| delta_attention 噪声大,误导 R_θ | 软约束(MSE,非硬对齐);消融实验验证信号质量 |
|
| 727 |
+
| Gate α 塌缩到 0 或 1 | 监控 α 分布;必要时加 entropy regularization |
|
| 728 |
+
| scDFM baseline 本身就够好 | 这正是论文需要验证的假设;若不够好,RegFM 的改进空间更大 |
|
| 729 |
+
|
| 730 |
+
---
|
| 731 |
+
|
| 732 |
+
## 12. 论文结构建议
|
| 733 |
+
|
| 734 |
+
```
|
| 735 |
+
Title: Regulatory Flow Matching: Structuring Velocity Fields
|
| 736 |
+
with Gene Interaction Priors for Perturbation Prediction
|
| 737 |
+
|
| 738 |
+
1. Introduction
|
| 739 |
+
- 扰动预测的重要性
|
| 740 |
+
- Flow matching 的局限(无结构速度场)
|
| 741 |
+
- 核心贡献:结构化速度分解 + 交互矩阵监督
|
| 742 |
+
|
| 743 |
+
2. Background
|
| 744 |
+
- Flow matching / Conditional OT
|
| 745 |
+
- scDFM 回顾
|
| 746 |
+
- Gene regulatory networks
|
| 747 |
+
|
| 748 |
+
3. Method: Regulatory Flow Matching
|
| 749 |
+
3.1 Velocity field decomposition
|
| 750 |
+
3.2 Regulatory interaction head
|
| 751 |
+
3.3 Gated velocity mixing
|
| 752 |
+
3.4 Interaction supervision objective
|
| 753 |
+
3.5 Training and inference
|
| 754 |
+
|
| 755 |
+
4. Experiments
|
| 756 |
+
4.1 Setup (Norman dataset, baselines, metrics)
|
| 757 |
+
4.2 Main results (vs scDFM, vs cascaded methods)
|
| 758 |
+
4.3 Ablation study (decomposition components)
|
| 759 |
+
4.4 Interaction signal analysis (Δ_attn vs known GRN vs random)
|
| 760 |
+
4.5 Interpretability (R_θ visualization, gate analysis)
|
| 761 |
+
|
| 762 |
+
5. Related Work
|
| 763 |
+
- Flow matching for biology
|
| 764 |
+
- GRN-informed generative models
|
| 765 |
+
- Privileged information learning
|
| 766 |
+
|
| 767 |
+
6. Conclusion
|
| 768 |
+
```
|
GRN/SB/_bootstrap_scdfm.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Bootstrap scDFM imports by creating missing __init__.py files and loading
|
| 3 |
+
its modules under a 'scdfm_src' prefix in sys.modules.
|
| 4 |
+
|
| 5 |
+
This module MUST be imported before any CCFM src imports.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import sys
|
| 9 |
+
import os
|
| 10 |
+
import types
|
| 11 |
+
|
| 12 |
+
_SCDFM_ROOT = os.path.normpath(
|
| 13 |
+
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "transfer", "code", "scDFM")
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
# Directories in scDFM that need __init__.py to be proper packages
|
| 17 |
+
_DIRS_NEEDING_INIT = [
|
| 18 |
+
"src",
|
| 19 |
+
"src/models",
|
| 20 |
+
"src/models/origin",
|
| 21 |
+
"src/data_process",
|
| 22 |
+
"src/tokenizer",
|
| 23 |
+
"src/script",
|
| 24 |
+
"src/models/perturbation",
|
| 25 |
+
]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _ensure_init_files():
    """Create missing __init__.py files in scDFM so it can be imported as packages.

    Directories listed in _DIRS_NEEDING_INIT that do not exist on disk are
    skipped instead of raising FileNotFoundError from open() (the original
    crashed if the scDFM checkout was partial or its layout changed).

    Returns:
        list[str]: paths of the __init__.py files that were created.
    """
    created = []
    for d in _DIRS_NEEDING_INIT:
        pkg_dir = os.path.join(_SCDFM_ROOT, d)
        if not os.path.isdir(pkg_dir):
            # Missing directory: nothing to make importable here.
            continue
        init_path = os.path.join(pkg_dir, "__init__.py")
        if not os.path.exists(init_path):
            with open(init_path, "w") as f:
                f.write("# Auto-created by CCFM bootstrap\n")
            created.append(init_path)
    return created
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def bootstrap():
    """Load scDFM's src package as 'scdfm_src' in sys.modules.

    Idempotent: once 'scdfm_src' is registered, further calls are no-ops.
    CCFM's own 'src' package is temporarily evicted from sys.modules so the
    two identically named top-level packages never collide; it is restored
    afterwards, and scDFM's modules remain reachable only via the
    'scdfm_src.*' aliases.
    """
    if "scdfm_src" in sys.modules:
        return  # Already bootstrapped

    # Create missing __init__.py files
    _ensure_init_files()

    # Stash CCFM's own src modules so scDFM's src can be imported.
    ccfm_src_modules = {}
    for name in list(sys.modules.keys()):
        if name == "src" or name.startswith("src."):
            ccfm_src_modules[name] = sys.modules.pop(name)

    # Make scDFM's top-level 'src' the first match on the import path.
    sys.path.insert(0, _SCDFM_ROOT)

    try:
        # Import scDFM modules (their relative imports work now)
        import src as scdfm_src_pkg
        import src.models
        import src.models.origin
        import src.models.origin.blocks
        import src.models.origin.layers
        import src.models.origin.model
        import src.flow_matching
        import src.flow_matching.path
        import src.flow_matching.path.path
        import src.flow_matching.path.path_sample
        import src.flow_matching.path.affine
        import src.flow_matching.path.scheduler
        import src.flow_matching.path.scheduler.scheduler
        # Skip src.flow_matching.ot (requires 'ot' package, not needed for CCFM)
        import src.utils
        import src.utils.utils
        import src.tokenizer
        import src.tokenizer.gene_tokenizer
        # Skip src.data_process (has heavy deps like bs4, rdkit)
        # We handle data loading separately in CCFM

        # Re-register every freshly imported module under the scdfm_src.* prefix.
        for name in list(sys.modules.keys()):
            if name == "src" or name.startswith("src."):
                sys.modules["scdfm_" + name] = sys.modules[name]

    finally:
        # Drop scDFM's plain src.* entries so CCFM's src can come back.
        for name in list(sys.modules.keys()):
            if (name == "src" or name.startswith("src.")) and not name.startswith("scdfm_"):
                del sys.modules[name]

        # Restore CCFM's src modules.
        for name, module in ccfm_src_modules.items():
            sys.modules[name] = module

        # Undo the import-path manipulation.
        if _SCDFM_ROOT in sys.path:
            sys.path.remove(_SCDFM_ROOT)


bootstrap()
|
GRN/SB/config/__init__.py
ADDED
|
File without changes
|
GRN/SB/config/config_sb.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass
import os


@dataclass
class SBConfig:
    """Configuration for the Anisotropic Schrödinger Bridge (SB) experiments.

    Field names, order and defaults mirror the CLI flags used by
    scripts/run_sb.py; do not reorder fields (positional construction).
    """

    # === Base (same as scDFM FlowConfig) ===
    model_type: str = "sb"
    batch_size: int = 48
    ntoken: int = 512
    d_model: int = 128
    nhead: int = 8
    nlayers: int = 4
    d_hid: int = 512
    lr: float = 5e-5
    steps: int = 200000
    eta_min: float = 1e-6
    devices: str = "1"
    test_only: bool = False

    data_name: str = "norman"
    perturbation_function: str = "crisper"
    noise_type: str = "Gaussian"
    poisson_alpha: float = 0.8
    poisson_target_sum: int = -1

    print_every: int = 5000
    mode: str = "predict_y"
    result_path: str = "/home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/sb"
    fusion_method: str = "differential_perceiver"
    infer_top_gene: int = 1000
    n_top_genes: int = 5000
    checkpoint_path: str = ""
    gamma: float = 0.5
    split_method: str = "additive"
    use_mmd_loss: bool = True
    fold: int = 1
    use_negative_edge: bool = True
    topk: int = 30

    # === Anisotropic diffusion ===
    sigma_min: float = 0.01
    sigma_max: float = 2.0
    sigma_init: float = 0.5
    sigma_hidden_dim: int = 256
    sigma_num_layers: int = 2

    # === Score training ===
    score_weight: float = 0.1
    score_head_depth: int = 2
    score_t_clip: float = 0.02
    use_score: bool = True  # False for A1-A3 ablations (no score head)

    # === σ_g regularization ===
    sigma_base: float = 0.5
    sigma_sparse_weight: float = 0.01
    sigma_volume_weight: float = 0.01

    # === OT coupling ===
    ot_method: str = "sinkhorn"  # "sinkhorn" or "exact"
    ot_reg: float = 0.05
    ot_use_sigma: bool = True  # use anisotropic Mahalanobis cost

    # === SDE inference ===
    sde_steps: int = 50
    use_sde_inference: bool = True  # False = PF-ODE (dx/dt = v_θ)

    # === Source-Anchored Bridge ===
    source_anchored: bool = False  # True = x_0 = source; False = x_0 = noise

    # === EMA ===
    ema_decay: float = 0.9999

    # === Logit-normal time-step sampling ===
    t_sample_mode: str = "logit_normal"
    t_mean: float = 0.0
    t_std: float = 1.0

    # === LR warmup ===
    warmup_steps: int = 2000

    # === Inference ===
    ode_method: str = "rk4"
    ode_steps: int = 20
    eval_batch_size: int = 32

    exp_name: str = ""

    def __post_init__(self):
        # The norman dataset is always run with the full 5000 HVGs.
        if self.data_name == "norman":
            self.n_top_genes = 5000

    def make_path(self):
        """Return the output directory: result_path joined with exp_name,
        or with an auto-generated name summarizing the key hyperparameters
        when exp_name is empty."""
        name = self.exp_name
        if not name:
            sde_tag = self.sde_steps if self.use_sde_inference else "off"
            name = (
                f"sb-{self.data_name}-f{self.fold}"
                f"-d{self.d_model}-lr{self.lr}"
                f"-sw{self.score_weight}-si{self.sigma_init}"
                f"-ot{self.ot_method}-reg{self.ot_reg}"
                f"-sde{sde_tag}"
            )
        return os.path.join(self.result_path, name)
|
GRN/SB/run_a1_baseline.sh
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=48:00:00
#PJM -N sb_a1_baseline
#PJM -j
#PJM -o logs/a1_%j.out

# Fail fast: abort if module load, venv activation, or cd fails, instead of
# silently launching training in a broken environment or wrong directory.
# -u is deliberately omitted: 'module' and venv activate scripts may
# reference unset variables.
set -eo pipefail

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/hp250092/ku50001222/qian/aivc/lfj/ori_scDFM_env/bin/activate

cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB

export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# === A1: Baseline reproduction (no score, no sigma, exact OT, ODE) ===
accelerate launch --num_processes=1 scripts/run_sb.py \
    --data-name norman \
    --d-model 128 --nhead 8 --nlayers 4 --d-hid 512 \
    --batch-size 48 --lr 5e-5 --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 --n-top-genes 5000 \
    --use-mmd-loss --gamma 0.5 \
    --split-method additive --fold 1 --topk 30 --use-negative-edge \
    --no-use-score \
    --ot-method exact --no-ot-use-sigma \
    --no-use-sde-inference --ode-steps 20 --ode-method rk4 \
    --ema-decay 0.9999 --warmup-steps 2000 \
    --t-sample-mode logit_normal --t-mean 0.0 --t-std 1.0 \
    --print-every 5000 --eval-batch-size 32 \
    --exp-name A1_baseline \
    --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB
|
GRN/SB/run_eval_rk4.sh
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=6:00:00
#PJM -N sb_eval_rk4
#PJM -j
#PJM -o /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB/logs/eval_rk4_%j.out

# Fail fast during environment setup (module load, venv activation, cd).
# -u omitted: 'module' and venv activate scripts may reference unset vars.
set -eo pipefail

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/hp250092/ku50001222/qian/aivc/lfj/ori_scDFM_env/bin/activate

cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB

export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# The two evaluations are independent: run both even if the first fails,
# but remember any failure and propagate it through the final exit status
# (previously a failed evaluation was silently ignored).
set +e
rc=0

# === Re-evaluate A1_baseline with torchdiffeq RK4 ODE (was Euler before) ===
echo "=== Evaluating A1_baseline with RK4 ODE ==="
accelerate launch --num_processes=1 scripts/run_sb.py \
    --data-name norman \
    --d-model 128 --nhead 8 --nlayers 4 --d-hid 512 \
    --batch-size 48 --lr 5e-5 --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 --n-top-genes 5000 \
    --use-mmd-loss --gamma 0.5 \
    --split-method additive --fold 1 --topk 30 --use-negative-edge \
    --no-use-score \
    --ot-method exact --no-ot-use-sigma \
    --no-use-sde-inference --ode-steps 20 --ode-method rk4 \
    --ema-decay 0.9999 --warmup-steps 2000 \
    --t-sample-mode logit_normal --t-mean 0.0 --t-std 1.0 \
    --print-every 5000 --eval-batch-size 32 \
    --exp-name A1_baseline \
    --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB \
    --checkpoint-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB/A1_baseline/iteration_195000/checkpoint.pt \
    --test-only || rc=$?

# === Re-evaluate A6_dsm_aniso with ODE instead of SDE ===
echo "=== Evaluating A6_dsm_aniso with RK4 ODE (was SDE-50 before) ==="
accelerate launch --num_processes=1 scripts/run_sb.py \
    --data-name norman \
    --d-model 128 --nhead 8 --nlayers 4 --d-hid 512 \
    --batch-size 48 --lr 5e-5 --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 --n-top-genes 5000 \
    --use-mmd-loss --gamma 0.5 \
    --split-method additive --fold 1 --topk 30 --use-negative-edge \
    --use-score --score-weight 0.5 --score-head-depth 2 --score-t-clip 0.02 \
    --sigma-min 0.01 --sigma-max 2.0 --sigma-init 0.5 \
    --sigma-base 0.5 --sigma-sparse-weight 0.01 --sigma-volume-weight 0.01 \
    --ot-method sinkhorn --ot-reg 0.05 --ot-use-sigma \
    --no-use-sde-inference --ode-steps 20 \
    --ema-decay 0.9999 --warmup-steps 2000 \
    --t-sample-mode logit_normal --t-mean 0.0 --t-std 1.0 \
    --print-every 5000 --eval-batch-size 32 \
    --exp-name A6_dsm_aniso \
    --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB \
    --checkpoint-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB/A6_dsm_aniso/iteration_195000/checkpoint.pt \
    --test-only || rc=$?

exit "$rc"
|
GRN/SB/run_sb.sh
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=48:00:00
#PJM -N sb_a5_full
#PJM -j
#PJM -o logs/sb_%j.out

# Fail fast: abort if module load, venv activation, or cd fails, instead of
# silently launching training in a broken environment or wrong directory.
# -u is deliberately omitted: 'module' and venv activate scripts may
# reference unset variables.
set -eo pipefail

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/hp250092/ku50001222/qian/aivc/lfj/ori_scDFM_env/bin/activate

cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB

export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# === A5: Full ASB with SDE inference ===
accelerate launch --num_processes=1 scripts/run_sb.py \
    --data-name norman \
    --d-model 128 --nhead 8 --nlayers 4 --d-hid 512 \
    --batch-size 48 --lr 5e-5 --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 --n-top-genes 5000 \
    --use-mmd-loss --gamma 0.5 \
    --split-method additive --fold 1 --topk 30 --use-negative-edge \
    --sigma-min 0.01 --sigma-max 2.0 --sigma-init 0.5 \
    --sigma-base 0.5 --sigma-sparse-weight 0.01 --sigma-volume-weight 0.01 \
    --score-weight 0.1 --score-head-depth 2 --score-t-clip 0.02 --use-score \
    --ot-method sinkhorn --ot-reg 0.05 --ot-use-sigma \
    --use-sde-inference --sde-steps 50 \
    --ema-decay 0.9999 --warmup-steps 2000 \
    --t-sample-mode logit_normal --t-mean 0.0 --t-std 1.0 \
    --print-every 5000 --eval-batch-size 32 \
    --exp-name A5_full_asb_sde \
    --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB
|
GRN/SB/run_sb_a6.sh
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=48:00:00
#PJM -N sb_a6_dsm
#PJM -j
#PJM -o logs/sb_%j.out

# Fail fast: abort if module load, venv activation, or cd fails, instead of
# silently launching training in a broken environment or wrong directory.
# -u is deliberately omitted: 'module' and venv activate scripts may
# reference unset variables.
set -eo pipefail

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/hp250092/ku50001222/qian/aivc/lfj/ori_scDFM_env/bin/activate

cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB

export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# === A6: Fixed DSM score loss + anisotropic sigma unlocked ===
# vs A5: (1) var_t-weighted score loss (≡ ε-prediction, loss_s ~1 not ~16)
#        (2) L1 sparse penalty removed from total loss (σ_g free to vary per gene)
#        (3) score_weight 0.1 → 0.5 (compensate loss_s magnitude drop 16x → 1x)
accelerate launch --num_processes=1 scripts/run_sb.py \
    --data-name norman \
    --d-model 128 --nhead 8 --nlayers 4 --d-hid 512 \
    --batch-size 48 --lr 5e-5 --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 --n-top-genes 5000 \
    --use-mmd-loss --gamma 0.5 \
    --split-method additive --fold 1 --topk 30 --use-negative-edge \
    --sigma-min 0.01 --sigma-max 2.0 --sigma-init 0.5 \
    --sigma-base 0.5 --sigma-sparse-weight 0.01 --sigma-volume-weight 0.01 \
    --score-weight 0.5 --score-head-depth 2 --score-t-clip 0.02 --use-score \
    --ot-method sinkhorn --ot-reg 0.05 --ot-use-sigma \
    --use-sde-inference --sde-steps 50 \
    --ema-decay 0.9999 --warmup-steps 2000 \
    --t-sample-mode logit_normal --t-mean 0.0 --t-std 1.0 \
    --print-every 5000 --eval-batch-size 32 \
    --exp-name A6_dsm_aniso \
    --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB
|
GRN/SB/run_sb_sa6.sh
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=48:00:00
#PJM -N sb_sa6_sde
#PJM -j
#PJM -o /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB/logs/sb_sa6_%j.out

# Fail fast: abort if module load, venv activation, or cd fails, instead of
# silently launching training in a broken environment or wrong directory.
# -u is deliberately omitted: 'module' and venv activate scripts may
# reference unset variables.
set -eo pipefail

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/hp250092/ku50001222/qian/aivc/lfj/ori_scDFM_env/bin/activate

cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB

export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# === SA6: Source-Anchored + Score Head + SDE inference ===
# vs SA1 (no score, ODE): is a score head useful under source-anchoring?
# vs A6 (noise-anchored, score, SDE): effect of source-anchoring on SDE inference?
accelerate launch --num_processes=1 scripts/run_sb.py \
    --data-name norman \
    --d-model 128 --nhead 8 --nlayers 4 --d-hid 512 \
    --batch-size 48 --lr 5e-5 --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 --n-top-genes 5000 \
    --use-mmd-loss --gamma 0.5 \
    --split-method additive --fold 1 --topk 30 --use-negative-edge \
    --sigma-min 0.001 --sigma-max 0.5 --sigma-init 0.01 \
    --sigma-base 0.01 --sigma-sparse-weight 0.01 --sigma-volume-weight 0.01 \
    --use-score --score-weight 0.5 --score-head-depth 2 --score-t-clip 0.02 \
    --ot-method sinkhorn --ot-reg 0.05 --ot-use-sigma \
    --use-sde-inference --sde-steps 50 \
    --ema-decay 0.9999 --warmup-steps 2000 \
    --t-sample-mode uniform \
    --print-every 5000 --eval-batch-size 32 \
    --source-anchored \
    --exp-name SA6_source_anchored_sde \
    --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB
|
GRN/SB/run_sb_source_anchored.sh
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=48:00:00
#PJM -N sb_sa1_ode
#PJM -j
#PJM -o /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB/logs/sb_sa1_%j.out

# Fail fast: abort if module load, venv activation, or cd fails, instead of
# silently launching training in a broken environment or wrong directory.
# -u is deliberately omitted: 'module' and venv activate scripts may
# reference unset variables.
set -eo pipefail

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/hp250092/ku50001222/qian/aivc/lfj/ori_scDFM_env/bin/activate

cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/SB

export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# === SA1: Source-Anchored Bridge, no score head, ODE inference ===
# x_0 = source (control cells), not random noise
# sigma ~100x smaller (0.01 vs 0.5) to match perturbation effect scale
# OT couples control <-> perturbed cells (biologically meaningful)
# uniform time sampling (short trajectory, all timesteps equally important)
accelerate launch --num_processes=1 scripts/run_sb.py \
    --data-name norman \
    --d-model 128 --nhead 8 --nlayers 4 --d-hid 512 \
    --batch-size 48 --lr 5e-5 --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --noise-type Gaussian \
    --infer-top-gene 1000 --n-top-genes 5000 \
    --use-mmd-loss --gamma 0.5 \
    --split-method additive --fold 1 --topk 30 --use-negative-edge \
    --sigma-min 0.001 --sigma-max 0.5 --sigma-init 0.01 \
    --sigma-base 0.01 --sigma-sparse-weight 0.01 --sigma-volume-weight 0.01 \
    --no-use-score \
    --ot-method sinkhorn --ot-reg 0.05 --ot-use-sigma \
    --no-use-sde-inference --ode-steps 20 \
    --ema-decay 0.9999 --warmup-steps 2000 \
    --t-sample-mode uniform \
    --print-every 5000 --eval-batch-size 32 \
    --source-anchored \
    --exp-name SA1_source_anchored_ode \
    --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/SB
|
GRN/SB/scripts/run_sb.py
ADDED
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Training and evaluation entry point for Anisotropic Schrödinger Bridge (SB).
|
| 3 |
+
|
| 4 |
+
Simplified from grn_svd: no latent stream, no sparse cache, no SVD dict.
|
| 5 |
+
Single-stage generation with SDE (or PF-ODE ablation).
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import sys
|
| 9 |
+
import os
|
| 10 |
+
|
| 11 |
+
_PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 12 |
+
sys.path.insert(0, _PROJECT_ROOT)
|
| 13 |
+
|
| 14 |
+
import _bootstrap_scdfm # noqa: F401
|
| 15 |
+
|
| 16 |
+
import copy
|
| 17 |
+
import csv
|
| 18 |
+
import torch
|
| 19 |
+
import tyro
|
| 20 |
+
import tqdm
|
| 21 |
+
import numpy as np
|
| 22 |
+
import pandas as pd
|
| 23 |
+
import anndata as ad
|
| 24 |
+
from torch.utils.data import DataLoader
|
| 25 |
+
from tqdm import trange
|
| 26 |
+
from accelerate import Accelerator, DistributedDataParallelKwargs
|
| 27 |
+
from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR, SequentialLR
|
| 28 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 29 |
+
|
| 30 |
+
from config.config_sb import SBConfig as Config
|
| 31 |
+
from src.data.data import get_data_classes
|
| 32 |
+
from src.model.model import SBModel
|
| 33 |
+
from src.denoiser import SBDenoiser
|
| 34 |
+
from src.utils import (
|
| 35 |
+
save_checkpoint, load_checkpoint, pick_eval_score,
|
| 36 |
+
process_vocab, set_requires_grad_for_p_only, GeneVocab,
|
| 37 |
+
)
|
| 38 |
+
from cell_eval import MetricsEvaluator
|
| 39 |
+
|
| 40 |
+
_REPO_ROOT = os.path.normpath(os.path.join(_PROJECT_ROOT, "..", "..", "transfer", "code"))
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@torch.inference_mode()
def test(data_sampler, denoiser, accelerator, config, vocab, data_manager,
         batch_size=32, path_dir="./"):
    """Evaluate: generate predictions and compute cell-eval metrics.

    For each held-out perturbation, generates expression profiles from a
    random subset of control cells with the SB model (SDE or PF-ODE,
    per config), then scores predictions against the measured perturbed
    cells using cell_eval's MetricsEvaluator.

    Args:
        data_sampler: validation sampler exposing ``.adata``,
            ``get_control_data()``, ``get_perturbation_data()`` and
            ``_perturbation_covariates`` (list of perturbation names).
        denoiser: SBDenoiser, possibly DDP-wrapped (``.module`` unwrapped below).
        accelerator: HF Accelerate handle; only the main process writes files.
        config: SBConfig carrying inference settings.
        vocab: GeneVocab used to encode gene / perturbation names.
        data_manager: scDFM Data object providing ``perturbation_dict``.
        batch_size: generation minibatch size.
        path_dir: output directory for CSV / h5ad artifacts.

    Returns:
        float eval score (first available of mse / pearson_delta / pr_auc)
        on the main process; None on non-main processes or if no metric found.
    """
    device = accelerator.device
    # Encode the full evaluation gene panel once; reused for every batch.
    gene_ids_test = vocab.encode(list(data_sampler.adata.var_names))
    gene_ids_test = torch.tensor(gene_ids_test, dtype=torch.long, device=device)

    perturbation_name_list = data_sampler._perturbation_covariates
    control_data = data_sampler.get_control_data()
    # Invert {name -> id} so numeric condition ids map back to name strings.
    inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}

    # Control cells seed both pred and real collections so cell_eval can use
    # them as the reference ("control") population.
    all_pred = [control_data["src_cell_data"]]
    obs_pred = ["control"] * control_data["src_cell_data"].shape[0]
    all_real = [control_data["src_cell_data"]]
    obs_real = ["control"] * control_data["src_cell_data"].shape[0]

    for pert_name in perturbation_name_list:
        pert_data = data_sampler.get_perturbation_data(pert_name)
        target = pert_data["tgt_cell_data"]
        pert_id = pert_data["condition_id"].to(device)
        source = control_data["src_cell_data"].to(device)

        if config.perturbation_function == "crisper":
            # Re-encode perturbation targets through the gene vocab so CRISPR
            # conditions share token embeddings with the gene IDs.
            pert_name_crisper = [
                inverse_dict[int(p)] for p in pert_id[0].cpu().numpy()
            ]
            pert_id = torch.tensor(
                vocab.encode(pert_name_crisper), dtype=torch.long, device=device
            ).repeat(source.shape[0], 1)

        # Fixed budget of 128 randomly chosen control cells per perturbation.
        # NOTE(review): if fewer than 128 control cells exist, later slices
        # below are empty batches — confirm upstream guarantees >= 128.
        idx = torch.randperm(source.shape[0])
        source = source[idx][:128]

        preds = []
        for i in trange(0, 128, batch_size, desc=pert_name):
            bs = source[i:i+batch_size]
            # All rows of pert_id are identical per perturbation; row 0 is
            # broadcast to the current batch size.
            bp = pert_id[0].repeat(bs.shape[0], 1).to(device)
            # Unwrap DDP wrapper (if any) to reach .generate().
            model = denoiser.module if hasattr(denoiser, "module") else denoiser
            pred = model.generate(
                bs, bp, gene_ids_test,
                steps=config.sde_steps if config.use_sde_inference else config.ode_steps,
                method="sde" if config.use_sde_inference else "ode",
            )
            preds.append(pred)

        preds = torch.cat(preds, 0).cpu().numpy()
        all_pred.append(preds)
        all_real.append(target)
        obs_pred.extend([pert_name] * preds.shape[0])
        obs_real.extend([pert_name] * target.shape[0])

    all_pred = np.concatenate(all_pred, 0)
    all_real = np.concatenate(all_real, 0)
    pred_adata = ad.AnnData(X=all_pred, obs=pd.DataFrame({"perturbation": obs_pred}))
    real_adata = ad.AnnData(X=all_real, obs=pd.DataFrame({"perturbation": obs_real}))

    eval_score = None
    if accelerator.is_main_process:
        evaluator = MetricsEvaluator(
            adata_pred=pred_adata, adata_real=real_adata,
            control_pert="control", pert_col="perturbation", num_threads=32,
        )
        # results/agg_results appear to be polars frames (write_csv/to_pandas)
        # — TODO confirm against cell_eval's API.
        results, agg_results = evaluator.compute()
        results.write_csv(os.path.join(path_dir, "results.csv"))
        agg_results.write_csv(os.path.join(path_dir, "agg_results.csv"))
        pred_adata.write_h5ad(os.path.join(path_dir, "pred.h5ad"))
        real_adata.write_h5ad(os.path.join(path_dir, "real.h5ad"))
        df = agg_results.to_pandas()
        # First available headline metric wins, in this priority order.
        for m in ("mse", "pearson_delta", "pr_auc"):
            if m in df.columns and df[m].notna().any():
                eval_score = float(df[m].iloc[0])
                break
        if eval_score is not None:
            print(f"Eval score: {eval_score:.4f}")

    return eval_score
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
if __name__ == "__main__":
    # Parse SBConfig from CLI flags.
    config = tyro.cli(Config)

    # find_unused_parameters=True: not all heads (e.g. score head) receive
    # gradients every step, so DDP must tolerate unused parameters.
    ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
    if accelerator.is_main_process:
        print(config)
    save_path = config.make_path()
    os.makedirs(save_path, exist_ok=True)
    device = accelerator.device

    # === Data loading ===
    Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
    scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
    data_manager = Data(scdfm_data_path)
    data_manager.load_data(config.data_name)

    # If var_names are Ensembl IDs, swap in gene symbols so vocab lookups work.
    if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
        data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
        data_manager.adata.var_names_make_unique()

    data_manager.process_data(
        n_top_genes=config.n_top_genes,
        split_method=config.split_method,
        fold=config.fold,
        use_negative_edge=config.use_negative_edge,
        k=config.topk,
    )
    train_sampler, valid_sampler, _ = data_manager.load_flow_data(batch_size=config.batch_size)

    # === Mask path ===
    # GRN mask file name encodes fold/topk/split; optional negative-edge variant.
    if config.use_negative_edge:
        mask_path = os.path.join(
            data_manager.data_path, data_manager.data_name,
            f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}_negative_edge.pt",
        )
    else:
        mask_path = os.path.join(
            data_manager.data_path, data_manager.data_name,
            f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}.pt",
        )

    # === Vocab ===
    # process_vocab resolves paths relative to the scDFM tree, so chdir
    # temporarily; restored immediately afterwards.
    orig_cwd = os.getcwd()
    os.chdir(os.path.join(_REPO_ROOT, "scDFM"))
    vocab = process_vocab(data_manager, config)
    os.chdir(orig_cwd)

    gene_ids = vocab.encode(list(data_manager.adata.var_names))
    gene_ids = torch.tensor(gene_ids, dtype=torch.long, device=device)

    # === Build SBModel ===
    vf = SBModel(
        ntoken=len(vocab),
        d_model=config.d_model,
        nhead=config.nhead,
        d_hid=config.d_hid,
        nlayers=config.nlayers,
        fusion_method=config.fusion_method,
        perturbation_function=config.perturbation_function,
        mask_path=mask_path,
        sigma_min=config.sigma_min,
        sigma_max=config.sigma_max,
        sigma_init=config.sigma_init,
        sigma_hidden_dim=config.sigma_hidden_dim,
        sigma_num_layers=config.sigma_num_layers,
        score_head_depth=config.score_head_depth,
        use_score=config.use_score,
    )

    # === Simple PerturbationDataset (no sparse cache needed) ===
    # Dataset already yields whole minibatches, hence DataLoader batch_size=1
    # (the extra leading dim is squeezed off in the training loop).
    base_dataset = PerturbationDataset(train_sampler, config.batch_size)
    dataloader = DataLoader(
        base_dataset, batch_size=1, shuffle=False,
        num_workers=4, pin_memory=True, persistent_workers=True,
    )

    # === Build SBDenoiser ===
    denoiser = SBDenoiser(
        model=vf,
        noise_type=config.noise_type,
        use_mmd_loss=config.use_mmd_loss,
        gamma=config.gamma,
        poisson_alpha=config.poisson_alpha,
        poisson_target_sum=config.poisson_target_sum,
        score_weight=config.score_weight,
        score_t_clip=config.score_t_clip,
        use_score=config.use_score,
        sigma_base=config.sigma_base,
        sigma_sparse_weight=config.sigma_sparse_weight,
        sigma_volume_weight=config.sigma_volume_weight,
        ot_method=config.ot_method,
        ot_reg=config.ot_reg,
        ot_use_sigma=config.ot_use_sigma,
        sigma_min=config.sigma_min,
        t_sample_mode=config.t_sample_mode,
        t_mean=config.t_mean,
        t_std=config.t_std,
        sde_steps=config.sde_steps,
        use_sde_inference=config.use_sde_inference,
        source_anchored=config.source_anchored,
    )

    # === EMA model ===
    # Frozen exponential-moving-average copy used for checkpoints and eval.
    ema_model = copy.deepcopy(vf).to(device)
    ema_model.eval()
    ema_model.requires_grad_(False)

    # === Optimizer & Scheduler ===
    # NOTE(review): save_path was already computed above; this recomputation
    # is redundant (harmless if make_path() is deterministic).
    save_path = config.make_path()
    optimizer = torch.optim.Adam(vf.parameters(), lr=config.lr)
    # Linear warmup then cosine decay to eta_min.
    warmup_scheduler = LinearLR(optimizer, start_factor=1e-3, end_factor=1.0, total_iters=config.warmup_steps)
    cosine_scheduler = CosineAnnealingLR(optimizer, T_max=max(config.steps - config.warmup_steps, 1), eta_min=config.eta_min)
    scheduler = SequentialLR(optimizer, [warmup_scheduler, cosine_scheduler], milestones=[config.warmup_steps])

    start_iteration = 0
    if config.checkpoint_path != "":
        # Resume; EMA restarts from the loaded raw weights.
        start_iteration, _ = load_checkpoint(config.checkpoint_path, vf, optimizer, scheduler)
        ema_model.load_state_dict(vf.state_dict())

    # === Prepare with accelerator ===
    denoiser = accelerator.prepare(denoiser)
    optimizer, scheduler, dataloader = accelerator.prepare(optimizer, scheduler, dataloader)

    # id -> perturbation-name lookup for the CRISPR re-encoding path.
    inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}

    # === Test-only mode ===
    if config.test_only:
        eval_path = os.path.join(save_path, "eval_only")
        os.makedirs(eval_path, exist_ok=True)
        eval_score = test(
            valid_sampler, denoiser, accelerator, config, vocab, data_manager,
            batch_size=config.eval_batch_size, path_dir=eval_path,
        )
        sys.exit(0)

    # === Loss logging ===
    # CSV + TensorBoard writers exist only on the main process; all later
    # uses are guarded by accelerator.is_main_process.
    if accelerator.is_main_process:
        os.makedirs(save_path, exist_ok=True)
        csv_path = os.path.join(save_path, 'loss_curve.csv')
        # Append when resuming into an existing curve; otherwise start fresh.
        csv_file = open(csv_path, 'a' if start_iteration > 0 and os.path.exists(csv_path) else 'w', newline='')
        csv_writer = csv.writer(csv_file)
        if start_iteration == 0 or not os.path.exists(csv_path):
            csv_writer.writerow([
                'iteration', 'loss', 'loss_v', 'loss_s', 'loss_mmd',
                'loss_sparse', 'loss_volume', 'sigma_mean', 'sigma_std', 'lr',
            ])
        tb_writer = SummaryWriter(log_dir=os.path.join(save_path, 'tb_logs'))

    # === Training loop ===
    pbar = tqdm.tqdm(total=config.steps, initial=start_iteration)
    iteration = start_iteration

    while iteration < config.steps:
        for batch_data in dataloader:
            # Dataset emits pre-batched tensors with a dummy leading dim.
            source = batch_data["src_cell_data"].squeeze(0).to(device)
            target = batch_data["tgt_cell_data"].squeeze(0).to(device)
            perturbation_id = batch_data["condition_id"].squeeze(0).to(device)

            # Random gene subset (same as scDFM)
            G_full = source.shape[-1]
            input_gene_ids_pos = torch.randperm(G_full, device=device)[:config.infer_top_gene]
            source_sub = source[:, input_gene_ids_pos]
            target_sub = target[:, input_gene_ids_pos]
            gene_ids_sub = gene_ids[input_gene_ids_pos]

            if config.perturbation_function == "crisper":
                # Re-encode perturbation ids through the gene vocab so CRISPR
                # targets share embeddings with gene tokens (mirrors test()).
                pert_name = [inverse_dict[int(p)] for p in perturbation_id[0].cpu().numpy()]
                perturbation_id = torch.tensor(
                    vocab.encode(pert_name), dtype=torch.long, device=device
                ).repeat(source_sub.shape[0], 1)

            # Unwrap DDP to call the custom train_step directly.
            base_denoiser = denoiser.module if hasattr(denoiser, "module") else denoiser
            base_denoiser.model.train()

            B = source_sub.shape[0]
            gene_input = gene_ids_sub.unsqueeze(0).expand(B, -1)

            loss_dict = base_denoiser.train_step(source_sub, target_sub, perturbation_id, gene_input)

            loss = loss_dict["loss"]
            optimizer.zero_grad(set_to_none=True)
            accelerator.backward(loss)
            optimizer.step()
            scheduler.step()

            # EMA update: ema <- decay * ema + (1 - decay) * model
            with torch.no_grad():
                for ema_p, model_p in zip(ema_model.parameters(), vf.parameters()):
                    ema_p.lerp_(model_p.data, 1 - config.ema_decay)

            # Checkpoint & eval
            if iteration % config.print_every == 0:
                save_path_ = os.path.join(save_path, f"iteration_{iteration}")
                os.makedirs(save_path_, exist_ok=True)
                if accelerator.is_main_process:
                    # Checkpoints store the EMA weights, not the raw model.
                    save_checkpoint(
                        model=ema_model, optimizer=optimizer, scheduler=scheduler,
                        iteration=iteration, eval_score=None,
                        save_path=save_path_, is_best=False,
                    )
                # Only evaluate on the final checkpoint window: swap EMA
                # weights into vf, run test(), then restore raw weights.
                if iteration + config.print_every >= config.steps:
                    orig_state = copy.deepcopy(vf.state_dict())
                    vf.load_state_dict(ema_model.state_dict())
                    eval_score = test(
                        valid_sampler, denoiser, accelerator, config, vocab, data_manager,
                        batch_size=config.eval_batch_size, path_dir=save_path_,
                    )
                    vf.load_state_dict(orig_state)
                    if accelerator.is_main_process and eval_score is not None:
                        tb_writer.add_scalar('eval/score', eval_score, iteration)

            # Logging
            if accelerator.is_main_process:
                lr = scheduler.get_last_lr()[0]
                csv_writer.writerow([
                    iteration, loss.item(),
                    loss_dict["loss_v"].item(), loss_dict["loss_s"].item(),
                    loss_dict["loss_mmd"].item(),
                    loss_dict["loss_sparse"].item(), loss_dict["loss_volume"].item(),
                    loss_dict["sigma_mean"].item(), loss_dict["sigma_std"].item(), lr,
                ])
                if iteration % 100 == 0:
                    # Flush CSV and mirror scalars to TensorBoard every 100 it.
                    csv_file.flush()
                    tb_writer.add_scalar('loss/total', loss.item(), iteration)
                    tb_writer.add_scalar('loss/velocity', loss_dict["loss_v"].item(), iteration)
                    tb_writer.add_scalar('loss/score', loss_dict["loss_s"].item(), iteration)
                    tb_writer.add_scalar('loss/mmd', loss_dict["loss_mmd"].item(), iteration)
                    tb_writer.add_scalar('sigma/mean', loss_dict["sigma_mean"].item(), iteration)
                    tb_writer.add_scalar('sigma/std', loss_dict["sigma_std"].item(), iteration)
                    tb_writer.add_scalar('lr', lr, iteration)

            accelerator.wait_for_everyone()
            pbar.update(1)
            pbar.set_description(
                f"L={loss.item():.4f} v={loss_dict['loss_v'].item():.3f} "
                f"s={loss_dict['loss_s'].item():.3f} σ={loss_dict['sigma_mean'].item():.3f}"
            )
            iteration += 1
            if iteration >= config.steps:
                break

    if accelerator.is_main_process:
        csv_file.close()
        tb_writer.close()
|
GRN/SB/src/__init__.py
ADDED
|
File without changes
|
GRN/SB/src/_scdfm_imports.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Central import hub for scDFM modules.
|
| 3 |
+
Requires _bootstrap_scdfm to have been imported first (at script entry point).
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
|
| 8 |
+
# Ensure bootstrap has run
|
| 9 |
+
if "scdfm_src" not in sys.modules:
|
| 10 |
+
import os
|
| 11 |
+
sys.path.insert(0, os.path.normpath(os.path.join(os.path.dirname(__file__), "..")))
|
| 12 |
+
import _bootstrap_scdfm
|
| 13 |
+
|
| 14 |
+
import scdfm_src.models.origin.layers as _layers
|
| 15 |
+
import scdfm_src.models.origin.model as _model
|
| 16 |
+
import scdfm_src.flow_matching.path as _fm_path
|
| 17 |
+
import scdfm_src.flow_matching.path.scheduler.scheduler as _scheduler
|
| 18 |
+
import scdfm_src.utils.utils as _utils
|
| 19 |
+
import scdfm_src.tokenizer.gene_tokenizer as _tokenizer
|
| 20 |
+
# === scDFM Layers ===
|
| 21 |
+
GeneadaLN = _layers.GeneadaLN
|
| 22 |
+
ContinuousValueEncoder = _layers.ContinuousValueEncoder
|
| 23 |
+
GeneEncoder = _layers.GeneEncoder
|
| 24 |
+
BatchLabelEncoder = _layers.BatchLabelEncoder
|
| 25 |
+
TimestepEmbedder = _layers.TimestepEmbedder
|
| 26 |
+
ExprDecoder = _layers.ExprDecoder
|
| 27 |
+
|
| 28 |
+
# === scDFM Blocks ===
|
| 29 |
+
DifferentialTransformerBlock = _model.DifferentialTransformerBlock
|
| 30 |
+
PerceiverBlock = _model.PerceiverBlock
|
| 31 |
+
DiffPerceiverBlock = _model.DiffPerceiverBlock
|
| 32 |
+
|
| 33 |
+
# === scDFM Flow Matching ===
|
| 34 |
+
AffineProbPath = _fm_path.AffineProbPath
|
| 35 |
+
CondOTScheduler = _scheduler.CondOTScheduler
|
| 36 |
+
|
| 37 |
+
# === scDFM Utils ===
|
| 38 |
+
save_checkpoint = _utils.save_checkpoint
|
| 39 |
+
load_checkpoint = _utils.load_checkpoint
|
| 40 |
+
make_lognorm_poisson_noise = _utils.make_lognorm_poisson_noise
|
| 41 |
+
pick_eval_score = _utils.pick_eval_score
|
| 42 |
+
process_vocab = _utils.process_vocab
|
| 43 |
+
set_requires_grad_for_p_only = _utils.set_requires_grad_for_p_only
|
| 44 |
+
get_perturbation_emb = _utils.get_perturbation_emb
|
| 45 |
+
|
| 46 |
+
# === scDFM Tokenizer ===
|
| 47 |
+
GeneVocab = _tokenizer.GeneVocab
|
| 48 |
+
|
| 49 |
+
# === scDFM Data ===
|
| 50 |
+
# Data loading handled separately in CCFM (scDFM data module has heavy deps)
|
GRN/SB/src/data/__init__.py
ADDED
|
File without changes
|
GRN/SB/src/data/data.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Data loading for grn_svd.
|
| 3 |
+
Imports scDFM Data/PerturbationDataset by temporarily swapping sys.modules
|
| 4 |
+
so that scDFM's 'src.*' packages are visible during import.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import sys
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
from torch.utils.data import Dataset
|
| 12 |
+
|
| 13 |
+
_SCDFM_ROOT = os.path.normpath(
|
| 14 |
+
os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "transfer", "code", "scDFM")
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
# Cache to avoid repeated imports
|
| 18 |
+
_cached_classes = {}
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def get_data_classes():
    """Lazily import scDFM data classes with proper module isolation.

    scDFM ships its own top-level ``src`` package, which collides with this
    project's ``src`` package.  To import scDFM's data classes we temporarily
    pop our own ``src.*`` entries from ``sys.modules``, prepend the scDFM root
    to ``sys.path``, import, and then restore everything — even on failure.

    Returns:
        Tuple ``(Data, PerturbationDataset, TrainSampler, TestDataset)``.
        Results are cached in module-level ``_cached_classes`` so the swap
        dance only happens once per process.
    """
    if _cached_classes:
        return (
            _cached_classes["Data"],
            _cached_classes["PerturbationDataset"],
            _cached_classes["TrainSampler"],
            _cached_classes["TestDataset"],
        )

    # Save CCFM's src modules so the scDFM import cannot shadow them.
    saved = {}
    for key in list(sys.modules.keys()):
        if key == "src" or key.startswith("src."):
            saved[key] = sys.modules.pop(key)

    # Ensure __init__.py exists for scDFM data_process (scDFM may ship these
    # directories without package markers).
    for d in ["src", "src/data_process", "src/utils", "src/tokenizer"]:
        init_path = os.path.join(_SCDFM_ROOT, d, "__init__.py")
        if not os.path.exists(init_path):
            os.makedirs(os.path.dirname(init_path), exist_ok=True)
            with open(init_path, "w") as f:
                f.write("# Auto-created by CCFM\n")

    sys.path.insert(0, _SCDFM_ROOT)
    try:
        from src.data_process.data import Data, PerturbationDataset, TrainSampler, TestDataset
        _cached_classes["Data"] = Data
        _cached_classes["PerturbationDataset"] = PerturbationDataset
        _cached_classes["TrainSampler"] = TrainSampler
        _cached_classes["TestDataset"] = TestDataset
    finally:
        # Remove scDFM's src.* entries
        for key in list(sys.modules.keys()):
            if (key == "src" or key.startswith("src.")) and not key.startswith("scdfm_"):
                del sys.modules[key]

        # Restore CCFM's src modules
        for key, mod in saved.items():
            sys.modules[key] = mod

        # Fix: drop the temporary path entry inside ``finally`` so a failed
        # import does not leave sys.path polluted (previously this cleanup
        # ran after the try block and was skipped on exceptions).
        if _SCDFM_ROOT in sys.path:
            sys.path.remove(_SCDFM_ROOT)

    return Data, PerturbationDataset, TrainSampler, TestDataset
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class GRNDatasetWrapper(Dataset):
    """Wrap scDFM's PerturbationDataset to emit sparse delta triplets.

    Instead of a dense z_target of shape (B, G_sub, G_sub), each item
    carries delta_values / delta_indices of shape (B, G_sub, K); the SVD
    projection is deferred to the GPU inside denoiser.train_step().
    """

    def __init__(self, base_dataset, sparse_cache, gene_ids_cpu, infer_top_gene):
        # scDFM PerturbationDataset providing pre-batched samples.
        self.base = base_dataset
        # SparseDeltaCache; lookup_delta must be safe in DataLoader workers.
        self.sparse_cache = sparse_cache
        # (G_full,) CPU tensor of vocab-encoded gene IDs.
        self.gene_ids = gene_ids_cpu
        # Size of the random gene subset drawn per item.
        self.infer_top_gene = infer_top_gene

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        sample = self.base[idx]

        # Draw a random gene subset of size infer_top_gene.
        total_genes = sample["src_cell_data"].shape[-1]
        gene_subset = torch.randperm(total_genes)[:self.infer_top_gene]

        # Cell-id lists may arrive as singleton tuples/lists; flatten them.
        src_ids = sample["src_cell_id"]
        tgt_ids = sample["tgt_cell_id"]
        if src_ids and isinstance(src_ids[0], (tuple, list)):
            src_ids = [entry[0] for entry in src_ids]
            tgt_ids = [entry[0] for entry in tgt_ids]

        # Sparse triplet lookup runs inside the worker process, on CPU.
        delta_values, delta_indices = self.sparse_cache.lookup_delta(
            src_ids, tgt_ids, gene_subset, device=torch.device("cpu")
        )

        # Expression data restricted to the sampled gene columns.
        return {
            "src_cell_data": sample["src_cell_data"][:, gene_subset],  # (B, G_sub)
            "tgt_cell_data": sample["tgt_cell_data"][:, gene_subset],  # (B, G_sub)
            "condition_id": sample["condition_id"],                    # (B, 2)
            "delta_values": delta_values,                              # (B, G_sub, K)
            "delta_indices": delta_indices,                            # (B, G_sub, K) int16
            "gene_ids_sub": self.gene_ids[gene_subset],                # (G_sub,)
            "input_gene_ids": gene_subset,                             # (G_sub,)
        }
|
GRN/SB/src/denoiser.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
SBDenoiser — Anisotropic Schrödinger Bridge denoiser.
|
| 3 |
+
|
| 4 |
+
Training: Joint velocity + score matching with anisotropic bridge paths.
|
| 5 |
+
v_θ target = x_T - x₀ (PF-ODE velocity, same as scDFM).
|
| 6 |
+
s_θ target = -(x_t - μ_t) / var_t (conditional score).
|
| 7 |
+
Minibatch anisotropic OT per step.
|
| 8 |
+
|
| 9 |
+
Inference: Euler-Maruyama SDE using drift = v_θ + (σ²/2)·s_θ.
|
| 10 |
+
Or PF-ODE ablation: drift = v_θ.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import math
|
| 14 |
+
import torch
|
| 15 |
+
import torch.nn as nn
|
| 16 |
+
import torchdiffeq
|
| 17 |
+
|
| 18 |
+
from ._scdfm_imports import make_lognorm_poisson_noise
|
| 19 |
+
from .model.model import SBModel
|
| 20 |
+
from .ot_anisotropic import AnisotropicOTSampler
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def pairwise_sq_dists(X, Y):
    """Squared Euclidean distances between all row pairs of X and Y.

    Args:
        X: (m, d) tensor.
        Y: (n, d) tensor.

    Returns:
        (m, n) tensor with entry (i, j) == ||X[i] - Y[j]||^2.
    """
    return torch.cdist(X, Y).pow(2)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@torch.no_grad()
def median_sigmas(X, scales=(0.5, 1.0, 2.0, 4.0)):
    """RBF bandwidths from the median heuristic, at several scales.

    Takes the median of the off-diagonal squared pairwise distances of X,
    multiplies it by each scale, and returns the square roots as floats.

    Args:
        X: (n, d) tensor of samples.
        scales: multipliers applied to the median squared distance.

    Returns:
        List of float bandwidths, one per scale.
    """
    sq_dists = torch.cdist(X, X, p=2) ** 2
    off_diag = ~torch.eye(sq_dists.size(0), dtype=bool, device=sq_dists.device)
    # Clamp guards against a degenerate all-identical batch (median == 0).
    med = torch.median(sq_dists[off_diag]).clamp_min(1e-12)
    widths = torch.sqrt(torch.tensor(scales, device=X.device) * med)
    return [float(w.item()) for w in widths]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def mmd2_unbiased_multi_sigma(X, Y, sigmas):
    """Unbiased squared MMD between X and Y, averaged over RBF bandwidths.

    Uses the standard unbiased estimator: within-set kernel sums exclude the
    diagonal, the cross term is a plain mean.

    Args:
        X: (m, d) tensor of samples from the first distribution.
        Y: (n, d) tensor of samples from the second distribution.
        sigmas: iterable of RBF bandwidths.

    Returns:
        Scalar tensor: mean of the per-bandwidth MMD^2 estimates.
    """
    m = X.size(0)
    n = Y.size(0)
    Dxx = torch.cdist(X, X, p=2) ** 2
    Dyy = torch.cdist(Y, Y, p=2) ** 2
    Dxy = torch.cdist(X, Y, p=2) ** 2
    estimates = []
    for s in sigmas:
        beta = 1.0 / (2.0 * (s ** 2) + 1e-12)
        Kxx = torch.exp(-beta * Dxx)
        Kyy = torch.exp(-beta * Dyy)
        Kxy = torch.exp(-beta * Dxy)
        # Diagonal entries (self-similarity) are excluded for unbiasedness.
        within_x = (Kxx.sum() - Kxx.diag().sum()) / (m * (m - 1) + 1e-12)
        within_y = (Kyy.sum() - Kyy.diag().sum()) / (n * (n - 1) + 1e-12)
        cross = Kxy.mean()
        estimates.append(within_x + within_y - 2.0 * cross)
    return torch.stack(estimates).mean()
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class SBDenoiser(nn.Module):
|
| 55 |
+
"""
|
| 56 |
+
Anisotropic Schrödinger Bridge Denoiser.
|
| 57 |
+
|
| 58 |
+
σ_g simultaneously controls:
|
| 59 |
+
1. OT coupling cost (Mahalanobis weights)
|
| 60 |
+
2. Bridge noise level (conditional bridge variance)
|
| 61 |
+
3. SDE diffusion strength (Euler-Maruyama noise)
|
| 62 |
+
"""
|
| 63 |
+
|
| 64 |
+
    def __init__(
        self,
        model: SBModel,
        noise_type: str = "Gaussian",
        use_mmd_loss: bool = True,
        gamma: float = 0.5,
        poisson_alpha: float = 0.8,
        poisson_target_sum: float = 1e4,
        # Score training
        score_weight: float = 0.1,
        score_t_clip: float = 0.02,
        use_score: bool = True,
        # σ_g regularization
        sigma_base: float = 0.5,
        sigma_sparse_weight: float = 0.01,
        sigma_volume_weight: float = 0.01,
        # OT coupling
        ot_method: str = "sinkhorn",
        ot_reg: float = 0.05,
        ot_use_sigma: bool = True,
        sigma_min: float = 0.01,
        # Time sampling
        t_sample_mode: str = "logit_normal",
        t_mean: float = 0.0,
        t_std: float = 1.0,
        # SDE inference
        sde_steps: int = 50,
        use_sde_inference: bool = True,
        # Source-Anchored Bridge
        source_anchored: bool = False,
    ):
        """Configure the anisotropic SB denoiser around a backbone model.

        Args:
            model: SBModel backbone (velocity/score heads + sigma_net).
            noise_type: "Gaussian" or "Poisson" prior for bridge x0.
            use_mmd_loss: add an MMD distribution-matching loss term.
            gamma: loss-mixing weight (usage defined in train_step).
            poisson_alpha / poisson_target_sum: parameters forwarded to
                make_lognorm_poisson_noise for the Poisson prior.
            score_weight: weight of the score-matching loss term.
            score_t_clip: sampled t is clamped to [clip, 1 - clip].
            use_score: train the score head at all.
            sigma_base / sigma_sparse_weight / sigma_volume_weight:
                σ_g regularization strengths.
            ot_method / ot_reg: minibatch OT solver and its entropic reg.
            ot_use_sigma: weight the OT cost by σ_g (anisotropic coupling).
            sigma_min: floor for σ in the OT sampler.
            t_sample_mode / t_mean / t_std: bridge-time sampling scheme.
            sde_steps / use_sde_inference: Euler-Maruyama inference settings.
            source_anchored: bridge from control cells instead of noise.
        """
        super().__init__()
        self.model = model
        self.noise_type = noise_type
        self.use_mmd_loss = use_mmd_loss
        self.gamma = gamma
        self.poisson_alpha = poisson_alpha
        self.poisson_target_sum = poisson_target_sum
        self.score_weight = score_weight
        self.score_t_clip = score_t_clip
        self.use_score = use_score
        self.sigma_base = sigma_base
        self.sigma_sparse_weight = sigma_sparse_weight
        self.sigma_volume_weight = sigma_volume_weight
        self.ot_use_sigma = ot_use_sigma
        self.t_sample_mode = t_sample_mode
        self.t_mean = t_mean
        self.t_std = t_std
        self.sde_steps = sde_steps
        self.use_sde_inference = use_sde_inference
        self.source_anchored = source_anchored

        # Minibatch OT coupler; shares sigma_min with the model's σ range.
        self.ot_sampler = AnisotropicOTSampler(
            method=ot_method, reg=ot_reg, sigma_min=sigma_min,
        )
|
| 119 |
+
|
| 120 |
+
def _make_noise(self, source: torch.Tensor) -> torch.Tensor:
|
| 121 |
+
if self.noise_type == "Gaussian":
|
| 122 |
+
return torch.randn_like(source)
|
| 123 |
+
elif self.noise_type == "Poisson":
|
| 124 |
+
return make_lognorm_poisson_noise(
|
| 125 |
+
target_log=source,
|
| 126 |
+
alpha=self.poisson_alpha,
|
| 127 |
+
per_cell_L=self.poisson_target_sum,
|
| 128 |
+
)
|
| 129 |
+
else:
|
| 130 |
+
raise ValueError(f"Unknown noise_type: {self.noise_type}")
|
| 131 |
+
|
| 132 |
+
def _sample_t(self, n: int, device: torch.device) -> torch.Tensor:
|
| 133 |
+
if self.t_sample_mode == "logit_normal":
|
| 134 |
+
t = torch.sigmoid(torch.randn(n, device=device) * self.t_std + self.t_mean)
|
| 135 |
+
else:
|
| 136 |
+
t = torch.rand(n, device=device)
|
| 137 |
+
return t.clamp(self.score_t_clip, 1.0 - self.score_t_clip)
|
| 138 |
+
|
| 139 |
+
def train_step(
|
| 140 |
+
self,
|
| 141 |
+
source: torch.Tensor, # (B, G) control expression
|
| 142 |
+
target: torch.Tensor, # (B, G) perturbed expression
|
| 143 |
+
perturbation_id: torch.Tensor, # (B, n_pert)
|
| 144 |
+
gene_input: torch.Tensor, # (B, G) vocab-encoded gene IDs
|
| 145 |
+
) -> dict:
|
| 146 |
+
"""
|
| 147 |
+
Single training step with anisotropic bridge + minibatch OT.
|
| 148 |
+
"""
|
| 149 |
+
B = source.shape[0]
|
| 150 |
+
device = source.device
|
| 151 |
+
|
| 152 |
+
# 1. Sample time
|
| 153 |
+
t = self._sample_t(B, device) # (B,)
|
| 154 |
+
t_col = t.unsqueeze(-1) # (B, 1)
|
| 155 |
+
|
| 156 |
+
# 2. Get σ_g from sigma_net (independent of backbone)
|
| 157 |
+
# Need gene_emb and pert_emb — compute them via the model's encoder
|
| 158 |
+
with torch.no_grad():
|
| 159 |
+
gene_emb = self.model.encoder(gene_input) # (B, G, d)
|
| 160 |
+
pert_emb = self.model.get_perturbation_emb(
|
| 161 |
+
perturbation_id, cell_1=source) # (B, d)
|
| 162 |
+
# σ_g with gradient (for regularization loss)
|
| 163 |
+
sigma_g = self.model.sigma_net(pert_emb, t, gene_emb) # (B, G)
|
| 164 |
+
sigma_g_det = sigma_g.detach() # for bridge sampling
|
| 165 |
+
|
| 166 |
+
# 3. Create x_0 and do minibatch anisotropic OT
|
| 167 |
+
if self.source_anchored:
|
| 168 |
+
x_0 = source # bridge from control
|
| 169 |
+
else:
|
| 170 |
+
x_0 = self._make_noise(source) # bridge from noise
|
| 171 |
+
if self.ot_use_sigma:
|
| 172 |
+
sigma_for_ot = sigma_g_det.mean(0) # (G,) batch mean
|
| 173 |
+
x_0, target_matched = self.ot_sampler.sample_plan_fix_x0(
|
| 174 |
+
x_0, target, sigma_for_ot)
|
| 175 |
+
else:
|
| 176 |
+
x_0, target_matched = self.ot_sampler.sample_plan_fix_x0(
|
| 177 |
+
x_0, target, sigma_g=None)
|
| 178 |
+
|
| 179 |
+
# 4. Anisotropic conditional bridge sampling
|
| 180 |
+
mu_t = (1 - t_col) * x_0 + t_col * target_matched # (B, G)
|
| 181 |
+
var_t = (sigma_g_det ** 2 * (t_col * (1 - t_col))).clamp(min=1e-8)
|
| 182 |
+
std_t = torch.sqrt(var_t) # (B, G)
|
| 183 |
+
eps = torch.randn_like(x_0)
|
| 184 |
+
x_t = mu_t + std_t * eps # (B, G)
|
| 185 |
+
|
| 186 |
+
# 5. Targets
|
| 187 |
+
v_target = target_matched - x_0 # source-anchored: Δ
|
| 188 |
+
s_target = -eps / (std_t + 1e-8) # conditional score
|
| 189 |
+
|
| 190 |
+
# 6. Full model forward
|
| 191 |
+
pred_v, pred_s, sigma_g_pred = self.model(
|
| 192 |
+
gene_input, source, x_t, t, perturbation_id)
|
| 193 |
+
|
| 194 |
+
# 7. Velocity loss
|
| 195 |
+
loss_v = ((pred_v - v_target) ** 2).mean()
|
| 196 |
+
|
| 197 |
+
# 8. Score loss (var_t-weighted DSM — equivalent to ε-prediction)
|
| 198 |
+
# var_t weighting cancels the 1/var_t in s_target, giving bounded loss ~O(1)
|
| 199 |
+
loss_s = torch.tensor(0.0, device=device)
|
| 200 |
+
if self.use_score and pred_s is not None:
|
| 201 |
+
loss_s = (var_t * (pred_s - s_target) ** 2).mean()
|
| 202 |
+
|
| 203 |
+
# 9. σ_g regularization
|
| 204 |
+
# Volume penalty anchors geometric mean at σ_base (global scale).
|
| 205 |
+
# L1 sparse penalty removed — it killed per-gene anisotropy by
|
| 206 |
+
# pulling every σ_g to σ_base. Sigmoid [σ_min, σ_max] prevents
|
| 207 |
+
# collapse/explosion; volume penalty alone is sufficient.
|
| 208 |
+
loss_sparse = (sigma_g_pred - self.sigma_base).abs().mean() # monitor only
|
| 209 |
+
loss_volume = (sigma_g_pred.log().mean() - math.log(self.sigma_base)) ** 2
|
| 210 |
+
|
| 211 |
+
# 10. MMD loss (optional)
|
| 212 |
+
loss_mmd = torch.tensor(0.0, device=device)
|
| 213 |
+
if self.use_mmd_loss:
|
| 214 |
+
x1_hat = x_t + pred_v * (1 - t_col)
|
| 215 |
+
sigmas_mmd = median_sigmas(target_matched, scales=(0.5, 1.0, 2.0, 4.0))
|
| 216 |
+
loss_mmd = mmd2_unbiased_multi_sigma(x1_hat, target_matched, sigmas_mmd)
|
| 217 |
+
|
| 218 |
+
# 11. Total loss
|
| 219 |
+
# loss_sparse excluded — kept in return dict for monitoring
|
| 220 |
+
loss = (
|
| 221 |
+
loss_v
|
| 222 |
+
+ self.score_weight * loss_s
|
| 223 |
+
+ self.sigma_volume_weight * loss_volume
|
| 224 |
+
+ self.gamma * loss_mmd
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
return {
|
| 228 |
+
"loss": loss,
|
| 229 |
+
"loss_v": loss_v.detach(),
|
| 230 |
+
"loss_s": loss_s.detach(),
|
| 231 |
+
"loss_mmd": loss_mmd.detach(),
|
| 232 |
+
"loss_sparse": loss_sparse.detach(),
|
| 233 |
+
"loss_volume": loss_volume.detach(),
|
| 234 |
+
"sigma_mean": sigma_g_pred.mean().detach(),
|
| 235 |
+
"sigma_std": sigma_g_pred.std().detach(),
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
@torch.no_grad()
|
| 239 |
+
def generate(
|
| 240 |
+
self,
|
| 241 |
+
source: torch.Tensor, # (B, G)
|
| 242 |
+
perturbation_id: torch.Tensor, # (B, n_pert)
|
| 243 |
+
gene_ids: torch.Tensor, # (B, G) or (G,)
|
| 244 |
+
steps: int = None,
|
| 245 |
+
method: str = "sde",
|
| 246 |
+
) -> torch.Tensor:
|
| 247 |
+
"""
|
| 248 |
+
Generate perturbed expression via SDE or PF-ODE.
|
| 249 |
+
|
| 250 |
+
SDE: dX = [v_θ + (σ²/2)·s_θ] dt + σ·dB (Euler-Maruyama)
|
| 251 |
+
PF-ODE: dx/dt = v_θ (torchdiffeq RK4)
|
| 252 |
+
"""
|
| 253 |
+
B, G = source.shape
|
| 254 |
+
device = source.device
|
| 255 |
+
steps = steps or self.sde_steps
|
| 256 |
+
|
| 257 |
+
if gene_ids.dim() == 1:
|
| 258 |
+
gene_ids = gene_ids.unsqueeze(0).expand(B, -1)
|
| 259 |
+
|
| 260 |
+
if self.source_anchored:
|
| 261 |
+
x_0 = source.clone() # start from control
|
| 262 |
+
else:
|
| 263 |
+
x_0 = self._make_noise(source) # start from noise
|
| 264 |
+
|
| 265 |
+
use_sde = self.use_sde_inference and (method != "ode")
|
| 266 |
+
|
| 267 |
+
if use_sde:
|
| 268 |
+
# SDE: Euler-Maruyama (no high-order SDE solver available)
|
| 269 |
+
x_t = x_0
|
| 270 |
+
dt = 1.0 / steps
|
| 271 |
+
for i in range(steps):
|
| 272 |
+
t_val = i * dt
|
| 273 |
+
t = torch.full((B,), t_val, device=device)
|
| 274 |
+
pred_v, pred_s, sigma_g = self.model(
|
| 275 |
+
gene_ids, source, x_t, t, perturbation_id)
|
| 276 |
+
if pred_s is not None:
|
| 277 |
+
drift = pred_v + 0.5 * sigma_g ** 2 * pred_s
|
| 278 |
+
diffusion_noise = sigma_g * math.sqrt(dt) * torch.randn_like(x_t)
|
| 279 |
+
x_t = x_t + drift * dt + diffusion_noise
|
| 280 |
+
else:
|
| 281 |
+
x_t = x_t + pred_v * dt
|
| 282 |
+
else:
|
| 283 |
+
# PF-ODE: torchdiffeq RK4 (matches scDFM inference)
|
| 284 |
+
def ode_func(t_scalar, x):
|
| 285 |
+
t_batch = torch.full((B,), t_scalar.item(), device=device)
|
| 286 |
+
pred_v, _, _ = self.model(
|
| 287 |
+
gene_ids, source, x, t_batch, perturbation_id)
|
| 288 |
+
return pred_v
|
| 289 |
+
|
| 290 |
+
t_span = torch.linspace(0, 1, steps, device=device)
|
| 291 |
+
trajectory = torchdiffeq.odeint(
|
| 292 |
+
ode_func, x_0, t_span,
|
| 293 |
+
method="rk4", atol=1e-4, rtol=1e-4,
|
| 294 |
+
)
|
| 295 |
+
x_t = trajectory[-1]
|
| 296 |
+
|
| 297 |
+
return torch.clamp(x_t, min=0)
|
GRN/SB/src/model/__init__.py
ADDED
|
File without changes
|
GRN/SB/src/model/layers.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
ASB layers: AnisotropicSigmaNet and ScoreDecoder.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import math
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn as nn
|
| 8 |
+
import torch.nn.functional as F
|
| 9 |
+
|
| 10 |
+
from .._scdfm_imports import TimestepEmbedder
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class AnisotropicSigmaNet(nn.Module):
|
| 14 |
+
"""
|
| 15 |
+
Predicts per-gene anisotropic diffusion coefficient σ_g(perturbation, t).
|
| 16 |
+
|
| 17 |
+
Input: pert_emb (B, d_model), t (B,), gene_emb (B, G, d_model)
|
| 18 |
+
Output: sigma_g (B, G) in [sigma_min, sigma_max]
|
| 19 |
+
|
| 20 |
+
Architecture: condition c = pert_emb + t_emb → c + gene_emb → MLP → sigmoid → [min, max]
|
| 21 |
+
Does NOT depend on x_t — can be called before bridge sampling.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
def __init__(
|
| 25 |
+
self,
|
| 26 |
+
d_model: int = 128,
|
| 27 |
+
hidden_dim: int = 256,
|
| 28 |
+
num_layers: int = 2,
|
| 29 |
+
sigma_min: float = 0.01,
|
| 30 |
+
sigma_max: float = 2.0,
|
| 31 |
+
sigma_init: float = 0.5,
|
| 32 |
+
):
|
| 33 |
+
super().__init__()
|
| 34 |
+
self.sigma_min = sigma_min
|
| 35 |
+
self.sigma_max = sigma_max
|
| 36 |
+
|
| 37 |
+
self.t_embedder = TimestepEmbedder(d_model)
|
| 38 |
+
|
| 39 |
+
layers = []
|
| 40 |
+
in_dim = d_model
|
| 41 |
+
for i in range(num_layers):
|
| 42 |
+
layers.append(nn.Linear(in_dim if i == 0 else hidden_dim, hidden_dim))
|
| 43 |
+
layers.append(nn.SiLU())
|
| 44 |
+
layers.append(nn.Linear(hidden_dim, 1))
|
| 45 |
+
self.mlp = nn.Sequential(*layers)
|
| 46 |
+
|
| 47 |
+
self._init_bias(sigma_init)
|
| 48 |
+
|
| 49 |
+
def _init_bias(self, sigma_init):
|
| 50 |
+
"""Initialize final bias so sigmoid output maps to sigma_init."""
|
| 51 |
+
target = (sigma_init - self.sigma_min) / (self.sigma_max - self.sigma_min)
|
| 52 |
+
target = max(min(target, 0.999), 0.001)
|
| 53 |
+
bias_val = math.log(target / (1 - target)) # logit
|
| 54 |
+
nn.init.constant_(self.mlp[-1].bias, bias_val)
|
| 55 |
+
nn.init.zeros_(self.mlp[-1].weight)
|
| 56 |
+
|
| 57 |
+
def forward(self, pert_emb: torch.Tensor, t: torch.Tensor,
|
| 58 |
+
gene_emb: torch.Tensor) -> torch.Tensor:
|
| 59 |
+
"""
|
| 60 |
+
Args:
|
| 61 |
+
pert_emb: (B, d_model) perturbation embedding
|
| 62 |
+
t: (B,) timestep
|
| 63 |
+
gene_emb: (B, G, d_model) gene embeddings
|
| 64 |
+
|
| 65 |
+
Returns:
|
| 66 |
+
sigma_g: (B, G) in [sigma_min, sigma_max]
|
| 67 |
+
"""
|
| 68 |
+
t_emb = self.t_embedder(t) # (B, d_model)
|
| 69 |
+
c = pert_emb + t_emb # (B, d_model)
|
| 70 |
+
c_exp = c.unsqueeze(1).expand_as(gene_emb) # (B, G, d_model)
|
| 71 |
+
h = gene_emb + c_exp # (B, G, d_model)
|
| 72 |
+
raw = self.mlp(h).squeeze(-1) # (B, G)
|
| 73 |
+
sigma = self.sigma_min + (self.sigma_max - self.sigma_min) * torch.sigmoid(raw)
|
| 74 |
+
return sigma
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class ScoreDecoder(nn.Module):
|
| 78 |
+
"""
|
| 79 |
+
Decodes backbone hidden states to score function prediction.
|
| 80 |
+
|
| 81 |
+
Input: backbone output (B, G, d_model), pert_emb (B, d_model)
|
| 82 |
+
Output: score prediction (B, G)
|
| 83 |
+
"""
|
| 84 |
+
|
| 85 |
+
def __init__(self, d_model: int = 128, depth: int = 2):
|
| 86 |
+
super().__init__()
|
| 87 |
+
self.proj = nn.Linear(d_model * 2, d_model) # concat with pert_emb
|
| 88 |
+
blocks = []
|
| 89 |
+
for _ in range(depth):
|
| 90 |
+
blocks.extend([
|
| 91 |
+
nn.LayerNorm(d_model),
|
| 92 |
+
nn.Linear(d_model, d_model),
|
| 93 |
+
nn.SiLU(),
|
| 94 |
+
])
|
| 95 |
+
blocks.append(nn.Linear(d_model, 1))
|
| 96 |
+
self.mlp = nn.Sequential(*blocks)
|
| 97 |
+
|
| 98 |
+
def forward(self, x: torch.Tensor, pert_emb: torch.Tensor) -> torch.Tensor:
|
| 99 |
+
"""
|
| 100 |
+
Args:
|
| 101 |
+
x: (B, G, d_model) backbone output
|
| 102 |
+
pert_emb: (B, d_model) perturbation embedding
|
| 103 |
+
|
| 104 |
+
Returns:
|
| 105 |
+
score: (B, G)
|
| 106 |
+
"""
|
| 107 |
+
x_with_pert = torch.cat(
|
| 108 |
+
[x, pert_emb[:, None, :].expand(-1, x.size(1), -1)], dim=-1
|
| 109 |
+
)
|
| 110 |
+
h = self.proj(x_with_pert)
|
| 111 |
+
return self.mlp(h).squeeze(-1)
|
GRN/SB/src/model/model.py
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
SBModel — Anisotropic Schrödinger Bridge model.
|
| 3 |
+
|
| 4 |
+
Shared backbone with scDFM, dual output heads (velocity + score),
|
| 5 |
+
plus AnisotropicSigmaNet for per-gene diffusion coefficients.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.nn as nn
|
| 10 |
+
from torch import Tensor
|
| 11 |
+
from typing import Optional, Tuple
|
| 12 |
+
|
| 13 |
+
from .layers import AnisotropicSigmaNet, ScoreDecoder
|
| 14 |
+
from .._scdfm_imports import (
|
| 15 |
+
GeneadaLN,
|
| 16 |
+
ContinuousValueEncoder,
|
| 17 |
+
GeneEncoder,
|
| 18 |
+
BatchLabelEncoder,
|
| 19 |
+
TimestepEmbedder,
|
| 20 |
+
ExprDecoder,
|
| 21 |
+
DifferentialTransformerBlock,
|
| 22 |
+
PerceiverBlock,
|
| 23 |
+
DiffPerceiverBlock,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class SBModel(nn.Module):
|
| 28 |
+
"""
|
| 29 |
+
Anisotropic Schrödinger Bridge model.
|
| 30 |
+
|
| 31 |
+
forward(gene_id, cell_1, x_t, t, perturbation_id)
|
| 32 |
+
→ (pred_velocity, pred_score, sigma_g)
|
| 33 |
+
|
| 34 |
+
- pred_velocity: (B, G) PF-ODE velocity (target = x_T - x₀)
|
| 35 |
+
- pred_score: (B, G) score function (target = conditional score)
|
| 36 |
+
- sigma_g: (B, G) per-gene diffusion coefficient in [σ_min, σ_max]
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
def __init__(
|
| 40 |
+
self,
|
| 41 |
+
ntoken: int = 6000,
|
| 42 |
+
d_model: int = 128,
|
| 43 |
+
nhead: int = 8,
|
| 44 |
+
d_hid: int = 512,
|
| 45 |
+
nlayers: int = 4,
|
| 46 |
+
dropout: float = 0.1,
|
| 47 |
+
fusion_method: str = "differential_perceiver",
|
| 48 |
+
perturbation_function: str = "crisper",
|
| 49 |
+
use_perturbation_interaction: bool = True,
|
| 50 |
+
mask_path: str = None,
|
| 51 |
+
# Sigma net params
|
| 52 |
+
sigma_min: float = 0.01,
|
| 53 |
+
sigma_max: float = 2.0,
|
| 54 |
+
sigma_init: float = 0.5,
|
| 55 |
+
sigma_hidden_dim: int = 256,
|
| 56 |
+
sigma_num_layers: int = 2,
|
| 57 |
+
# Score decoder params
|
| 58 |
+
score_head_depth: int = 2,
|
| 59 |
+
use_score: bool = True,
|
| 60 |
+
):
|
| 61 |
+
super().__init__()
|
| 62 |
+
self.d_model = d_model
|
| 63 |
+
self.fusion_method = fusion_method
|
| 64 |
+
self.perturbation_function = perturbation_function
|
| 65 |
+
self.use_score = use_score
|
| 66 |
+
|
| 67 |
+
# === Timestep embedder (single, not cascaded) ===
|
| 68 |
+
self.t_embedder = TimestepEmbedder(d_model)
|
| 69 |
+
|
| 70 |
+
# === Perturbation embedder ===
|
| 71 |
+
self.perturbation_embedder = BatchLabelEncoder(ntoken, d_model)
|
| 72 |
+
|
| 73 |
+
# === Expression stream (reused from scDFM) ===
|
| 74 |
+
self.value_encoder_1 = ContinuousValueEncoder(d_model, dropout)
|
| 75 |
+
self.value_encoder_2 = ContinuousValueEncoder(d_model, dropout)
|
| 76 |
+
self.encoder = GeneEncoder(
|
| 77 |
+
ntoken, d_model,
|
| 78 |
+
use_perturbation_interaction=use_perturbation_interaction,
|
| 79 |
+
mask_path=mask_path,
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
self.fusion_layer = nn.Sequential(
|
| 83 |
+
nn.Linear(2 * d_model, d_model),
|
| 84 |
+
nn.GELU(),
|
| 85 |
+
nn.Linear(d_model, d_model),
|
| 86 |
+
nn.LayerNorm(d_model),
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
# === Shared backbone blocks ===
|
| 90 |
+
if fusion_method == "differential_transformer":
|
| 91 |
+
self.blocks = nn.ModuleList([
|
| 92 |
+
DifferentialTransformerBlock(d_model, nhead, i, mlp_ratio=4.0)
|
| 93 |
+
for i in range(nlayers)
|
| 94 |
+
])
|
| 95 |
+
elif fusion_method == "differential_perceiver":
|
| 96 |
+
self.blocks = nn.ModuleList([
|
| 97 |
+
DiffPerceiverBlock(d_model, nhead, i, mlp_ratio=4.0)
|
| 98 |
+
for i in range(nlayers)
|
| 99 |
+
])
|
| 100 |
+
elif fusion_method == "perceiver":
|
| 101 |
+
self.blocks = nn.ModuleList([
|
| 102 |
+
PerceiverBlock(d_model, d_model, heads=nhead, mlp_ratio=4.0, dropout=0.1)
|
| 103 |
+
for _ in range(nlayers)
|
| 104 |
+
])
|
| 105 |
+
else:
|
| 106 |
+
raise ValueError(f"Invalid fusion method: {fusion_method}")
|
| 107 |
+
|
| 108 |
+
# === Per-layer gene AdaLN + adapter ===
|
| 109 |
+
self.gene_adaLN = nn.ModuleList([
|
| 110 |
+
GeneadaLN(d_model, dropout) for _ in range(nlayers)
|
| 111 |
+
])
|
| 112 |
+
self.adapter_layer = nn.ModuleList([
|
| 113 |
+
nn.Sequential(
|
| 114 |
+
nn.Linear(2 * d_model, d_model),
|
| 115 |
+
nn.LeakyReLU(),
|
| 116 |
+
nn.Dropout(dropout),
|
| 117 |
+
nn.Linear(d_model, d_model),
|
| 118 |
+
nn.LeakyReLU(),
|
| 119 |
+
)
|
| 120 |
+
for _ in range(nlayers)
|
| 121 |
+
])
|
| 122 |
+
|
| 123 |
+
# === Velocity decoder head (reused ExprDecoder from scDFM) ===
|
| 124 |
+
self.final_layer = ExprDecoder(d_model, explicit_zero_prob=False, use_batch_labels=True)
|
| 125 |
+
|
| 126 |
+
# === Score decoder head (NEW) ===
|
| 127 |
+
if use_score:
|
| 128 |
+
self.score_decoder = ScoreDecoder(d_model, depth=score_head_depth)
|
| 129 |
+
|
| 130 |
+
# === Anisotropic sigma network (NEW, independent of backbone) ===
|
| 131 |
+
self.sigma_net = AnisotropicSigmaNet(
|
| 132 |
+
d_model=d_model,
|
| 133 |
+
hidden_dim=sigma_hidden_dim,
|
| 134 |
+
num_layers=sigma_num_layers,
|
| 135 |
+
sigma_min=sigma_min,
|
| 136 |
+
sigma_max=sigma_max,
|
| 137 |
+
sigma_init=sigma_init,
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
self.initialize_weights()
|
| 141 |
+
|
| 142 |
+
def initialize_weights(self):
|
| 143 |
+
def _basic_init(module):
|
| 144 |
+
if isinstance(module, nn.Linear):
|
| 145 |
+
torch.nn.init.xavier_uniform_(module.weight)
|
| 146 |
+
if module.bias is not None:
|
| 147 |
+
nn.init.constant_(module.bias, 0)
|
| 148 |
+
self.apply(_basic_init)
|
| 149 |
+
# Re-initialize sigma bias after global init
|
| 150 |
+
self.sigma_net._init_bias(self.sigma_net.sigma_min +
|
| 151 |
+
(self.sigma_net.sigma_max - self.sigma_net.sigma_min) * 0.5)
|
| 152 |
+
|
| 153 |
+
def get_perturbation_emb(
|
| 154 |
+
self,
|
| 155 |
+
perturbation_id: Optional[Tensor] = None,
|
| 156 |
+
perturbation_emb: Optional[Tensor] = None,
|
| 157 |
+
cell_1: Optional[Tensor] = None,
|
| 158 |
+
) -> Tensor:
|
| 159 |
+
"""Get perturbation embedding, replicating scDFM logic."""
|
| 160 |
+
assert perturbation_emb is None or perturbation_id is None
|
| 161 |
+
if perturbation_id is not None:
|
| 162 |
+
if self.perturbation_function == "crisper":
|
| 163 |
+
perturbation_emb = self.encoder(perturbation_id)
|
| 164 |
+
else:
|
| 165 |
+
perturbation_emb = self.perturbation_embedder(perturbation_id)
|
| 166 |
+
perturbation_emb = perturbation_emb.mean(1)
|
| 167 |
+
elif perturbation_emb is not None:
|
| 168 |
+
perturbation_emb = perturbation_emb.to(cell_1.device, dtype=cell_1.dtype)
|
| 169 |
+
if perturbation_emb.dim() == 1:
|
| 170 |
+
perturbation_emb = perturbation_emb.unsqueeze(0)
|
| 171 |
+
if perturbation_emb.size(0) == 1:
|
| 172 |
+
perturbation_emb = perturbation_emb.expand(cell_1.shape[0], -1).contiguous()
|
| 173 |
+
perturbation_emb = self.perturbation_embedder.enc_norm(perturbation_emb)
|
| 174 |
+
return perturbation_emb
|
| 175 |
+
|
| 176 |
+
def forward(
|
| 177 |
+
self,
|
| 178 |
+
gene_id: Tensor, # (B, G) gene token IDs
|
| 179 |
+
cell_1: Tensor, # (B, G) source expression
|
| 180 |
+
x_t: Tensor, # (B, G) noised target expression
|
| 181 |
+
t: Tensor, # (B,) timestep
|
| 182 |
+
perturbation_id: Optional[Tensor] = None,
|
| 183 |
+
) -> Tuple[Tensor, Optional[Tensor], Tensor]:
|
| 184 |
+
if t.dim() == 0:
|
| 185 |
+
t = t.repeat(cell_1.size(0))
|
| 186 |
+
|
| 187 |
+
# 1. Expression embedding (aligned with scDFM)
|
| 188 |
+
gene_emb = self.encoder(gene_id) # (B, G, d)
|
| 189 |
+
val_emb_1 = self.value_encoder_1(x_t)
|
| 190 |
+
val_emb_2 = self.value_encoder_2(cell_1) + gene_emb
|
| 191 |
+
x = self.fusion_layer(torch.cat([val_emb_1, val_emb_2], dim=-1)) + gene_emb
|
| 192 |
+
|
| 193 |
+
# 2. Conditioning vector (single t, no cascaded)
|
| 194 |
+
t_emb = self.t_embedder(t)
|
| 195 |
+
pert_emb = self.get_perturbation_emb(perturbation_id, cell_1=cell_1)
|
| 196 |
+
c = t_emb + pert_emb
|
| 197 |
+
|
| 198 |
+
# 3. Shared backbone
|
| 199 |
+
for i, block in enumerate(self.blocks):
|
| 200 |
+
x = self.gene_adaLN[i](gene_emb, x)
|
| 201 |
+
pert_exp = pert_emb[:, None, :].expand(-1, x.size(1), -1)
|
| 202 |
+
x = torch.cat([x, pert_exp], dim=-1)
|
| 203 |
+
x = self.adapter_layer[i](x)
|
| 204 |
+
x = block(x, val_emb_2, c)
|
| 205 |
+
|
| 206 |
+
# 4a. Velocity head
|
| 207 |
+
x_with_pert = torch.cat([x, pert_emb[:, None, :].expand(-1, x.size(1), -1)], dim=-1)
|
| 208 |
+
pred_velocity = self.final_layer(x_with_pert)["pred"] # (B, G)
|
| 209 |
+
|
| 210 |
+
# 4b. Score head
|
| 211 |
+
pred_score = None
|
| 212 |
+
if self.use_score:
|
| 213 |
+
pred_score = self.score_decoder(x, pert_emb) # (B, G)
|
| 214 |
+
|
| 215 |
+
# 4c. Sigma (independent of backbone, only depends on pert_emb, t, gene_emb)
|
| 216 |
+
sigma_g = self.sigma_net(pert_emb, t, gene_emb) # (B, G)
|
| 217 |
+
|
| 218 |
+
return pred_velocity, pred_score, sigma_g
|
GRN/SB/src/ot_anisotropic.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Anisotropic OT sampler with Mahalanobis cost weighted by learned σ_g.
|
| 3 |
+
|
| 4 |
+
Cost: C(x₀, x_T | σ_g) = Σ_g (x₀_g - x_T_g)² / (σ_g² + ε)
|
| 5 |
+
|
| 6 |
+
Uses pot.sinkhorn for entropic OT (vs baseline's pot.emd).
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import warnings
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
import ot as pot
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class AnisotropicOTSampler:
|
| 16 |
+
"""
|
| 17 |
+
Minibatch OT sampler with anisotropic Mahalanobis cost.
|
| 18 |
+
|
| 19 |
+
Called every train_step with current σ_g (detached).
|
| 20 |
+
No caching or periodic re-coupling needed.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
def __init__(
|
| 24 |
+
self,
|
| 25 |
+
method: str = "sinkhorn",
|
| 26 |
+
reg: float = 0.05,
|
| 27 |
+
sigma_min: float = 0.01,
|
| 28 |
+
eps: float = 1e-6,
|
| 29 |
+
):
|
| 30 |
+
self.method = method
|
| 31 |
+
self.reg = reg
|
| 32 |
+
self.sigma_min = sigma_min
|
| 33 |
+
self.eps = eps
|
| 34 |
+
|
| 35 |
+
def _compute_cost(self, x0, x1, sigma_g=None):
|
| 36 |
+
"""
|
| 37 |
+
Compute cost matrix.
|
| 38 |
+
|
| 39 |
+
Args:
|
| 40 |
+
x0: (N, G) source
|
| 41 |
+
x1: (M, G) target
|
| 42 |
+
sigma_g: (G,) per-gene sigma, or None for isotropic
|
| 43 |
+
|
| 44 |
+
Returns:
|
| 45 |
+
M: (N, M) cost matrix (numpy)
|
| 46 |
+
"""
|
| 47 |
+
if sigma_g is not None:
|
| 48 |
+
# Mahalanobis cost: w_g = 1 / (σ_g² + ε)
|
| 49 |
+
w = (1.0 / (sigma_g ** 2 + self.eps))
|
| 50 |
+
w = w.clamp(max=1.0 / (self.sigma_min ** 2)) # prevent extreme weights
|
| 51 |
+
diff = x0.unsqueeze(1) - x1.unsqueeze(0) # (N, M, G)
|
| 52 |
+
cost = (diff ** 2 * w.unsqueeze(0).unsqueeze(0)).sum(-1) # (N, M)
|
| 53 |
+
else:
|
| 54 |
+
# Isotropic Euclidean
|
| 55 |
+
cost = torch.cdist(x0, x1, p=2) ** 2 # (N, M)
|
| 56 |
+
|
| 57 |
+
# Normalize to prevent Sinkhorn numerical issues
|
| 58 |
+
cost_max = cost.max()
|
| 59 |
+
if cost_max > 0:
|
| 60 |
+
cost = cost / cost_max
|
| 61 |
+
|
| 62 |
+
return cost.detach().cpu().numpy()
|
| 63 |
+
|
| 64 |
+
def get_plan(self, x0, x1, sigma_g=None):
|
| 65 |
+
"""Compute OT plan."""
|
| 66 |
+
M = self._compute_cost(x0, x1, sigma_g)
|
| 67 |
+
a = pot.unif(x0.shape[0])
|
| 68 |
+
b = pot.unif(x1.shape[0])
|
| 69 |
+
|
| 70 |
+
if self.method == "sinkhorn":
|
| 71 |
+
plan = pot.sinkhorn(a, b, M, reg=self.reg, warn=False)
|
| 72 |
+
elif self.method == "exact":
|
| 73 |
+
plan = pot.emd(a, b, M)
|
| 74 |
+
else:
|
| 75 |
+
raise ValueError(f"Unknown OT method: {self.method}")
|
| 76 |
+
|
| 77 |
+
# Fallback on numerical errors
|
| 78 |
+
if not np.all(np.isfinite(plan)) or np.abs(plan.sum()) < 1e-8:
|
| 79 |
+
warnings.warn("Numerical error in OT plan, falling back to uniform.")
|
| 80 |
+
plan = np.ones_like(plan) / plan.size
|
| 81 |
+
|
| 82 |
+
return plan
|
| 83 |
+
|
| 84 |
+
def sample_plan_fix_x0(self, x0, x1, sigma_g=None):
|
| 85 |
+
"""
|
| 86 |
+
Sample matched x1 from OT plan, keeping x0 order.
|
| 87 |
+
|
| 88 |
+
Args:
|
| 89 |
+
x0: (N, G) source (noise)
|
| 90 |
+
x1: (M, G) target (perturbed expression)
|
| 91 |
+
sigma_g: (G,) per-gene sigma or None
|
| 92 |
+
|
| 93 |
+
Returns:
|
| 94 |
+
x0: (N, G) unchanged
|
| 95 |
+
x1_matched: (N, G) reordered target
|
| 96 |
+
"""
|
| 97 |
+
pi = self.get_plan(x0, x1, sigma_g)
|
| 98 |
+
matched_indices = []
|
| 99 |
+
for i in range(pi.shape[0]):
|
| 100 |
+
prob = pi[i]
|
| 101 |
+
prob_sum = prob.sum()
|
| 102 |
+
if prob_sum > 0:
|
| 103 |
+
prob = prob / prob_sum
|
| 104 |
+
else:
|
| 105 |
+
prob = np.ones(pi.shape[1]) / pi.shape[1]
|
| 106 |
+
j = np.random.choice(pi.shape[1], p=prob)
|
| 107 |
+
matched_indices.append(j)
|
| 108 |
+
matched_indices = torch.tensor(matched_indices, dtype=torch.long, device=x1.device)
|
| 109 |
+
return x0, x1[matched_indices]
|
GRN/SB/src/utils.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Re-export scDFM utility functions from the central import module.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from ._scdfm_imports import (
|
| 6 |
+
save_checkpoint,
|
| 7 |
+
load_checkpoint,
|
| 8 |
+
make_lognorm_poisson_noise,
|
| 9 |
+
pick_eval_score,
|
| 10 |
+
process_vocab,
|
| 11 |
+
set_requires_grad_for_p_only,
|
| 12 |
+
get_perturbation_emb,
|
| 13 |
+
GeneVocab,
|
| 14 |
+
)
|
GRN/baseline/baseline_5418102.out
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a54b5b210fdf6666037de0b25be137b0eb687cff9e44eeef3cd6bc06ece893de
|
| 3 |
+
size 44993224
|
GRN/baseline/baseline_d128_5527533.out
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2aca6813ea4c289ea96e195c67bf8a3ad93bc2333b789ed2a5e7d6443288d688
|
| 3 |
+
size 20793723
|
GRN/baseline/d128/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_100000/checkpoint.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5d9f865cceaf5416e2c75eda60ba310268f8d532a5bccbc49d8118e1ac267ba1
|
| 3 |
+
size 697879461
|
GRN/baseline/d128/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/loss_curve.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/agg_results.csv
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
statistic,overlap_at_N,overlap_at_50,overlap_at_100,overlap_at_200,overlap_at_500,precision_at_N,precision_at_50,precision_at_100,precision_at_200,precision_at_500,de_spearman_sig,de_direction_match,de_spearman_lfc_sig,de_sig_genes_recall,de_nsig_counts_real,de_nsig_counts_pred,pr_auc,roc_auc,pearson_delta,mse,mae,mse_delta,mae_delta,discrimination_score_l1,discrimination_score_l2,discrimination_score_cosine,pearson_edistance,clustering_agreement
|
| 2 |
+
count,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0
|
| 3 |
+
null_count,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
|
| 4 |
+
mean,0.06870723975901172,0.05673145117294057,0.06178054611098185,0.06279620935091165,0.06870723975901172,0.1328009007879472,0.05641025641025644,0.060512820512820545,0.06294871794871797,0.09405128205128206,0.3302981554355994,0.7501098846062679,0.7070736354059303,0.9439070775427181,137.89743589743588,986.0,0.2992672357260559,0.5052572787713436,0.17479875495131963,0.17378047987436637,0.24015644307319933,0.1737804794922853,0.24015644383736146,0.7409598948060486,0.7712031558185406,0.7054569362261671,0.5894095251440064,0.5539158017707632
|
| 5 |
+
std,0.07679721643631165,0.03580260630440812,0.047601325636955266,0.05660971461975514,0.07679721643631165,0.07267391308620531,0.0360910529434111,0.048501151258648546,0.057739409472281816,0.06921588983902344,0.0,0.07801718304990432,0.045601523705551456,0.025656279782161437,72.75871981447884,2.6950246556825492,0.0,2.2494727197818324e-16,0.15601927014310582,0.01723582388540501,0.012264578613397855,0.01723582356820253,0.012264577664446134,0.2464878295278745,0.2277797530123566,0.2699445454348245,0.0,1.1247363598909162e-16
|
| 6 |
+
min,0.014084507042253521,0.0,0.01,0.014084507042253521,0.014084507042253521,0.03242147922998987,0.0,0.01,0.01,0.018,0.33029815543559937,0.6,0.5777058279370952,0.8837209302325582,32.0,980.0,0.29926723572605585,0.505257278771344,-0.04524721950292587,0.11434374749660492,0.2019038200378418,0.11434374749660492,0.2019038200378418,0.20512820512820518,0.23076923076923073,0.05128205128205132,0.5894095251440064,0.5539158017707626
|
| 7 |
+
25%,0.03636363636363636,0.03125,0.030927835051546393,0.03636363636363636,0.03636363636363636,0.09898989898989899,0.02,0.03,0.03,0.062,0.33029815543559937,0.6948051948051948,0.6843389989281307,0.9344262295081968,103.0,985.0,0.29926723572605585,0.505257278771344,0.0786370113492012,0.16741803288459778,0.23513180017471313,0.16741803288459778,0.23513180017471313,0.6153846153846154,0.641025641025641,0.5384615384615384,0.5894095251440064,0.5539158017707626
|
| 8 |
+
50%,0.052083333333333336,0.06,0.05263157894736842,0.052083333333333336,0.052083333333333336,0.1116751269035533,0.06,0.05,0.05,0.074,0.33029815543559937,0.75,0.7021493500976977,0.9484536082474226,117.0,986.0,0.29926723572605585,0.505257278771344,0.1494819074869156,0.17376112937927246,0.2394474297761917,0.17376112937927246,0.2394474297761917,0.7948717948717949,0.8461538461538461,0.7692307692307692,0.5894095251440064,0.5539158017707626
|
| 9 |
+
75%,0.06578947368421052,0.08,0.07,0.06578947368421052,0.06578947368421052,0.15182186234817813,0.08,0.07,0.07,0.108,0.33029815543559937,0.8,0.7435060664157069,0.9578947368421052,158.0,988.0,0.29926723572605585,0.505257278771344,0.23058994114398956,0.1866130381822586,0.24731183052062988,0.1866130381822586,0.24731183052062988,1.0,0.9743589743589743,0.9487179487179487,0.5894095251440064,0.5539158017707626
|
| 10 |
+
max,0.38903394255874674,0.16,0.23,0.305,0.38903394255874674,0.37955465587044535,0.16,0.23,0.305,0.372,0.33029815543559937,0.9347258485639687,0.798831345858713,1.0,384.0,992.0,0.29926723572605585,0.505257278771344,0.7125794887542725,0.21626025438308716,0.2681756019592285,0.21626025438308716,0.2681756019592285,1.0,1.0,1.0,0.5894095251440064,0.5539158017707626
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/checkpoint.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:47a078dc2cd96c574088e1d889d8f32df372b603fde6e76435d40b5c0189fcc1
|
| 3 |
+
size 697879461
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/pred.h5ad
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:37151d34a01a3d67927d627db5196d72464cbe2d918fc7f1719d16103a6a905f
|
| 3 |
+
size 49952825
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/real.h5ad
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d906eb6262ae9d587a091c8f6722e562182c9cd62e18f7c2faecbd7c4935ec3c
|
| 3 |
+
size 75069953
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/iteration_215000/results.csv
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
perturbation,overlap_at_N,overlap_at_50,overlap_at_100,overlap_at_200,overlap_at_500,precision_at_N,precision_at_50,precision_at_100,precision_at_200,precision_at_500,de_spearman_sig,de_direction_match,de_spearman_lfc_sig,de_sig_genes_recall,de_nsig_counts_real,de_nsig_counts_pred,pr_auc,roc_auc,pearson_delta,mse,mae,mse_delta,mae_delta,discrimination_score_l1,discrimination_score_l2,discrimination_score_cosine,pearson_edistance,clustering_agreement
|
| 2 |
+
AHR+FEV,0.10747663551401869,0.16,0.12,0.1,0.10747663551401869,0.2077001013171226,0.16,0.12,0.1,0.164,0.33029815543559937,0.7149532710280374,0.7405809655314368,0.9579439252336449,214.0,987.0,0.29926723572605585,0.505257278771344,0.1687520146369934,0.1943982094526291,0.2592649459838867,0.1943982094526291,0.2592649459838867,0.9230769230769231,0.9743589743589743,0.9487179487179487,0.5894095251440064,0.5539158017707626
|
| 3 |
+
AHR+KLF1,0.05714285714285714,0.08,0.07,0.05714285714285714,0.05714285714285714,0.1347517730496454,0.08,0.07,0.055,0.086,0.33029815543559937,0.6357142857142857,0.6670808812268919,0.95,140.0,987.0,0.29926723572605585,0.505257278771344,0.005311842076480389,0.1893630176782608,0.2517276704311371,0.1893630176782608,0.2517276704311371,0.41025641025641024,0.41025641025641024,0.2564102564102564,0.5894095251440064,0.5539158017707626
|
| 4 |
+
BCL2L11+BAK1,0.03125,0.03125,0.03125,0.03125,0.03125,0.03242147922998987,0.02,0.01,0.025,0.026,0.33029815543559937,0.78125,0.7206232813932172,1.0,32.0,987.0,0.29926723572605585,0.505257278771344,0.0065612467005848885,0.17376112937927246,0.23513180017471313,0.17376112937927246,0.23513180017471313,0.3589743589743589,0.3846153846153846,0.9487179487179487,0.5894095251440064,0.5539158017707626
|
| 5 |
+
BPGM+ZBTB1,0.03636363636363636,0.06,0.03,0.03636363636363636,0.03636363636363636,0.10547667342799188,0.06,0.03,0.03,0.058,0.33029815543559937,0.7909090909090909,0.7813842955641034,0.9454545454545454,110.0,986.0,0.29926723572605585,0.505257278771344,0.19321857392787933,0.1814109832048416,0.2446288764476776,0.1814109832048416,0.2446288764476776,0.717948717948718,0.717948717948718,0.5897435897435898,0.5894095251440064,0.5539158017707626
|
| 6 |
+
CBL+PTPN12,0.057692307692307696,0.04,0.06,0.057692307692307696,0.057692307692307696,0.09898989898989899,0.04,0.06,0.045,0.076,0.33029815543559937,0.8173076923076923,0.69942387709378,0.9423076923076923,104.0,990.0,0.29926723572605585,0.505257278771344,0.2184145301580429,0.16535134613513947,0.229812890291214,0.16535134613513947,0.229812890291214,0.6666666666666667,0.717948717948718,0.641025641025641,0.5894095251440064,0.5539158017707626
|
| 7 |
+
CBL+PTPN9,0.05263157894736842,0.02,0.05263157894736842,0.05263157894736842,0.05263157894736842,0.09210526315789473,0.02,0.05,0.035,0.072,0.33029815543559937,0.8210526315789474,0.7017413701391416,0.9578947368421052,95.0,988.0,0.29926723572605585,0.505257278771344,0.27182719111442566,0.15468913316726685,0.22347722947597504,0.15468913316726685,0.22347724437713623,1.0,1.0,0.8205128205128205,0.5894095251440064,0.5539158017707626
|
| 8 |
+
CBL+TGFBR2,0.0410958904109589,0.04,0.0410958904109589,0.0410958904109589,0.0410958904109589,0.06598984771573604,0.04,0.04,0.04,0.046,0.33029815543559937,0.7945205479452054,0.7141622955877815,0.8904109589041096,73.0,985.0,0.29926723572605585,0.505257278771344,0.1311197429895401,0.18153353035449982,0.24506469070911407,0.18153353035449982,0.24506469070911407,0.6153846153846154,0.5128205128205128,0.3076923076923077,0.5894095251440064,0.5539158017707626
|
| 9 |
+
CBL+UBASH3A,0.02127659574468085,0.02127659574468085,0.02127659574468085,0.02127659574468085,0.02127659574468085,0.04369918699186992,0.02,0.01,0.01,0.018,0.33029815543559937,0.7446808510638298,0.5777058279370952,0.9148936170212766,47.0,984.0,0.29926723572605585,0.505257278771344,0.1250695437192917,0.1900354027748108,0.24834005534648895,0.1900354027748108,0.24834005534648895,0.6153846153846154,0.5384615384615384,0.41025641025641024,0.5894095251440064,0.5539158017707626
|
| 10 |
+
CBL+UBASH3B,0.03424657534246575,0.04,0.03,0.03424657534246575,0.03424657534246575,0.1414141414141414,0.04,0.03,0.03,0.108,0.33029815543559937,0.8287671232876712,0.7301904633256788,0.958904109589041,146.0,990.0,0.29926723572605585,0.505257278771344,0.2766973078250885,0.17427249252796173,0.23610509932041168,0.17427249252796173,0.23610509932041168,0.8717948717948718,0.8205128205128205,0.7692307692307692,0.5894095251440064,0.5539158017707626
|
| 11 |
+
CDKN1B+CDKN1A,0.04854368932038835,0.06,0.05,0.04854368932038835,0.04854368932038835,0.09979633401221996,0.06,0.05,0.065,0.06,0.33029815543559937,0.7669902912621359,0.7339322584896543,0.9514563106796117,103.0,982.0,0.29926723572605585,0.505257278771344,0.23911680281162262,0.16785940527915955,0.2394474297761917,0.16785940527915955,0.2394474297761917,1.0,1.0,0.9487179487179487,0.5894095251440064,0.5539158017707626
|
| 12 |
+
CDKN1C+CDKN1B,0.06363636363636363,0.06,0.06,0.06363636363636363,0.06363636363636363,0.10681586978636826,0.06,0.06,0.07,0.064,0.33029815543559937,0.7727272727272727,0.7458140604290081,0.9545454545454546,110.0,983.0,0.29926723572605585,0.505257278771344,0.18302451074123383,0.17865359783172607,0.24654284119606018,0.17865359783172607,0.24654284119606018,0.9743589743589743,0.9743589743589743,0.9487179487179487,0.5894095251440064,0.5539158017707626
|
| 13 |
+
CEBPE+CEBPA,0.38903394255874674,0.1,0.23,0.305,0.38903394255874674,0.3775303643724696,0.1,0.23,0.305,0.372,0.33029815543559937,0.9347258485639687,0.6843389989281307,0.9738903394255874,383.0,988.0,0.29926723572605585,0.505257278771344,0.7125794887542725,0.11434374749660492,0.2019038200378418,0.11434374749660492,0.2019038200378418,1.0,1.0,1.0,0.5894095251440064,0.5539158017707626
|
| 14 |
+
CEBPE+CNN1,0.06802721088435375,0.06,0.08,0.06802721088435375,0.06802721088435375,0.14198782961460446,0.06,0.08,0.085,0.096,0.33029815543559937,0.7482993197278912,0.7476175001322561,0.9523809523809523,147.0,986.0,0.29926723572605585,0.505257278771344,0.0786370113492012,0.1708095520734787,0.24067263305187225,0.1708095520734787,0.24067263305187225,0.9743589743589743,0.9487179487179487,0.7948717948717949,0.5894095251440064,0.5539158017707626
|
| 15 |
+
ETS2+IGDCC3,0.05217391304347826,0.08,0.05,0.05217391304347826,0.05217391304347826,0.10986775178026449,0.08,0.05,0.06,0.078,0.33029815543559937,0.6,0.6357508209143723,0.9391304347826087,115.0,983.0,0.29926723572605585,0.505257278771344,-0.04524721950292587,0.18772271275520325,0.24757909774780273,0.18772269785404205,0.24757909774780273,0.8205128205128205,0.7692307692307692,0.3076923076923077,0.5894095251440064,0.5539158017707626
|
| 16 |
+
ETS2+IKZF3,0.05913978494623656,0.1,0.07,0.05913978494623656,0.05913978494623656,0.18346774193548387,0.1,0.07,0.065,0.114,0.33029815543559937,0.6989247311827957,0.7435060664157069,0.978494623655914,186.0,992.0,0.29926723572605585,0.505257278771344,0.17796382308006287,0.1700170636177063,0.2362302541732788,0.1700170636177063,0.2362302541732788,0.5128205128205128,0.46153846153846156,0.5128205128205128,0.5894095251440064,0.5539158017707626
|
| 17 |
+
FEV+ISL2,0.06578947368421052,0.06,0.08,0.06578947368421052,0.06578947368421052,0.1458966565349544,0.06,0.08,0.07,0.098,0.33029815543559937,0.7697368421052632,0.7633361254038531,0.9473684210526315,152.0,987.0,0.29926723572605585,0.505257278771344,0.17916952073574066,0.17820444703102112,0.2440398633480072,0.17820444703102112,0.2440398633480072,1.0,1.0,0.9230769230769231,0.5894095251440064,0.5539158017707626
|
| 18 |
+
FOSB+CEBPB,0.21568627450980393,0.12,0.17,0.185,0.21568627450980393,0.25227963525835867,0.12,0.17,0.185,0.186,0.33029815543559937,0.8862745098039215,0.6878863777851776,0.9764705882352941,255.0,987.0,0.29926723572605585,0.505257278771344,0.5117895603179932,0.14016836881637573,0.22231537103652954,0.14016836881637573,0.22231537103652954,1.0,1.0,1.0,0.5894095251440064,0.5539158017707626
|
| 19 |
+
FOXA3+FOXA1,0.05,0.06,0.06,0.05,0.05,0.1334012219959267,0.06,0.06,0.075,0.076,0.33029815543559937,0.75,0.7137284058604855,0.9357142857142857,140.0,982.0,0.29926723572605585,0.505257278771344,0.14031392335891724,0.15765589475631714,0.2285369634628296,0.15765589475631714,0.2285369634628296,1.0,1.0,0.9487179487179487,0.5894095251440064,0.5539158017707626
|
| 20 |
+
KLF1+BAK1,0.052083333333333336,0.04,0.052083333333333336,0.052083333333333336,0.052083333333333336,0.08934010152284264,0.04,0.05,0.05,0.056,0.33029815543559937,0.65625,0.6824349997286001,0.9166666666666666,96.0,985.0,0.29926723572605585,0.505257278771344,-0.008375725708901882,0.17321588099002838,0.23909367620944977,0.17321588099002838,0.23909367620944977,0.4871794871794872,0.6153846153846154,0.10256410256410253,0.5894095251440064,0.5539158017707626
|
| 21 |
+
KLF1+CEBPA,0.3333333333333333,0.1,0.2,0.225,0.3333333333333333,0.37955465587044535,0.1,0.2,0.225,0.326,0.33029815543559937,0.8984375,0.6779425083142634,0.9765625,384.0,988.0,0.29926723572605585,0.505257278771344,0.46055611968040466,0.15729443728923798,0.2413816899061203,0.15729443728923798,0.2413816899061203,1.0,1.0,0.9743589743589743,0.5894095251440064,0.5539158017707626
|
| 22 |
+
KLF1+CLDN6,0.04580152671755725,0.02,0.03,0.04580152671755725,0.04580152671755725,0.12449392712550607,0.02,0.03,0.055,0.064,0.33029815543559937,0.7633587786259542,0.6997544309203502,0.9389312977099237,131.0,988.0,0.29926723572605585,0.505257278771344,0.111509308218956,0.19790221750736237,0.2643733620643616,0.19790221750736237,0.2643733620643616,0.20512820512820518,0.3076923076923077,0.5128205128205128,0.5894095251440064,0.5539158017707626
|
| 23 |
+
LYL1+CEBPB,0.06962025316455696,0.04,0.07,0.06962025316455696,0.06962025316455696,0.15182186234817813,0.04,0.07,0.08,0.096,0.33029815543559937,0.7025316455696202,0.6918626767742858,0.9493670886075949,158.0,988.0,0.29926723572605585,0.505257278771344,0.08169694989919662,0.16782453656196594,0.23627114295959473,0.16782453656196594,0.23627114295959473,1.0,0.8461538461538461,0.7692307692307692,0.5894095251440064,0.5539158017707626
|
| 24 |
+
MAP2K3+ELMSAN1,0.01948051948051948,0.02,0.01,0.01948051948051948,0.01948051948051948,0.14285714285714285,0.02,0.01,0.04,0.11,0.33029815543559937,0.6948051948051948,0.7132342659355881,0.9090909090909091,154.0,980.0,0.29926723572605585,0.505257278771344,0.0429171547293663,0.16872559487819672,0.23630212247371674,0.16872559487819672,0.23630212247371674,0.9743589743589743,0.9487179487179487,0.7692307692307692,0.5894095251440064,0.5539158017707626
|
| 25 |
+
MAP2K3+IKZF3,0.037037037037037035,0.06,0.03,0.037037037037037035,0.037037037037037035,0.10435663627152988,0.06,0.03,0.035,0.074,0.33029815543559937,0.7314814814814815,0.798831345858713,0.9537037037037037,108.0,987.0,0.29926723572605585,0.505257278771344,0.4110649526119232,0.16886118054389954,0.23300549387931824,0.16886118054389954,0.23300549387931824,1.0,0.9743589743589743,0.8717948717948718,0.5894095251440064,0.5539158017707626
|
| 26 |
+
MAP2K3+MAP2K6,0.014084507042253521,0.02,0.014084507042253521,0.014084507042253521,0.014084507042253521,0.06829765545361875,0.02,0.01,0.02,0.056,0.33029815543559937,0.7323943661971831,0.7579607103143848,0.9436619718309859,71.0,981.0,0.29926723572605585,0.505257278771344,0.10516710579395294,0.16741803288459778,0.23477335274219513,0.16741803288459778,0.23477335274219513,0.6153846153846154,0.8205128205128205,0.9743589743589743,0.5894095251440064,0.5539158017707626
|
| 27 |
+
MAP2K3+SLC38A2,0.017543859649122806,0.02,0.017543859649122806,0.017543859649122806,0.017543859649122806,0.05279187817258883,0.02,0.02,0.015,0.034,0.33029815543559937,0.8421052631578947,0.7629220779220779,0.9122807017543859,57.0,985.0,0.29926723572605585,0.505257278771344,0.2121874839067459,0.15877388417720795,0.22804735600948334,0.15877388417720795,0.22804735600948334,0.5897435897435898,0.8461538461538461,0.9743589743589743,0.5894095251440064,0.5539158017707626
|
| 28 |
+
MAP2K6+ELMSAN1,0.05813953488372093,0.06,0.05813953488372093,0.05813953488372093,0.05813953488372093,0.07739307535641547,0.06,0.05,0.04,0.062,0.33029815543559937,0.6627906976744186,0.7021493500976977,0.8837209302325582,86.0,982.0,0.29926723572605585,0.505257278771344,0.029662083834409714,0.17078661918640137,0.23623931407928467,0.17078661918640137,0.23623931407928467,0.7948717948717949,0.8205128205128205,0.8205128205128205,0.5894095251440064,0.5539158017707626
|
| 29 |
+
MAPK1+IKZF3,0.07317073170731707,0.14,0.1,0.07317073170731707,0.07317073170731707,0.15943491422805248,0.14,0.1,0.075,0.088,0.33029815543559937,0.6646341463414634,0.6604438882049259,0.9634146341463414,164.0,991.0,0.29926723572605585,0.505257278771344,0.025348778814077377,0.18578451871871948,0.24731183052062988,0.18578451871871948,0.24731183052062988,0.717948717948718,0.4871794871794872,0.5641025641025641,0.5894095251440064,0.5539158017707626
|
| 30 |
+
PTPN12+PTPN9,0.02040816326530612,0.02,0.02040816326530612,0.02040816326530612,0.02040816326530612,0.0922920892494929,0.02,0.02,0.025,0.062,0.33029815543559937,0.8061224489795918,0.7016082747713852,0.9285714285714286,98.0,986.0,0.29926723572605585,0.505257278771344,0.1817389279603958,0.15891411900520325,0.22618001699447632,0.15891411900520325,0.22618001699447632,0.7435897435897436,0.8461538461538461,0.6923076923076923,0.5894095251440064,0.5539158017707626
|
| 31 |
+
PTPN12+UBASH3A,0.06956521739130435,0.04,0.07,0.06956521739130435,0.06956521739130435,0.1065989847715736,0.04,0.07,0.065,0.078,0.33029815543559937,0.8,0.6923661275410227,0.9130434782608695,115.0,985.0,0.29926723572605585,0.505257278771344,0.1494819074869156,0.17540070414543152,0.2363496571779251,0.17540070414543152,0.2363496571779251,0.4871794871794872,0.41025641025641024,0.5384615384615384,0.5894095251440064,0.5539158017707626
|
| 32 |
+
SGK1+S1PR2,0.05641025641025641,0.06,0.06,0.05641025641025641,0.05641025641025641,0.18800813008130082,0.06,0.06,0.06,0.138,0.33029815543559937,0.6871794871794872,0.698462657228238,0.9487179487179487,195.0,984.0,0.29926723572605585,0.505257278771344,0.07468250393867493,0.1918719857931137,0.2555564343929291,0.1918719857931137,0.2555564343929291,0.20512820512820518,0.8717948717948718,0.7435897435897436,0.5894095251440064,0.5539158017707626
|
| 33 |
+
SGK1+TBX2,0.04918032786885246,0.1,0.06,0.04918032786885246,0.04918032786885246,0.11550151975683891,0.1,0.06,0.03,0.072,0.33029815543559937,0.7131147540983607,0.7076060118266526,0.9344262295081968,122.0,987.0,0.29926723572605585,0.505257278771344,0.14901331067085266,0.1866130381822586,0.24730084836483002,0.1866130381822586,0.24730084836483002,0.8205128205128205,0.8461538461538461,0.641025641025641,0.5894095251440064,0.5539158017707626
|
| 34 |
+
SGK1+TBX3,0.01834862385321101,0.0,0.01,0.01834862385321101,0.01834862385321101,0.10233029381965553,0.0,0.01,0.015,0.058,0.33029815543559937,0.6880733944954128,0.6805027715466899,0.926605504587156,109.0,987.0,0.29926723572605585,0.505257278771344,0.18228711187839508,0.1877463310956955,0.2486482709646225,0.1877463310956955,0.2486482709646225,0.6923076923076923,0.8205128205128205,0.8974358974358975,0.5894095251440064,0.5539158017707626
|
| 35 |
+
TBX3+TBX2,0.024691358024691357,0.02,0.04,0.024691358024691357,0.024691358024691357,0.15736040609137056,0.02,0.04,0.02,0.124,0.33029815543559937,0.7901234567901234,0.7589743007047635,0.9567901234567902,162.0,985.0,0.29926723572605585,0.505257278771344,0.23058994114398956,0.21626025438308716,0.2681756019592285,0.21626025438308716,0.2681756019592285,0.8717948717948718,1.0,0.9230769230769231,0.5894095251440064,0.5539158017707626
|
| 36 |
+
TGFBR2+IGDCC3,0.05504587155963303,0.08,0.06,0.05504587155963303,0.05504587155963303,0.09959349593495935,0.08,0.06,0.03,0.068,0.33029815543559937,0.6880733944954128,0.6375651690339922,0.8990825688073395,109.0,984.0,0.29926723572605585,0.505257278771344,0.06332975625991821,0.1883746236562729,0.2505951523780823,0.1883746236562729,0.2505951523780823,0.4871794871794872,0.641025641025641,0.2564102564102564,0.5894095251440064,0.5539158017707626
|
| 37 |
+
TGFBR2+PRTG,0.05128205128205128,0.06,0.04,0.05128205128205128,0.05128205128205128,0.1116751269035533,0.06,0.04,0.05,0.062,0.33029815543559937,0.6581196581196581,0.6395338379674735,0.9401709401709402,117.0,985.0,0.29926723572605585,0.505257278771344,0.10318620502948761,0.17879818379878998,0.2450597584247589,0.17879818379878998,0.2450597584247589,0.6153846153846154,0.6666666666666667,0.5128205128205128,0.5894095251440064,0.5539158017707626
|
| 38 |
+
UBASH3B+OSR2,0.04580152671755725,0.02,0.05,0.04580152671755725,0.04580152671755725,0.12664640324214793,0.02,0.05,0.045,0.07,0.33029815543559937,0.6030534351145038,0.636700138874052,0.9541984732824428,131.0,987.0,0.29926723572605585,0.505257278771344,-0.017943568527698517,0.18036401271820068,0.24724172055721283,0.18036401271820068,0.24724172055721283,0.28205128205128205,0.23076923076923073,0.05128205128205132,0.5894095251440064,0.5539158017707626
|
| 39 |
+
UBASH3B+ZBTB25,0.030927835051546393,0.04,0.030927835051546393,0.030927835051546393,0.030927835051546393,0.09302325581395349,0.04,0.03,0.035,0.054,0.33029815543559937,0.7938144329896907,0.747732817750771,0.9484536082474226,97.0,989.0,0.29926723572605585,0.505257278771344,0.25990623235702515,0.16194941103458405,0.2271558940410614,0.16194941103458405,0.2271559089422226,1.0,0.9743589743589743,0.8974358974358975,0.5894095251440064,0.5539158017707626
|
| 40 |
+
ZC3HAV1+CEBPE,0.08641975308641975,0.06,0.08,0.08641975308641975,0.08641975308641975,0.16227180527383367,0.06,0.08,0.095,0.118,0.33029815543559937,0.8209876543209876,0.7384795473575855,0.9876543209876543,162.0,986.0,0.29926723572605585,0.505257278771344,0.3948254883289337,0.16430911421775818,0.23621705174446106,0.16430911421775818,0.23621705174446106,0.8461538461538461,0.8717948717948718,0.9487179487179487,0.5894095251440064,0.5539158017707626
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30-gene_noise_scale/loss_curve.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b852162ef28e157b01f5ed9dde9b1e1447de6cff03792f0ec6936413d3db768f
|
| 3 |
+
size 10719440
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/agg_results.csv
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
statistic,overlap_at_N,overlap_at_50,overlap_at_100,overlap_at_200,overlap_at_500,precision_at_N,precision_at_50,precision_at_100,precision_at_200,precision_at_500,de_spearman_sig,de_direction_match,de_spearman_lfc_sig,de_sig_genes_recall,de_nsig_counts_real,de_nsig_counts_pred,pr_auc,roc_auc,pearson_delta,mse,mae,mse_delta,mae_delta,discrimination_score_l1,discrimination_score_l2,discrimination_score_cosine,pearson_edistance,clustering_agreement
|
| 2 |
+
count,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0
|
| 3 |
+
null_count,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
|
| 4 |
+
mean,0.0687149057815637,0.07009615384615388,0.07204117801259942,0.0707105259176851,0.0687149057815637,0.13346609982888166,0.06923076923076926,0.07076923076923079,0.06743589743589745,0.06723076923076925,-0.015221740472608315,0.7470488612797193,0.7576351502630989,0.9520688391366315,137.89743589743588,986.3333333333334,0.2813484798668912,0.5039338097416642,-0.09816616807037438,0.3772946603787251,0.4452836765692784,0.37729477080015034,0.4452836933808449,0.46811308349769887,0.4667981591058515,0.47600262984878366,-0.015104033722959698,0.12075294248443438
|
| 5 |
+
std,0.05384035544352767,0.0452954342776042,0.05989275067839226,0.060780925117476396,0.05384035544352767,0.0719676074753807,0.045153808219918,0.060060698042835904,0.05975687358472482,0.05601807111547109,7.029602249318226e-18,0.08882782747139281,0.12346406500381771,0.028015030879046145,72.75871981447884,4.0674143689969435,1.1247363598909162e-16,1.1247363598909162e-16,0.1142131448976139,0.20924049905090392,0.19915411993246646,0.20924049545127096,0.19915412127783333,0.2792597688586586,0.27333985853690296,0.28587730554904617,3.514801124659113e-18,2.8118408997272905e-17
|
| 6 |
+
min,0.0,0.0,0.0,0.0,0.0,0.032355915065722954,0.0,0.0,0.01,0.02,-0.015221740472608321,0.547945205479452,0.4665742024965326,0.8782608695652174,32.0,975.0,0.28134847986689143,0.5039338097416648,-0.27481886744499207,0.21513372659683228,0.23026183247566223,0.21513384580612183,0.23026184737682343,0.02564102564102566,0.02564102564102566,0.02564102564102566,-0.0151040337229597,0.12075294248443444
|
| 7 |
+
25%,0.03636363636363636,0.04,0.03508771929824561,0.03636363636363636,0.03636363636363636,0.09634888438133875,0.04,0.03,0.04,0.038,-0.015221740472608321,0.6818181818181818,0.6924097500771367,0.9479166666666666,103.0,985.0,0.28134847986689143,0.5039338097416648,-0.15727567672729492,0.2479596734046936,0.28246796131134033,0.24795979261398315,0.28246796131134033,0.23076923076923073,0.2564102564102564,0.2564102564102564,-0.0151040337229597,0.12075294248443444
|
| 8 |
+
50%,0.05555555555555555,0.06,0.06,0.05555555555555555,0.05555555555555555,0.11189516129032258,0.06,0.06,0.05,0.05,-0.015221740472608321,0.7557251908396947,0.783661735893894,0.9579439252336449,117.0,987.0,0.28134847986689143,0.5039338097416648,-0.10714814811944962,0.30672508478164673,0.36839988827705383,0.3067252039909363,0.36839988827705383,0.4358974358974359,0.4358974358974359,0.4358974358974359,-0.0151040337229597,0.12075294248443444
|
| 9 |
+
75%,0.08602150537634409,0.1,0.09375,0.08602150537634409,0.08602150537634409,0.15587044534412955,0.1,0.09,0.075,0.074,-0.015221740472608321,0.8155339805825242,0.8520840046410592,0.971830985915493,158.0,989.0,0.28134847986689143,0.5039338097416648,-0.04866274446249008,0.4154452085494995,0.545539915561676,0.41544538736343384,0.545539915561676,0.6923076923076923,0.6923076923076923,0.717948717948718,-0.0151040337229597,0.12075294248443444
|
| 10 |
+
max,0.2584856396866841,0.2,0.31,0.31,0.2584856396866841,0.37917087967644086,0.2,0.31,0.31,0.268,-0.015221740472608321,0.8932291666666666,0.9190682247506919,1.0,384.0,994.0,0.28134847986689143,0.5039338097416648,0.33663976192474365,1.1777018308639526,1.0532764196395874,1.1777019500732422,1.0532764196395874,0.9743589743589743,1.0,1.0,-0.0151040337229597,0.12075294248443444
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/pred.h5ad
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a511f1478f94c16809d7b314e1a6df18f61294fb27918fcf16a0b23c685c2a57
|
| 3 |
+
size 49952825
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/real.h5ad
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d906eb6262ae9d587a091c8f6722e562182c9cd62e18f7c2faecbd7c4935ec3c
|
| 3 |
+
size 75069953
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/eval_only/results.csv
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
perturbation,overlap_at_N,overlap_at_50,overlap_at_100,overlap_at_200,overlap_at_500,precision_at_N,precision_at_50,precision_at_100,precision_at_200,precision_at_500,de_spearman_sig,de_direction_match,de_spearman_lfc_sig,de_sig_genes_recall,de_nsig_counts_real,de_nsig_counts_pred,pr_auc,roc_auc,pearson_delta,mse,mae,mse_delta,mae_delta,discrimination_score_l1,discrimination_score_l2,discrimination_score_cosine,pearson_edistance,clustering_agreement
|
| 2 |
+
AHR+FEV,0.11682242990654206,0.1,0.16,0.125,0.11682242990654206,0.2074898785425101,0.1,0.16,0.125,0.116,-0.015221740472608321,0.8084112149532711,0.8520840046410592,0.9579439252336449,214.0,988.0,0.28134847986689143,0.5039338097416648,-0.043981004506349564,0.3413451910018921,0.4569126069545746,0.34134531021118164,0.45691266655921936,0.9743589743589743,1.0,0.9487179487179487,-0.0151040337229597,0.12075294248443444
|
| 3 |
+
AHR+KLF1,0.11428571428571428,0.14,0.13,0.11428571428571428,0.11428571428571428,0.13549039433771487,0.14,0.13,0.09,0.066,-0.015221740472608321,0.7785714285714286,0.8016101861777768,0.9571428571428572,140.0,989.0,0.28134847986689143,0.5039338097416648,-0.23507823050022125,0.3427257835865021,0.45915278792381287,0.3427259027957916,0.45915281772613525,0.4358974358974359,0.3846153846153846,0.17948717948717952,-0.0151040337229597,0.12075294248443444
|
| 4 |
+
BCL2L11+BAK1,0.09375,0.09375,0.09375,0.09375,0.09375,0.032355915065722954,0.06,0.03,0.045,0.038,-0.015221740472608321,0.84375,0.8434463794683776,1.0,32.0,989.0,0.28134847986689143,0.5039338097416648,0.07442901283502579,0.24253018200397491,0.3316977620124817,0.24253028631210327,0.3316977620124817,0.05128205128205132,0.02564102564102566,0.8974358974358975,-0.0151040337229597,0.12075294248443444
|
| 5 |
+
BPGM+ZBTB1,0.05454545454545454,0.08,0.06,0.05454545454545454,0.05454545454545454,0.10616784630940344,0.08,0.06,0.04,0.038,-0.015221740472608321,0.6818181818181818,0.7084041684892157,0.9545454545454546,110.0,989.0,0.28134847986689143,0.5039338097416648,-0.1599692851305008,0.3571409285068512,0.47234514355659485,0.35714101791381836,0.47234517335891724,0.5641025641025641,0.5641025641025641,0.4358974358974359,-0.0151040337229597,0.12075294248443444
|
| 6 |
+
CBL+PTPN12,0.038461538461538464,0.06,0.04,0.038461538461538464,0.038461538461538464,0.09634888438133875,0.06,0.04,0.045,0.038,-0.015221740472608321,0.6153846153846154,0.4665742024965326,0.9134615384615384,104.0,986.0,0.28134847986689143,0.5039338097416648,-0.16663847863674164,0.2556036412715912,0.2707608640193939,0.25560376048088074,0.2707608640193939,0.6923076923076923,0.6153846153846154,0.3846153846153846,-0.0151040337229597,0.12075294248443444
|
| 7 |
+
CBL+PTPN9,0.031578947368421054,0.04,0.031578947368421054,0.031578947368421054,0.031578947368421054,0.09035532994923857,0.04,0.03,0.045,0.034,-0.015221740472608321,0.5684210526315789,0.5098407010274644,0.9368421052631579,95.0,985.0,0.28134847986689143,0.5039338097416648,-0.09738074988126755,0.4154452085494995,0.545539915561676,0.41544538736343384,0.545539915561676,0.641025641025641,0.7435897435897436,0.717948717948718,-0.0151040337229597,0.12075294248443444
|
| 8 |
+
CBL+TGFBR2,0.0273972602739726,0.02,0.0273972602739726,0.0273972602739726,0.0273972602739726,0.0708502024291498,0.02,0.02,0.035,0.036,-0.015221740472608321,0.6575342465753424,0.6924097500771367,0.958904109589041,73.0,988.0,0.28134847986689143,0.5039338097416648,-0.09446369856595993,0.4138798117637634,0.5478195548057556,0.4138799011707306,0.5478195548057556,0.33333333333333337,0.33333333333333337,0.23076923076923073,-0.0151040337229597,0.12075294248443444
|
| 9 |
+
CBL+UBASH3A,0.0,0.0,0.0,0.0,0.0,0.0465587044534413,0.0,0.0,0.01,0.022,-0.015221740472608321,0.7446808510638298,0.8717622571692877,0.9787234042553191,47.0,988.0,0.28134847986689143,0.5039338097416648,-0.1351209431886673,0.3813636600971222,0.5037379264831543,0.38136377930641174,0.5037379264831543,0.6923076923076923,0.8205128205128205,0.7435897435897436,-0.0151040337229597,0.12075294248443444
|
| 10 |
+
CBL+UBASH3B,0.08904109589041095,0.14,0.11,0.08904109589041095,0.08904109589041095,0.1417004048582996,0.14,0.11,0.07,0.042,-0.015221740472608321,0.547945205479452,0.5069527797611008,0.958904109589041,146.0,988.0,0.28134847986689143,0.5039338097416648,-0.10714814811944962,0.5745765566825867,0.6785131096839905,0.5745766758918762,0.6785131692886353,0.4358974358974359,0.5384615384615384,0.23076923076923073,-0.0151040337229597,0.12075294248443444
|
| 11 |
+
CDKN1B+CDKN1A,0.02912621359223301,0.02,0.03,0.02912621359223301,0.02912621359223301,0.10060975609756098,0.02,0.03,0.045,0.062,-0.015221740472608321,0.8155339805825242,0.9190682247506919,0.9611650485436893,103.0,984.0,0.28134847986689143,0.5039338097416648,-0.12452513724565506,0.23722562193870544,0.2739097476005554,0.237225741147995,0.2739097476005554,0.8717948717948718,0.5897435897435898,0.7435897435897436,-0.0151040337229597,0.12075294248443444
|
| 12 |
+
CDKN1C+CDKN1B,0.03636363636363636,0.02,0.02,0.03636363636363636,0.03636363636363636,0.10940695296523517,0.02,0.02,0.045,0.074,-0.015221740472608321,0.8090909090909091,0.9107089723822662,0.9727272727272728,110.0,978.0,0.28134847986689143,0.5039338097416648,-0.09455454349517822,0.22628702223300934,0.23856350779533386,0.2262871414422989,0.23856352269649506,0.8974358974358975,0.5384615384615384,0.5897435897435898,-0.0151040337229597,0.12075294248443444
|
| 13 |
+
CEBPE+CEBPA,0.2584856396866841,0.12,0.31,0.31,0.2584856396866841,0.3738508682328907,0.12,0.31,0.31,0.268,-0.015221740472608321,0.8929503916449086,0.8602535942363543,0.9556135770234987,383.0,979.0,0.28134847986689143,0.5039338097416648,-0.04866274446249008,0.30672508478164673,0.2823221981525421,0.3067252039909363,0.2823222577571869,0.02564102564102566,0.1282051282051282,0.33333333333333337,-0.0151040337229597,0.12075294248443444
|
| 14 |
+
CEBPE+CNN1,0.06802721088435375,0.1,0.07,0.06802721088435375,0.06802721088435375,0.1458966565349544,0.1,0.07,0.08,0.092,-0.015221740472608321,0.8231292517006803,0.8962469486619458,0.9795918367346939,147.0,987.0,0.28134847986689143,0.5039338097416648,-0.11555256694555283,0.31393852829933167,0.43378910422325134,0.3139386475086212,0.43378913402557373,0.6666666666666667,0.3076923076923077,0.28205128205128205,-0.0151040337229597,0.12075294248443444
|
| 15 |
+
ETS2+IGDCC3,0.08695652173913043,0.1,0.09,0.08695652173913043,0.08695652173913043,0.11189516129032258,0.1,0.09,0.06,0.05,-0.015221740472608321,0.7739130434782608,0.8359986739075524,0.9652173913043478,115.0,992.0,0.28134847986689143,0.5039338097416648,-0.03447481989860535,0.6627277731895447,0.761300802230835,0.6627278923988342,0.761300802230835,0.23076923076923073,0.23076923076923073,0.33333333333333337,-0.0151040337229597,0.12075294248443444
|
| 16 |
+
ETS2+IKZF3,0.08602150537634409,0.14,0.1,0.08602150537634409,0.08602150537634409,0.1814516129032258,0.14,0.1,0.095,0.068,-0.015221740472608321,0.7795698924731183,0.7054478792034593,0.967741935483871,186.0,992.0,0.28134847986689143,0.5039338097416648,-0.26221078634262085,1.0517516136169434,0.9778569340705872,1.051751732826233,0.9778569340705872,0.6666666666666667,0.7435897435897436,0.3589743589743589,-0.0151040337229597,0.12075294248443444
|
| 17 |
+
FEV+ISL2,0.06578947368421052,0.02,0.05,0.06578947368421052,0.06578947368421052,0.14661274014155712,0.02,0.05,0.07,0.066,-0.015221740472608321,0.743421052631579,0.8255491546864049,0.9539473684210527,152.0,989.0,0.28134847986689143,0.5039338097416648,-0.18080253899097443,0.2640528380870819,0.2989949882030487,0.26405295729637146,0.2989950180053711,0.8974358974358975,0.9743589743589743,0.8461538461538461,-0.0151040337229597,0.12075294248443444
|
| 18 |
+
FOSB+CEBPB,0.1568627450980392,0.2,0.11,0.155,0.1568627450980392,0.25050709939148075,0.2,0.11,0.155,0.192,-0.015221740472608321,0.8823529411764706,0.8882606303658555,0.9686274509803922,255.0,986.0,0.28134847986689143,0.5039338097416648,-0.06540774554014206,0.29934367537498474,0.36839988827705383,0.2993438243865967,0.36839988827705383,0.4871794871794872,0.6153846153846154,0.4358974358974359,-0.0151040337229597,0.12075294248443444
|
| 19 |
+
FOXA3+FOXA1,0.06428571428571428,0.1,0.07,0.06428571428571428,0.06428571428571428,0.13224821973550355,0.1,0.07,0.055,0.08,-0.015221740472608321,0.75,0.901255193527225,0.9285714285714286,140.0,983.0,0.28134847986689143,0.5039338097416648,-0.1105005219578743,0.2329549938440323,0.27338269352912903,0.23295509815216064,0.2733827233314514,0.4358974358974359,0.3846153846153846,0.2564102564102564,-0.0151040337229597,0.12075294248443444
|
| 20 |
+
KLF1+BAK1,0.07291666666666667,0.08,0.07291666666666667,0.07291666666666667,0.07291666666666667,0.0922920892494929,0.08,0.07,0.045,0.04,-0.015221740472608321,0.6458333333333334,0.7011887314769581,0.9479166666666666,96.0,986.0,0.28134847986689143,0.5039338097416648,-0.21891900897026062,0.36979371309280396,0.4932716488838196,0.3697938323020935,0.49327167868614197,0.28205128205128205,0.2564102564102564,0.1282051282051282,-0.0151040337229597,0.12075294248443444
|
| 21 |
+
KLF1+CEBPA,0.25,0.08,0.24,0.27,0.25,0.37917087967644086,0.08,0.24,0.27,0.26,-0.015221740472608321,0.8932291666666666,0.8307995818747724,0.9765625,384.0,989.0,0.28134847986689143,0.5039338097416648,-0.061934586614370346,0.2844974398612976,0.3347744047641754,0.2844974994659424,0.3347744047641754,0.23076923076923073,0.3076923076923077,0.15384615384615385,-0.0151040337229597,0.12075294248443444
|
| 22 |
+
KLF1+CLDN6,0.04580152671755725,0.04,0.04,0.04580152671755725,0.04580152671755725,0.1301715438950555,0.04,0.04,0.055,0.064,-0.015221740472608321,0.7557251908396947,0.8292067051035661,0.9847328244274809,131.0,991.0,0.28134847986689143,0.5039338097416648,-0.25307101011276245,0.4211171269416809,0.5332732796669006,0.42111721634864807,0.5332733392715454,0.5897435897435898,0.8205128205128205,0.41025641025641024,-0.0151040337229597,0.12075294248443444
|
| 23 |
+
LYL1+CEBPB,0.056962025316455694,0.02,0.08,0.056962025316455694,0.056962025316455694,0.15587044534412955,0.02,0.08,0.065,0.09,-0.015221740472608321,0.8227848101265823,0.9115847071032298,0.9746835443037974,158.0,988.0,0.28134847986689143,0.5039338097416648,-0.18229441344738007,0.24158596992492676,0.28246796131134033,0.2415860891342163,0.28246796131134033,0.8461538461538461,0.4871794871794872,0.5641025641025641,-0.0151040337229597,0.12075294248443444
|
| 24 |
+
MAP2K3+ELMSAN1,0.012987012987012988,0.04,0.02,0.012987012987012988,0.012987012987012988,0.14979757085020243,0.04,0.02,0.02,0.062,-0.015221740472608321,0.8051948051948052,0.7557730366246674,0.961038961038961,154.0,988.0,0.28134847986689143,0.5039338097416648,0.010955821722745895,0.4610966444015503,0.6009400486946106,0.46109670400619507,0.6009401082992554,0.2564102564102564,0.3076923076923077,0.8461538461538461,-0.0151040337229597,0.12075294248443444
|
| 25 |
+
MAP2K3+IKZF3,0.07407407407407407,0.08,0.06,0.07407407407407407,0.07407407407407407,0.10739614994934144,0.08,0.06,0.05,0.036,-0.015221740472608321,0.75,0.7662882362473429,0.9814814814814815,108.0,987.0,0.28134847986689143,0.5039338097416648,-0.21385297179222107,0.4999431073665619,0.5989157557487488,0.49994322657585144,0.5989157557487488,0.17948717948717952,0.28205128205128205,0.10256410256410253,-0.0151040337229597,0.12075294248443444
|
| 26 |
+
MAP2K3+MAP2K6,0.028169014084507043,0.04,0.028169014084507043,0.028169014084507043,0.028169014084507043,0.06990881458966565,0.04,0.03,0.02,0.02,-0.015221740472608321,0.7323943661971831,0.6632745897496378,0.971830985915493,71.0,987.0,0.28134847986689143,0.5039338097416648,0.13404417037963867,0.2546497881412506,0.35721859335899353,0.25464990735054016,0.3572186231613159,0.05128205128205132,0.07692307692307687,0.9743589743589743,-0.0151040337229597,0.12075294248443444
|
| 27 |
+
MAP2K3+SLC38A2,0.03508771929824561,0.04,0.03508771929824561,0.03508771929824561,0.03508771929824561,0.05268490374873354,0.04,0.06,0.045,0.024,-0.015221740472608321,0.7719298245614035,0.6917532467532468,0.9122807017543859,57.0,987.0,0.28134847986689143,0.5039338097416648,0.33663976192474365,0.2342057228088379,0.3242628276348114,0.23420584201812744,0.3242628276348114,0.10256410256410253,0.1282051282051282,1.0,-0.0151040337229597,0.12075294248443444
|
| 28 |
+
MAP2K6+ELMSAN1,0.05813953488372093,0.06,0.05813953488372093,0.05813953488372093,0.05813953488372093,0.08198380566801619,0.06,0.06,0.05,0.042,-0.015221740472608321,0.8255813953488372,0.8137041127441264,0.9418604651162791,86.0,988.0,0.28134847986689143,0.5039338097416648,0.02608500048518181,0.49398142099380493,0.6317513585090637,0.4939815402030945,0.6317513585090637,0.1282051282051282,0.1282051282051282,0.717948717948718,-0.0151040337229597,0.12075294248443444
|
| 29 |
+
MAPK1+IKZF3,0.07926829268292683,0.08,0.08,0.07926829268292683,0.07926829268292683,0.16279069767441862,0.08,0.08,0.08,0.062,-0.015221740472608321,0.7560975609756098,0.7807603098919187,0.9817073170731707,164.0,989.0,0.28134847986689143,0.5039338097416648,-0.27481886744499207,0.6549879312515259,0.7375942468643188,0.6549879312515259,0.7375942468643188,0.8717948717948718,0.8461538461538461,0.6666666666666667,-0.0151040337229597,0.12075294248443444
|
| 30 |
+
PTPN12+PTPN9,0.05102040816326531,0.02,0.05102040816326531,0.05102040816326531,0.05102040816326531,0.09210526315789473,0.02,0.05,0.03,0.034,-0.015221740472608321,0.6530612244897959,0.5569145611998929,0.9285714285714286,98.0,988.0,0.28134847986689143,0.5039338097416648,-0.15727567672729492,0.2679036557674408,0.3364432454109192,0.26790374517440796,0.3364432454109192,0.641025641025641,0.7435897435897436,0.717948717948718,-0.0151040337229597,0.12075294248443444
|
| 31 |
+
PTPN12+UBASH3A,0.05217391304347826,0.04,0.06,0.05217391304347826,0.05217391304347826,0.10337768679631525,0.04,0.06,0.055,0.05,-0.015221740472608321,0.6608695652173913,0.567604630560667,0.8782608695652174,115.0,977.0,0.28134847986689143,0.5039338097416648,-0.15085093677043915,0.2479596734046936,0.25695696473121643,0.24795979261398315,0.25695696473121643,0.717948717948718,0.8205128205128205,0.5897435897435898,-0.0151040337229597,0.12075294248443444
|
| 32 |
+
SGK1+S1PR2,0.07692307692307693,0.14,0.11,0.07692307692307693,0.07692307692307693,0.18921668362156663,0.14,0.11,0.08,0.082,-0.015221740472608321,0.764102564102564,0.783661735893894,0.9538461538461539,195.0,983.0,0.28134847986689143,0.5039338097416648,-0.14698824286460876,0.24700455367565155,0.2739073634147644,0.2470046728849411,0.2739073634147644,0.4358974358974359,0.6923076923076923,0.46153846153846156,-0.0151040337229597,0.12075294248443444
|
| 33 |
+
SGK1+TBX2,0.09836065573770492,0.12,0.11,0.09836065573770492,0.09836065573770492,0.1111111111111111,0.12,0.11,0.075,0.048,-0.015221740472608321,0.6967213114754098,0.7599896872118121,0.8934426229508197,122.0,981.0,0.28134847986689143,0.5039338097416648,-0.15363425016403198,0.23979508876800537,0.2535415589809418,0.2397952377796173,0.2535415589809418,0.46153846153846156,0.641025641025641,0.641025641025641,-0.0151040337229597,0.12075294248443444
|
| 34 |
+
SGK1+TBX3,0.027522935779816515,0.06,0.03,0.027522935779816515,0.027522935779816515,0.09948717948717949,0.06,0.03,0.03,0.04,-0.015221740472608321,0.7247706422018348,0.7509871897072727,0.8899082568807339,109.0,975.0,0.28134847986689143,0.5039338097416648,-0.09961944818496704,0.23880180716514587,0.23026183247566223,0.23880194127559662,0.23026184737682343,0.23076923076923073,0.1282051282051282,0.02564102564102566,-0.0151040337229597,0.12075294248443444
|
| 35 |
+
TBX3+TBX2,0.024691358024691357,0.06,0.03,0.024691358024691357,0.024691358024691357,0.15634517766497463,0.06,0.03,0.02,0.028,-0.015221740472608321,0.6728395061728395,0.6206747662861152,0.9506172839506173,162.0,985.0,0.28134847986689143,0.5039338097416648,-0.02766571380198002,0.2535446286201477,0.312412828207016,0.25354474782943726,0.312412828207016,0.17948717948717952,0.23076923076923073,0.07692307692307687,-0.0151040337229597,0.12075294248443444
|
| 36 |
+
TGFBR2+IGDCC3,0.03669724770642202,0.08,0.04,0.03669724770642202,0.03669724770642202,0.09939148073022312,0.08,0.04,0.04,0.038,-0.015221740472608321,0.6422018348623854,0.6681604374724842,0.8990825688073395,109.0,986.0,0.28134847986689143,0.5039338097416648,-0.08248806744813919,0.373935729265213,0.5070279836654663,0.37393584847450256,0.5070280432701111,0.33333333333333337,0.41025641025641024,0.3076923076923077,-0.0151040337229597,0.12075294248443444
|
| 37 |
+
TGFBR2+PRTG,0.05128205128205128,0.08,0.06,0.05128205128205128,0.05128205128205128,0.11324570273003033,0.08,0.06,0.045,0.05,-0.015221740472608321,0.7435897435897436,0.8064378325713858,0.9572649572649573,117.0,989.0,0.28134847986689143,0.5039338097416648,-0.09338690340518951,0.2547054588794708,0.32076728343963623,0.254705548286438,0.32076728343963623,0.8205128205128205,0.6666666666666667,0.717948717948718,-0.0151040337229597,0.12075294248443444
|
| 38 |
+
UBASH3B+OSR2,0.022900763358778626,0.02,0.01,0.022900763358778626,0.022900763358778626,0.12677484787018256,0.02,0.01,0.03,0.048,-0.015221740472608321,0.8702290076335878,0.8392693088345262,0.9541984732824428,131.0,986.0,0.28134847986689143,0.5039338097416648,-0.0992603525519371,0.36252862215042114,0.4986651837825775,0.3625286817550659,0.4986652433872223,0.07692307692307687,0.05128205128205132,0.02564102564102566,-0.0151040337229597,0.12075294248443444
|
| 39 |
+
UBASH3B+ZBTB25,0.05154639175257732,0.02,0.05154639175257732,0.05154639175257732,0.05154639175257732,0.09356136820925554,0.02,0.05,0.035,0.026,-0.015221740472608321,0.5979381443298969,0.5989109633633871,0.9587628865979382,97.0,994.0,0.28134847986689143,0.5039338097416648,-0.12927569448947906,1.1777018308639526,1.0532764196395874,1.1777019500732422,1.0532764196395874,0.4871794871794872,0.4358974358974359,0.28205128205128205,-0.0151040337229597,0.12075294248443444
|
| 40 |
+
ZC3HAV1+CEBPE,0.05555555555555555,0.04,0.04,0.05555555555555555,0.05555555555555555,0.15869786368260427,0.04,0.04,0.07,0.096,-0.015221740472608321,0.8333333333333334,0.8549527885602453,0.9629629629629629,162.0,983.0,0.28134847986689143,0.5039338097416648,0.011173766106367111,0.21513372659683228,0.25333306193351746,0.21513384580612183,0.25333306193351746,0.33333333333333337,0.20512820512820518,0.20512820512820518,-0.0151040337229597,0.12075294248443444
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/agg_results.csv
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
statistic,overlap_at_N,overlap_at_50,overlap_at_100,overlap_at_200,overlap_at_500,precision_at_N,precision_at_50,precision_at_100,precision_at_200,precision_at_500,de_spearman_sig,de_direction_match,de_spearman_lfc_sig,de_sig_genes_recall,de_nsig_counts_real,de_nsig_counts_pred,pr_auc,roc_auc,pearson_delta,mse,mae,mse_delta,mae_delta,discrimination_score_l1,discrimination_score_l2,discrimination_score_cosine,pearson_edistance,clustering_agreement
|
| 2 |
+
count,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0,39.0
|
| 3 |
+
null_count,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
|
| 4 |
+
mean,0.11519421730128585,0.1441666666666667,0.13608619577039166,0.12199893110144551,0.11519421730128585,0.13097097118424614,0.14461538461538462,0.13615384615384615,0.11423076923076923,0.08974358974358979,0.6332418020734603,0.9324184676174031,0.8917746949131188,0.9184538727141668,137.89743589743588,979.0,0.2880575211910082,0.48935821586474293,0.8659466023628528,0.0032901372820234452,0.020664628714514084,0.0032901372611283874,0.02066462878615428,0.982905982905983,0.9855358316896778,0.9822485207100592,0.9094629991495885,0.5172253296851504
|
| 5 |
+
std,0.10911679356972502,0.178098816942986,0.15915939539825893,0.1308561436072056,0.10911679356972502,0.07367382584049335,0.17790958901180168,0.1589706259628751,0.13112638771298427,0.09193954476740565,1.1247363598909162e-16,0.044847550762776356,0.03591595623842836,0.046859942919392134,72.75871981447884,6.095727580596274,5.623681799454581e-17,1.1247363598909162e-16,0.14835018982393972,0.00254428920337228,0.006301677701137848,0.0025442891910013588,0.006301678112752034,0.053699219011755825,0.04771496559382434,0.05233528622867887,1.1247363598909162e-16,1.1247363598909162e-16
|
| 6 |
+
min,0.0,0.0,0.0,0.0,0.0,0.03023983315954119,0.0,0.01,0.005,0.018,0.6332418020734609,0.8028169014084507,0.7653738585436534,0.7894736842105263,32.0,959.0,0.28805752119100814,0.48935821586474293,0.3626399338245392,0.0010964160319417715,0.013331997208297253,0.0010964160319417715,0.013331997208297253,0.6923076923076923,0.7435897435897436,0.6923076923076923,0.9094629991495881,0.517225329685151
|
| 7 |
+
25%,0.05504587155963303,0.06,0.05263157894736842,0.05504587155963303,0.05504587155963303,0.09304703476482618,0.06,0.05,0.05,0.044,0.6332418020734609,0.908256880733945,0.8790188213514347,0.8775510204081632,103.0,975.0,0.28805752119100814,0.48935821586474293,0.8353385329246521,0.0018225981621071696,0.015485486015677452,0.0018225981621071696,0.015485486015677452,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 8 |
+
50%,0.09090909090909091,0.08,0.09,0.09090909090909091,0.09090909090909091,0.11031664964249234,0.08,0.09,0.075,0.06,0.6332418020734609,0.9426229508196722,0.8925854546657469,0.9361702127659575,117.0,979.0,0.28805752119100814,0.48935821586474293,0.9191223382949829,0.002537589054554701,0.019970763474702835,0.002537589054554701,0.019970763474702835,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 9 |
+
75%,0.11794871794871795,0.14,0.14,0.11794871794871795,0.11794871794871795,0.150253807106599,0.14,0.14,0.11,0.088,0.6332418020734609,0.9636363636363636,0.9127565302830871,0.9523809523809523,158.0,984.0,0.28805752119100814,0.48935821586474293,0.9598571062088013,0.0036052181385457516,0.023301221430301666,0.0036052181385457516,0.023301221430301666,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 10 |
+
max,0.47780678851174935,0.7,0.68,0.6,0.47780678851174935,0.38119312436804853,0.7,0.68,0.6,0.44,0.6332418020734609,1.0,0.9523373052245646,0.9882352941176471,384.0,989.0,0.28805752119100814,0.48935821586474293,0.9823881387710571,0.014034540392458439,0.043725334107875824,0.014034540392458439,0.04372533783316612,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/checkpoint.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e2abf1b204fe86b56eb509b961e05ba1e7d79d1550cf374803dcf4d679653698
|
| 3 |
+
size 697879461
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/distributional_results.csv
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
perturbation,mmd,energy_distance,var_ratio_median,var_ratio_mean,var_ratio_q25,var_ratio_q75,n_active_genes,gene_corr_preservation,c2st_accuracy,knn_precision,knn_recall
|
| 2 |
+
AHR+FEV,0.017871205333452156,0.6095501879310561,0.84073805809021,1.5346680879592896,0.01687859743833542,1.4509820938110352,419,0.24909744841371353,1.0,0.1640625,0.625
|
| 3 |
+
AHR+KLF1,0.034357126619798894,1.1556012296560043,2.629014492034912,5.149451732635498,1.256199836730957,4.948459148406982,361,0.3365450880756923,1.0,0.1171875,0.9635922330097088
|
| 4 |
+
BCL2L11+BAK1,0.02392877055553816,0.7770588944246519,2.3781304359436035,3.3928043842315674,0.8111352920532227,3.6910805702209473,287,0.0868113699172741,1.0,0.03125,1.0
|
| 5 |
+
BPGM+ZBTB1,0.02522936964912484,0.7498288621276643,1.86097252368927,2.8661279678344727,0.021859455853700638,3.030987501144409,349,0.25932415112769613,1.0,0.0390625,0.9858657243816255
|
| 6 |
+
CBL+PTPN12,0.016337015127273313,0.5414247486838457,1.7423911094665527,2.415642023086548,0.019121097400784492,2.6805419921875,328,0.18471136629861323,1.0,0.0625,0.9883268482490273
|
| 7 |
+
CBL+PTPN9,0.01393049569562868,0.44408079996008354,1.5896856784820557,2.0730984210968018,0.019897516816854477,2.4121322631835938,339,0.26377410903863496,1.0,0.1171875,0.9786324786324786
|
| 8 |
+
CBL+TGFBR2,0.017601488765992213,0.5916268587548528,1.8463736772537231,2.9335246086120605,0.5761939883232117,2.8751730918884277,301,0.19589088754208966,1.0,0.09375,0.967948717948718
|
| 9 |
+
CBL+UBASH3A,0.007156902394987091,0.45373675549275916,1.6510965824127197,1.787581205368042,0.9076695442199707,2.2331206798553467,224,0.2222333329122722,1.0,0.2578125,0.98
|
| 10 |
+
CBL+UBASH3B,0.015620887459537935,0.49923960837651293,1.4929242134094238,2.095428228378296,0.01539005246013403,2.5580332279205322,358,0.2432356083538353,1.0,0.0859375,0.941717791411043
|
| 11 |
+
CDKN1B+CDKN1A,0.016986083613670322,0.6153617795974498,1.868277907371521,2.235821008682251,0.5309271216392517,2.6217470169067383,309,0.21387505095101278,1.0,0.0703125,0.9693877551020408
|
| 12 |
+
CDKN1C+CDKN1B,0.019226830893821822,0.7384003924230473,1.917527675628662,2.821695327758789,0.7641957402229309,3.195326089859009,309,0.24837230459250903,1.0,0.078125,0.9493670886075949
|
| 13 |
+
CEBPE+CEBPA,0.0073074285219678625,0.43896788193304026,0.8989853858947754,1.0896122455596924,0.005009935237467289,1.2995184659957886,532,0.6103019839382675,1.0,0.2890625,0.8041666666666667
|
| 14 |
+
CEBPE+CNN1,0.02035990656723438,0.7694203872828282,1.894156575202942,2.6646556854248047,0.148182675242424,2.9743330478668213,381,0.5166442377179713,1.0,0.078125,0.9742268041237113
|
| 15 |
+
ETS2+IGDCC3,0.024169647573922037,0.8477079142023403,2.3727943897247314,4.564054012298584,0.7376527786254883,3.7823691368103027,361,0.2583669157300191,1.0,0.0546875,0.9835616438356164
|
| 16 |
+
ETS2+IKZF3,0.012025324345781019,0.46212375470986444,1.5302391052246094,2.7363831996917725,0.013686561957001686,2.282700777053833,385,0.31727602111259556,1.0,0.109375,0.961340206185567
|
| 17 |
+
FEV+ISL2,0.006897638672713674,0.3033519381459886,1.0069315433502197,1.477606177330017,0.011266712099313736,1.559715986251831,364,0.16860932955471028,1.0,0.171875,0.8639705882352942
|
| 18 |
+
FOSB+CEBPB,0.002728314392996617,0.3560066317276753,1.0908515453338623,1.3968350887298584,0.4534429609775543,1.4848634004592896,385,0.579491036488545,1.0,0.2109375,0.8235294117647058
|
| 19 |
+
FOXA3+FOXA1,0.01599826934778606,0.621929598202632,1.745820164680481,2.6857924461364746,0.17485445737838745,2.8334083557128906,346,0.28284588495139895,1.0,0.109375,0.9558011049723757
|
| 20 |
+
KLF1+BAK1,0.027583615702892778,0.8643188022623765,2.448317050933838,3.7869436740875244,0.47198760509490967,4.07703161239624,324,0.19351806347505712,1.0,0.0234375,0.9752321981424149
|
| 21 |
+
KLF1+CEBPA,0.012981627639382237,0.6344303869555716,1.1328973770141602,1.4404727220535278,0.007249907590448856,1.715933918952942,550,0.7760439091938879,1.0,0.1171875,0.8928571428571429
|
| 22 |
+
KLF1+CLDN6,0.020552156661975213,0.7275642349563594,1.8240087032318115,2.9733777046203613,0.5940459966659546,2.919407367706299,356,0.31102660067886095,1.0,0.0703125,0.9441860465116279
|
| 23 |
+
LYL1+CEBPB,0.03547898980765074,1.2598850469805747,2.0194613933563232,3.0153822898864746,0.9195641279220581,3.585127353668213,355,0.39999221734436885,1.0,0.0859375,0.949685534591195
|
| 24 |
+
MAP2K3+ELMSAN1,0.029516842630050777,1.0780652402789404,2.645120620727539,3.728822946548462,0.6302378177642822,4.0531086921691895,348,0.39408777226180186,1.0,0.03125,0.9793388429752066
|
| 25 |
+
MAP2K3+IKZF3,0.021609600520675232,0.7117643316855382,1.769496202468872,3.9200069904327393,0.02339593693614006,2.693678855895996,338,0.23962125555710506,1.0,0.0625,0.9590909090909091
|
| 26 |
+
MAP2K3+MAP2K6,0.036565771905466504,1.438551858577351,3.200063705444336,5.679604530334473,1.3887274265289307,5.795802116394043,331,0.3201346622116306,1.0,0.0078125,0.9934497816593887
|
| 27 |
+
MAP2K3+SLC38A2,0.03497964398864528,1.2970835392369073,3.9019343852996826,6.116195201873779,1.7040287256240845,6.520554065704346,341,0.24996802698857934,1.0,0.078125,0.9966329966329966
|
| 28 |
+
MAP2K6+ELMSAN1,0.03380768083873226,1.255449094743681,3.3686110973358154,5.070409774780273,1.2703278064727783,5.113337993621826,336,0.3081864902584166,1.0,0.0,0.9972602739726028
|
| 29 |
+
MAPK1+IKZF3,0.018391710415338353,0.6576522313654216,1.7688603401184082,3.038703680038452,0.02250317670404911,2.9092376232147217,358,0.2657594075014821,1.0,0.109375,0.9393939393939394
|
| 30 |
+
PTPN12+PTPN9,0.01628275425020692,0.5096647152874549,1.7910417318344116,2.648109197616577,0.019618701189756393,2.8518285751342773,347,0.3240963956797399,1.0,0.140625,0.9627118644067797
|
| 31 |
+
PTPN12+UBASH3A,0.021653192814435646,0.6863582789904239,1.75233793258667,2.89522385597229,0.019937967881560326,2.9382004737854004,346,0.26509425329359443,1.0,0.078125,0.9803921568627451
|
| 32 |
+
SGK1+S1PR2,0.024770572770150684,0.7476247961561882,1.080904483795166,2.303792953491211,0.00931306928396225,2.1790482997894287,426,0.33618793997393126,1.0,0.0625,0.842948717948718
|
| 33 |
+
SGK1+TBX2,0.017603437942806943,0.5593371773509048,1.6168527603149414,2.599722385406494,0.37884846329689026,2.863718032836914,362,0.29163202492555734,1.0,0.0625,0.9372822299651568
|
| 34 |
+
SGK1+TBX3,0.023292383447586384,0.7750004697089921,1.9750012159347534,3.356232166290283,0.837866485118866,3.100588321685791,354,0.3037391395593573,1.0,0.2109375,0.9547325102880658
|
| 35 |
+
TBX3+TBX2,0.025463679402857597,0.8960550665937888,2.1132760047912598,3.958543539047241,0.6611448526382446,4.3336029052734375,406,0.37959357453541626,1.0,0.03125,0.9711042311661506
|
| 36 |
+
TGFBR2+IGDCC3,0.02180076625389335,0.6999195532877458,2.003127336502075,3.499674081802368,0.5839447975158691,3.616645336151123,340,0.19584110210984473,1.0,0.0703125,0.9580152671755725
|
| 37 |
+
TGFBR2+PRTG,0.014074736217469935,0.4693327599772079,1.7232533693313599,2.5774388313293457,0.6362400054931641,2.6415903568267822,343,0.18281335564872775,1.0,0.046875,0.9567307692307693
|
| 38 |
+
UBASH3B+OSR2,0.02393426557287104,0.7815013155507184,2.4277689456939697,3.840965986251831,0.7632312774658203,4.080033302307129,399,0.49544318767688666,1.0,0.078125,0.9719350073855244
|
| 39 |
+
UBASH3B+ZBTB25,0.019073698571834302,0.532579537855435,1.8004943132400513,3.3618240356445312,0.02690613642334938,2.8842949867248535,338,0.2401615697349634,1.0,0.0703125,0.9821428571428571
|
| 40 |
+
ZC3HAV1+CEBPE,0.04901620171109028,1.8157486718160714,2.698394536972046,5.284060478210449,0.9405089616775513,5.841714859008789,404,0.6277552124695357,1.0,0.046875,0.9826589595375722
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/pred.h5ad
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d17f27ff6ca1c8aecd60577e2f878f180e5c5bc614d1954bb3ef23e14be31078
|
| 3 |
+
size 49952825
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/real.h5ad
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d906eb6262ae9d587a091c8f6722e562182c9cd62e18f7c2faecbd7c4935ec3c
|
| 3 |
+
size 75069953
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/iteration_200000/results.csv
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
perturbation,overlap_at_N,overlap_at_50,overlap_at_100,overlap_at_200,overlap_at_500,precision_at_N,precision_at_50,precision_at_100,precision_at_200,precision_at_500,de_spearman_sig,de_direction_match,de_spearman_lfc_sig,de_sig_genes_recall,de_nsig_counts_real,de_nsig_counts_pred,pr_auc,roc_auc,pearson_delta,mse,mae,mse_delta,mae_delta,discrimination_score_l1,discrimination_score_l2,discrimination_score_cosine,pearson_edistance,clustering_agreement
|
| 2 |
+
AHR+FEV,0.19158878504672897,0.16,0.19,0.195,0.19158878504672897,0.20871327254305977,0.16,0.19,0.195,0.132,0.6332418020734609,0.9345794392523364,0.8925854546657469,0.9626168224299065,214.0,987.0,0.28805752119100814,0.48935821586474293,0.9013275504112244,0.0070017259567976,0.02869582735002041,0.0070017259567976,0.02869582735002041,0.9743589743589743,0.9743589743589743,1.0,0.9094629991495881,0.517225329685151
|
| 3 |
+
AHR+KLF1,0.1357142857142857,0.18,0.17,0.1357142857142857,0.1357142857142857,0.13516260162601626,0.18,0.17,0.11,0.082,0.6332418020734609,0.8928571428571429,0.8721739701152946,0.95,140.0,984.0,0.28805752119100814,0.48935821586474293,0.8318105936050415,0.006111051421612501,0.030300145968794823,0.006111051421612501,0.030300145968794823,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 4 |
+
BCL2L11+BAK1,0.0625,0.0625,0.0625,0.0625,0.0625,0.03023983315954119,0.08,0.04,0.03,0.032,0.6332418020734609,0.96875,0.9523373052245646,0.90625,32.0,959.0,0.28805752119100814,0.48935821586474293,0.49549615383148193,0.001314719207584858,0.01406504400074482,0.001314719207584858,0.01406504400074482,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 5 |
+
BPGM+ZBTB1,0.09090909090909091,0.12,0.09,0.09090909090909091,0.09090909090909091,0.10499490316004077,0.12,0.09,0.08,0.06,0.6332418020734609,0.9636363636363636,0.8920449677352441,0.9363636363636364,110.0,981.0,0.28805752119100814,0.48935821586474293,0.9630479216575623,0.002537589054554701,0.017403235659003258,0.002537589054554701,0.017403235659003258,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 6 |
+
CBL+PTPN12,0.0673076923076923,0.06,0.05,0.0673076923076923,0.0673076923076923,0.09304703476482618,0.06,0.05,0.06,0.046,0.6332418020734609,0.9519230769230769,0.9089832497599488,0.875,104.0,978.0,0.28805752119100814,0.48935821586474293,0.977382242679596,0.0010964160319417715,0.013331997208297253,0.0010964160319417715,0.013331997208297253,0.9743589743589743,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 7 |
+
CBL+PTPN9,0.05263157894736842,0.04,0.05263157894736842,0.05263157894736842,0.05263157894736842,0.08324768756423433,0.04,0.05,0.045,0.036,0.6332418020734609,0.9473684210526315,0.9284834401858955,0.8526315789473684,95.0,973.0,0.28805752119100814,0.48935821586474293,0.953681468963623,0.0012305708369240165,0.01380966603755951,0.0012305708369240165,0.01380966603755951,1.0,1.0,0.9743589743589743,0.9094629991495881,0.517225329685151
|
| 8 |
+
CBL+TGFBR2,0.0273972602739726,0.04,0.0273972602739726,0.0273972602739726,0.0273972602739726,0.06591143151390319,0.04,0.02,0.015,0.04,0.6332418020734609,0.9315068493150684,0.8790188213514347,0.8767123287671232,73.0,971.0,0.28805752119100814,0.48935821586474293,0.9417880773544312,0.0013098922790959477,0.014323247596621513,0.0013098923955112696,0.014323247596621513,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 9 |
+
CBL+UBASH3A,0.0,0.0,0.0,0.0,0.0,0.0449438202247191,0.0,0.01,0.005,0.018,0.6332418020734609,1.0,0.9229879740980573,0.9361702127659575,47.0,979.0,0.28805752119100814,0.48935821586474293,0.9483551383018494,0.0013087592087686062,0.014976558275520802,0.0013087592087686062,0.014976556412875652,1.0,1.0,0.9743589743589743,0.9094629991495881,0.517225329685151
|
| 10 |
+
CBL+UBASH3B,0.1095890410958904,0.08,0.13,0.1095890410958904,0.1095890410958904,0.12949640287769784,0.08,0.13,0.095,0.074,0.6332418020734609,0.8972602739726028,0.8937534976167962,0.863013698630137,146.0,973.0,0.28805752119100814,0.48935821586474293,0.9693173766136169,0.0018225981621071696,0.015270755626261234,0.0018225981621071696,0.015270755626261234,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 11 |
+
CDKN1B+CDKN1A,0.08737864077669903,0.06,0.07,0.08737864077669903,0.08737864077669903,0.10295616717635066,0.06,0.07,0.05,0.044,0.6332418020734609,0.9514563106796117,0.8699095022624435,0.9805825242718447,103.0,981.0,0.28805752119100814,0.48935821586474293,0.9378287196159363,0.0014515386428683996,0.015378400683403015,0.0014515385264530778,0.015378400683403015,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 12 |
+
CDKN1C+CDKN1B,0.05454545454545454,0.04,0.05,0.05454545454545454,0.05454545454545454,0.10520939734422881,0.04,0.05,0.05,0.052,0.6332418020734609,0.9454545454545454,0.8526693536025679,0.9363636363636364,110.0,979.0,0.28805752119100814,0.48935821586474293,0.9132230281829834,0.0019759947899729013,0.01768781617283821,0.0019759945571422577,0.01768781617283821,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 13 |
+
CEBPE+CEBPA,0.47780678851174935,0.66,0.68,0.6,0.47780678851174935,0.3775303643724696,0.66,0.68,0.6,0.44,0.6332418020734609,0.9817232375979112,0.909864328541876,0.9738903394255874,383.0,988.0,0.28805752119100814,0.48935821586474293,0.976327121257782,0.004681455437093973,0.026166625320911407,0.004681455437093973,0.026166627183556557,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 14 |
+
CEBPE+CNN1,0.17006802721088435,0.3,0.19,0.17006802721088435,0.17006802721088435,0.14242115971515767,0.3,0.19,0.145,0.098,0.6332418020734609,0.9727891156462585,0.8822202404794474,0.9523809523809523,147.0,983.0,0.28805752119100814,0.48935821586474293,0.8531714677810669,0.0037261589895933867,0.023773400112986565,0.0037261589895933867,0.023773400112986565,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 15 |
+
ETS2+IGDCC3,0.09565217391304348,0.12,0.11,0.09565217391304348,0.09565217391304348,0.10986775178026449,0.12,0.11,0.07,0.06,0.6332418020734609,0.9478260869565217,0.9043950492548624,0.9391304347826087,115.0,983.0,0.28805752119100814,0.48935821586474293,0.7988293170928955,0.0024004625156521797,0.019970763474702835,0.0024004625156521797,0.019970763474702835,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 16 |
+
ETS2+IKZF3,0.10215053763440861,0.12,0.13,0.10215053763440861,0.10215053763440861,0.1790437436419125,0.12,0.13,0.095,0.088,0.6332418020734609,0.9408602150537635,0.8917085321652976,0.946236559139785,186.0,983.0,0.28805752119100814,0.48935821586474293,0.9823881387710571,0.0014583180891349912,0.015018215402960777,0.001458318205550313,0.015018216334283352,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 17 |
+
FEV+ISL2,0.09210526315789473,0.06,0.1,0.09210526315789473,0.09210526315789473,0.1413934426229508,0.06,0.1,0.09,0.088,0.6332418020734609,0.9276315789473685,0.8878203046205918,0.9078947368421053,152.0,976.0,0.28805752119100814,0.48935821586474293,0.9518364071846008,0.0019144585821777582,0.016919277608394623,0.0019144585821777582,0.016919277608394623,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 18 |
+
FOSB+CEBPB,0.3764705882352941,0.66,0.59,0.445,0.3764705882352941,0.2550607287449393,0.66,0.59,0.445,0.258,0.6332418020734609,1.0,0.9066561943507405,0.9882352941176471,255.0,988.0,0.28805752119100814,0.48935821586474293,0.977456271648407,0.002410425106063485,0.021261680871248245,0.002410425106063485,0.021261680871248245,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 19 |
+
FOXA3+FOXA1,0.12857142857142856,0.24,0.16,0.12857142857142856,0.12857142857142856,0.1350253807106599,0.24,0.16,0.115,0.074,0.6332418020734609,0.9642857142857143,0.8766017931336103,0.95,140.0,985.0,0.28805752119100814,0.48935821586474293,0.891557514667511,0.003311346983537078,0.023301221430301666,0.003311346983537078,0.023301221430301666,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 20 |
+
KLF1+BAK1,0.052083333333333336,0.02,0.052083333333333336,0.052083333333333336,0.052083333333333336,0.08324768756423433,0.02,0.05,0.05,0.044,0.6332418020734609,0.9166666666666666,0.8878847093307279,0.84375,96.0,973.0,0.28805752119100814,0.48935821586474293,0.8811741471290588,0.0019680752884596586,0.017907410860061646,0.0019680752884596586,0.017907410860061646,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 21 |
+
KLF1+CEBPA,0.46875,0.7,0.63,0.54,0.46875,0.38119312436804853,0.7,0.63,0.54,0.434,0.6332418020734609,0.9765625,0.8985173185614233,0.9817708333333334,384.0,989.0,0.28805752119100814,0.48935821586474293,0.9599469900131226,0.005551111418753862,0.029670581221580505,0.005551111418753862,0.029670581221580505,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 22 |
+
KLF1+CLDN6,0.0916030534351145,0.1,0.07,0.0916030534351145,0.0916030534351145,0.12703252032520326,0.1,0.07,0.085,0.078,0.6332418020734609,0.9618320610687023,0.9197736493700619,0.9541984732824428,131.0,984.0,0.28805752119100814,0.48935821586474293,0.932437539100647,0.0034321732819080353,0.020545365288853645,0.0034321732819080353,0.020545365288853645,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 23 |
+
LYL1+CEBPB,0.189873417721519,0.36,0.23,0.189873417721519,0.189873417721519,0.150253807106599,0.36,0.23,0.155,0.11,0.6332418020734609,0.9620253164556962,0.8901968064224567,0.9367088607594937,158.0,985.0,0.28805752119100814,0.48935821586474293,0.7348770499229431,0.009552625939249992,0.03352374956011772,0.009552625939249992,0.03352374956011772,1.0,1.0,0.9230769230769231,0.9094629991495881,0.517225329685151
|
| 24 |
+
MAP2K3+ELMSAN1,0.05844155844155844,0.06,0.05,0.05844155844155844,0.05844155844155844,0.13903192584963955,0.06,0.05,0.06,0.074,0.6332418020734609,0.8376623376623377,0.8501914063720967,0.8766233766233766,154.0,971.0,0.28805752119100814,0.48935821586474293,0.8099707365036011,0.002013053512200713,0.017642803490161896,0.002013053512200713,0.017642803490161896,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 25 |
+
MAP2K3+IKZF3,0.10185185185185185,0.12,0.1,0.10185185185185185,0.10185185185185185,0.10558375634517767,0.12,0.1,0.09,0.054,0.6332418020734609,0.9907407407407407,0.9449131135195935,0.9629629629629629,108.0,985.0,0.28805752119100814,0.48935821586474293,0.9664058089256287,0.003377813147380948,0.020242661237716675,0.003377813147380948,0.020242661237716675,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 26 |
+
MAP2K3+MAP2K6,0.04225352112676056,0.04,0.04225352112676056,0.04225352112676056,0.04225352112676056,0.06141248720573183,0.04,0.04,0.04,0.04,0.6332418020734609,0.8028169014084507,0.7653738585436534,0.8450704225352113,71.0,977.0,0.28805752119100814,0.48935821586474293,0.43246954679489136,0.0031695833895355463,0.022692466154694557,0.0031695833895355463,0.022692466154694557,0.9743589743589743,1.0,0.9743589743589743,0.9094629991495881,0.517225329685151
|
| 27 |
+
MAP2K3+SLC38A2,0.0,0.0,0.0,0.0,0.0,0.04620123203285421,0.0,0.01,0.03,0.034,0.6332418020734609,0.8421052631578947,0.7997402597402598,0.7894736842105263,57.0,974.0,0.28805752119100814,0.48935821586474293,0.3626399338245392,0.0032979159150272608,0.024129455909132957,0.0032979159150272608,0.024129455909132957,0.9743589743589743,0.9743589743589743,0.9743589743589743,0.9094629991495881,0.517225329685151
|
| 28 |
+
MAP2K6+ELMSAN1,0.05813953488372093,0.0,0.05813953488372093,0.05813953488372093,0.05813953488372093,0.07692307692307693,0.0,0.06,0.055,0.048,0.6332418020734609,0.8837209302325582,0.894297661862015,0.872093023255814,86.0,975.0,0.28805752119100814,0.48935821586474293,0.6674091815948486,0.0033246292732656,0.02223532274365425,0.0033246292732656,0.022235320881009102,0.9743589743589743,0.9743589743589743,0.9743589743589743,0.9094629991495881,0.517225329685151
|
| 29 |
+
MAPK1+IKZF3,0.11585365853658537,0.04,0.1,0.11585365853658537,0.11585365853658537,0.15768056968463887,0.04,0.1,0.105,0.068,0.6332418020734609,0.9024390243902439,0.8718288112420843,0.9451219512195121,164.0,983.0,0.28805752119100814,0.48935821586474293,0.8927130699157715,0.0036052181385457516,0.021833917126059532,0.0036052181385457516,0.021833917126059532,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 30 |
+
PTPN12+PTPN9,0.07142857142857142,0.08,0.07142857142857142,0.07142857142857142,0.07142857142857142,0.08829568788501027,0.08,0.07,0.06,0.038,0.6332418020734609,0.8979591836734694,0.9328248753300088,0.8775510204081632,98.0,974.0,0.28805752119100814,0.48935821586474293,0.9598571062088013,0.0011219970183447003,0.013857576996088028,0.0011219970183447003,0.013857576996088028,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 31 |
+
PTPN12+UBASH3A,0.05217391304347826,0.06,0.06,0.05217391304347826,0.05217391304347826,0.10337768679631525,0.06,0.06,0.06,0.06,0.6332418020734609,0.8956521739130435,0.870493106845128,0.8782608695652174,115.0,977.0,0.28805752119100814,0.48935821586474293,0.9191223382949829,0.0035613691434264183,0.019410477951169014,0.0035613691434264183,0.019410477951169014,0.8717948717948718,0.8461538461538461,0.9230769230769231,0.9094629991495881,0.517225329685151
|
| 32 |
+
SGK1+S1PR2,0.11794871794871795,0.1,0.14,0.11794871794871795,0.11794871794871795,0.1894093686354379,0.1,0.14,0.115,0.118,0.6332418020734609,0.9179487179487179,0.8576184578525248,0.9538461538461539,195.0,982.0,0.28805752119100814,0.48935821586474293,0.8353385329246521,0.006171041633933783,0.025686632841825485,0.006171041168272495,0.025686632841825485,0.6923076923076923,0.7435897435897436,0.6923076923076923,0.9094629991495881,0.517225329685151
|
| 33 |
+
SGK1+TBX2,0.09836065573770492,0.12,0.11,0.09836065573770492,0.09836065573770492,0.11451942740286299,0.12,0.11,0.075,0.066,0.6332418020734609,0.9426229508196722,0.9307324393379983,0.9180327868852459,122.0,978.0,0.28805752119100814,0.48935821586474293,0.9189925193786621,0.0025060800835490227,0.017901940271258354,0.0025060800835490227,0.017901940271258354,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 34 |
+
SGK1+TBX3,0.05504587155963303,0.06,0.05,0.05504587155963303,0.05504587155963303,0.10245901639344263,0.06,0.05,0.05,0.046,0.6332418020734609,0.908256880733945,0.9127565302830871,0.9174311926605505,109.0,976.0,0.28805752119100814,0.48935821586474293,0.9049872159957886,0.004011864773929119,0.021642543375492096,0.004011864773929119,0.021642543375492096,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 35 |
+
TBX3+TBX2,0.06790123456790123,0.06,0.07,0.06790123456790123,0.06790123456790123,0.1523517382413088,0.06,0.07,0.08,0.09,0.6332418020734609,0.8641975308641975,0.9033925796729284,0.9197530864197531,162.0,978.0,0.28805752119100814,0.48935821586474293,0.9266531467437744,0.0029927673749625683,0.021586159244179726,0.0029927671421319246,0.021586159244179726,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 36 |
+
TGFBR2+IGDCC3,0.05504587155963303,0.08,0.06,0.05504587155963303,0.05504587155963303,0.10520939734422881,0.08,0.06,0.045,0.042,0.6332418020734609,0.963302752293578,0.9037931274184953,0.944954128440367,109.0,979.0,0.28805752119100814,0.48935821586474293,0.9237167239189148,0.0026932626497000456,0.0206748079508543,0.0026932626497000456,0.0206748079508543,0.9743589743589743,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 37 |
+
TGFBR2+PRTG,0.08547008547008547,0.14,0.1,0.08547008547008547,0.08547008547008547,0.11031664964249234,0.14,0.1,0.065,0.056,0.6332418020734609,0.9572649572649573,0.9128981488420895,0.9230769230769231,117.0,979.0,0.28805752119100814,0.48935821586474293,0.9544602036476135,0.0011664817575365305,0.014456653967499733,0.0011664817575365305,0.014456653967499733,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 38 |
+
UBASH3B+OSR2,0.0916030534351145,0.08,0.1,0.0916030534351145,0.0916030534351145,0.12255406797116375,0.08,0.1,0.085,0.082,0.6332418020734609,0.9236641221374046,0.8793504967418011,0.9083969465648855,131.0,971.0,0.28805752119100814,0.48935821586474293,0.7370474338531494,0.0020488190930336714,0.019415294751524925,0.0020488190930336714,0.019415294751524925,1.0,1.0,1.0,0.9094629991495881,0.517225329685151
|
| 39 |
+
UBASH3B+ZBTB25,0.030927835051546393,0.02,0.030927835051546393,0.030927835051546393,0.030927835051546393,0.08512820512820513,0.02,0.05,0.06,0.044,0.6332418020734609,0.9278350515463918,0.9436015809444894,0.8556701030927835,97.0,975.0,0.28805752119100814,0.48935821586474293,0.9656861424446106,0.0016514494782313704,0.015485486015677452,0.0016514494782313704,0.015485486015677452,1.0,0.9743589743589743,0.9743589743589743,0.9094629991495881,0.517225329685151
|
| 40 |
+
ZC3HAV1+CEBPE,0.2654320987654321,0.38,0.33,0.2654320987654321,0.2654320987654321,0.16142131979695432,0.38,0.33,0.255,0.152,0.6332418020734609,0.9691358024691358,0.8848201842142793,0.9814814814814815,162.0,985.0,0.28805752119100814,0.48935821586474293,0.8211876153945923,0.014034540392458439,0.043725334107875824,0.014034540392458439,0.04372533783316612,0.9230769230769231,0.9487179487179487,0.9230769230769231,0.9094629991495881,0.517225329685151
|
GRN/baseline/flow-fusion_differential_perceiver-norman-origin-predict_y-gamma_0.5-perturbation_function_crisper-lr_5e-05-dim_model_128-infer_top_gene_1000-split_method_additive-use_mmd_loss_True-fold_1-use_negative_edge_True-topk_30/loss_curve.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2282cffef7bf70b28f2f3fe4db4bc57d7a81f2c09f693a7213065b67ac48f30d
|
| 3 |
+
size 11191256
|
GRN/dim1_ablation/run_dim1.py
ADDED
|
@@ -0,0 +1,461 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Training and evaluation entry point for CCFM (Cascaded Conditioned Flow Matching).
|
| 3 |
+
Dim-1 ablation: identical to run_cascaded.py but wraps scGPT extractor with SlicedScGPTExtractor.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
# Set up paths — grn_ccfm/ is the CCFM project root (one level up from dim1_ablation/)
|
| 10 |
+
_ABLATION_DIR = os.path.dirname(os.path.abspath(__file__))
|
| 11 |
+
_PROJECT_ROOT = os.path.join(_ABLATION_DIR, "..", "grn_ccfm")
|
| 12 |
+
_PROJECT_ROOT = os.path.normpath(_PROJECT_ROOT)
|
| 13 |
+
sys.path.insert(0, _PROJECT_ROOT)
|
| 14 |
+
sys.path.insert(0, _ABLATION_DIR) # for sliced_extractor
|
| 15 |
+
|
| 16 |
+
# Bootstrap scDFM imports (must happen before any CCFM src imports)
|
| 17 |
+
import _bootstrap_scdfm # noqa: F401
|
| 18 |
+
|
| 19 |
+
import copy
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn as nn
|
| 22 |
+
import tyro
|
| 23 |
+
import tqdm
|
| 24 |
+
import numpy as np
|
| 25 |
+
import pandas as pd
|
| 26 |
+
import anndata as ad
|
| 27 |
+
import scanpy as sc
|
| 28 |
+
from torch.utils.data import DataLoader
|
| 29 |
+
from tqdm import trange
|
| 30 |
+
from accelerate import Accelerator, DistributedDataParallelKwargs
|
| 31 |
+
from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR, SequentialLR
|
| 32 |
+
|
| 33 |
+
from config.config_cascaded import CascadedFlowConfig as Config
|
| 34 |
+
from src.data.data import get_data_classes
|
| 35 |
+
from src.model.model import CascadedFlowModel
|
| 36 |
+
from src.data.scgpt_extractor import FrozenScGPTExtractor
|
| 37 |
+
from src.data.scgpt_cache import ScGPTFeatureCache
|
| 38 |
+
from src.denoiser import CascadedDenoiser
|
| 39 |
+
from src.utils import (
|
| 40 |
+
save_checkpoint,
|
| 41 |
+
load_checkpoint,
|
| 42 |
+
pick_eval_score,
|
| 43 |
+
process_vocab,
|
| 44 |
+
set_requires_grad_for_p_only,
|
| 45 |
+
GeneVocab,
|
| 46 |
+
)
|
| 47 |
+
from sliced_extractor import SlicedScGPTExtractor
|
| 48 |
+
|
| 49 |
+
from cell_eval import MetricsEvaluator
|
| 50 |
+
|
| 51 |
+
# Resolve scDFM directory paths
|
| 52 |
+
_REPO_ROOT = os.path.normpath(os.path.join(_PROJECT_ROOT, "..", "..", "transfer", "code")) # transfer/code/
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@torch.inference_mode()
|
| 56 |
+
def test(data_sampler, denoiser, accelerator, config, vocab, data_manager,
|
| 57 |
+
batch_size=128, path_dir="./"):
|
| 58 |
+
"""Evaluate: generate predictions and compute cell-eval metrics."""
|
| 59 |
+
device = accelerator.device
|
| 60 |
+
gene_ids_test = vocab.encode(list(data_sampler.adata.var_names))
|
| 61 |
+
gene_ids_test = torch.tensor(gene_ids_test, dtype=torch.long, device=device)
|
| 62 |
+
|
| 63 |
+
perturbation_name_list = data_sampler._perturbation_covariates
|
| 64 |
+
control_data = data_sampler.get_control_data()
|
| 65 |
+
inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}
|
| 66 |
+
|
| 67 |
+
all_pred_expressions = [control_data["src_cell_data"]]
|
| 68 |
+
obs_perturbation_name_pred = ["control"] * control_data["src_cell_data"].shape[0]
|
| 69 |
+
all_target_expressions = [control_data["src_cell_data"]]
|
| 70 |
+
obs_perturbation_name_real = ["control"] * control_data["src_cell_data"].shape[0]
|
| 71 |
+
|
| 72 |
+
print("perturbation_name_list:", len(perturbation_name_list))
|
| 73 |
+
for perturbation_name in perturbation_name_list:
|
| 74 |
+
perturbation_data = data_sampler.get_perturbation_data(perturbation_name)
|
| 75 |
+
target = perturbation_data["tgt_cell_data"]
|
| 76 |
+
perturbation_id = perturbation_data["condition_id"]
|
| 77 |
+
source = control_data["src_cell_data"].to(device)
|
| 78 |
+
perturbation_id = perturbation_id.to(device)
|
| 79 |
+
|
| 80 |
+
if config.perturbation_function == "crisper":
|
| 81 |
+
perturbation_name_crisper = [
|
| 82 |
+
inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
|
| 83 |
+
]
|
| 84 |
+
perturbation_id = torch.tensor(
|
| 85 |
+
vocab.encode(perturbation_name_crisper), dtype=torch.long, device=device
|
| 86 |
+
)
|
| 87 |
+
perturbation_id = perturbation_id.repeat(source.shape[0], 1)
|
| 88 |
+
|
| 89 |
+
idx = torch.randperm(source.shape[0])
|
| 90 |
+
source = source[idx]
|
| 91 |
+
N = 128
|
| 92 |
+
source = source[:N]
|
| 93 |
+
|
| 94 |
+
pred_expressions = []
|
| 95 |
+
for i in trange(0, N, batch_size, desc=perturbation_name):
|
| 96 |
+
batch_source = source[i : i + batch_size]
|
| 97 |
+
batch_pert_id = perturbation_id[0].repeat(batch_source.shape[0], 1).to(device)
|
| 98 |
+
|
| 99 |
+
# Get the underlying model for generation
|
| 100 |
+
model = denoiser.module if hasattr(denoiser, "module") else denoiser
|
| 101 |
+
|
| 102 |
+
pred = model.generate(
|
| 103 |
+
batch_source,
|
| 104 |
+
batch_pert_id,
|
| 105 |
+
gene_ids_test,
|
| 106 |
+
latent_steps=config.latent_steps,
|
| 107 |
+
expr_steps=config.expr_steps,
|
| 108 |
+
method=config.ode_method,
|
| 109 |
+
)
|
| 110 |
+
pred_expressions.append(pred)
|
| 111 |
+
|
| 112 |
+
pred_expressions = torch.cat(pred_expressions, dim=0).cpu().numpy()
|
| 113 |
+
all_pred_expressions.append(pred_expressions)
|
| 114 |
+
all_target_expressions.append(target)
|
| 115 |
+
obs_perturbation_name_pred.extend([perturbation_name] * pred_expressions.shape[0])
|
| 116 |
+
obs_perturbation_name_real.extend([perturbation_name] * target.shape[0])
|
| 117 |
+
|
| 118 |
+
all_pred_expressions = np.concatenate(all_pred_expressions, axis=0)
|
| 119 |
+
all_target_expressions = np.concatenate(all_target_expressions, axis=0)
|
| 120 |
+
obs_pred = pd.DataFrame({"perturbation": obs_perturbation_name_pred})
|
| 121 |
+
obs_real = pd.DataFrame({"perturbation": obs_perturbation_name_real})
|
| 122 |
+
pred_adata = ad.AnnData(X=all_pred_expressions, obs=obs_pred)
|
| 123 |
+
real_adata = ad.AnnData(X=all_target_expressions, obs=obs_real)
|
| 124 |
+
|
| 125 |
+
eval_score = None
|
| 126 |
+
if accelerator.is_main_process:
|
| 127 |
+
evaluator = MetricsEvaluator(
|
| 128 |
+
adata_pred=pred_adata,
|
| 129 |
+
adata_real=real_adata,
|
| 130 |
+
control_pert="control",
|
| 131 |
+
pert_col="perturbation",
|
| 132 |
+
num_threads=32,
|
| 133 |
+
)
|
| 134 |
+
results, agg_results = evaluator.compute()
|
| 135 |
+
results.write_csv(os.path.join(path_dir, "results.csv"))
|
| 136 |
+
agg_results.write_csv(os.path.join(path_dir, "agg_results.csv"))
|
| 137 |
+
pred_adata.write_h5ad(os.path.join(path_dir, "pred.h5ad"))
|
| 138 |
+
real_adata.write_h5ad(os.path.join(path_dir, "real.h5ad"))
|
| 139 |
+
eval_score = pick_eval_score(agg_results, "mse")
|
| 140 |
+
print(f"Current evaluation score: {eval_score:.4f}")
|
| 141 |
+
|
| 142 |
+
return eval_score
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
if __name__ == "__main__":
|
| 146 |
+
config = tyro.cli(Config)
|
| 147 |
+
|
| 148 |
+
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
|
| 149 |
+
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
|
| 150 |
+
if accelerator.is_main_process:
|
| 151 |
+
print(config)
|
| 152 |
+
save_path = config.make_path()
|
| 153 |
+
os.makedirs(save_path, exist_ok=True)
|
| 154 |
+
device = accelerator.device
|
| 155 |
+
|
| 156 |
+
# === Data loading (reuse scDFM) ===
|
| 157 |
+
Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
|
| 158 |
+
|
| 159 |
+
scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
|
| 160 |
+
data_manager = Data(scdfm_data_path)
|
| 161 |
+
data_manager.load_data(config.data_name)
|
| 162 |
+
|
| 163 |
+
# Convert var_names from Ensembl IDs to gene symbols if needed.
|
| 164 |
+
# scDFM vocab and perturbation encoding both expect gene symbols as var_names.
|
| 165 |
+
if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
|
| 166 |
+
data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
|
| 167 |
+
data_manager.adata.var_names_make_unique()
|
| 168 |
+
if accelerator.is_main_process:
|
| 169 |
+
print(f"Converted var_names to gene symbols, sample: {list(data_manager.adata.var_names[:5])}")
|
| 170 |
+
|
| 171 |
+
data_manager.process_data(
|
| 172 |
+
n_top_genes=config.n_top_genes,
|
| 173 |
+
split_method=config.split_method,
|
| 174 |
+
fold=config.fold,
|
| 175 |
+
use_negative_edge=config.use_negative_edge,
|
| 176 |
+
k=config.topk,
|
| 177 |
+
)
|
| 178 |
+
train_sampler, valid_sampler, _ = data_manager.load_flow_data(batch_size=config.batch_size)
|
| 179 |
+
|
| 180 |
+
train_dataset = PerturbationDataset(train_sampler, config.batch_size)
|
| 181 |
+
dataloader = DataLoader(
|
| 182 |
+
train_dataset, batch_size=1, shuffle=False,
|
| 183 |
+
num_workers=8, pin_memory=True, persistent_workers=True,
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
# === Build mask path ===
|
| 187 |
+
if config.use_negative_edge:
|
| 188 |
+
mask_path = os.path.join(
|
| 189 |
+
data_manager.data_path, data_manager.data_name,
|
| 190 |
+
f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}_negative_edge.pt",
|
| 191 |
+
)
|
| 192 |
+
else:
|
| 193 |
+
mask_path = os.path.join(
|
| 194 |
+
data_manager.data_path, data_manager.data_name,
|
| 195 |
+
f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}.pt",
|
| 196 |
+
)
|
| 197 |
+
|
| 198 |
+
# === Vocab ===
|
| 199 |
+
orig_cwd = os.getcwd()
|
| 200 |
+
os.chdir(os.path.join(_REPO_ROOT, "scDFM"))
|
| 201 |
+
vocab = process_vocab(data_manager, config)
|
| 202 |
+
os.chdir(orig_cwd)
|
| 203 |
+
|
| 204 |
+
# Vocab is built from var_names (may be Ensembl IDs or gene symbols)
|
| 205 |
+
gene_ids = vocab.encode(list(data_manager.adata.var_names))
|
| 206 |
+
gene_ids = torch.tensor(gene_ids, dtype=torch.long, device=device)
|
| 207 |
+
|
| 208 |
+
# === Build CascadedFlowModel ===
|
| 209 |
+
vf = CascadedFlowModel(
|
| 210 |
+
ntoken=len(vocab),
|
| 211 |
+
d_model=config.d_model,
|
| 212 |
+
nhead=config.nhead,
|
| 213 |
+
d_hid=config.d_hid,
|
| 214 |
+
nlayers=config.nlayers,
|
| 215 |
+
fusion_method=config.fusion_method,
|
| 216 |
+
perturbation_function=config.perturbation_function,
|
| 217 |
+
mask_path=mask_path,
|
| 218 |
+
scgpt_dim=config.scgpt_dim,
|
| 219 |
+
bottleneck_dim=config.bottleneck_dim,
|
| 220 |
+
dh_depth=config.dh_depth,
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
# === Build FrozenScGPTExtractor ===
|
| 224 |
+
# var_names have been converted to gene symbols above, matching scGPT vocab.
|
| 225 |
+
hvg_gene_names = list(data_manager.adata.var_names)
|
| 226 |
+
scgpt_model_dir = os.path.join(
|
| 227 |
+
os.path.dirname(_REPO_ROOT), # transfer/
|
| 228 |
+
config.scgpt_model_dir.replace("transfer/", ""),
|
| 229 |
+
)
|
| 230 |
+
scgpt_extractor = FrozenScGPTExtractor(
|
| 231 |
+
model_dir=scgpt_model_dir,
|
| 232 |
+
hvg_gene_names=hvg_gene_names,
|
| 233 |
+
device=device,
|
| 234 |
+
max_seq_len=config.scgpt_max_seq_len,
|
| 235 |
+
target_std=config.target_std,
|
| 236 |
+
warmup_batches=config.warmup_batches,
|
| 237 |
+
)
|
| 238 |
+
scgpt_extractor = scgpt_extractor.to(device)
|
| 239 |
+
|
| 240 |
+
# === Dim-1 ablation: wrap extractor to slice features ===
|
| 241 |
+
if config.scgpt_dim < scgpt_extractor.scgpt_d_model:
|
| 242 |
+
print(f"[Ablation] Slicing scGPT features: {scgpt_extractor.scgpt_d_model} -> {config.scgpt_dim}")
|
| 243 |
+
scgpt_extractor = SlicedScGPTExtractor(scgpt_extractor, n_dims=config.scgpt_dim)
|
| 244 |
+
|
| 245 |
+
# === Build CascadedDenoiser ===
|
| 246 |
+
denoiser = CascadedDenoiser(
|
| 247 |
+
model=vf,
|
| 248 |
+
scgpt_extractor=scgpt_extractor,
|
| 249 |
+
choose_latent_p=config.choose_latent_p,
|
| 250 |
+
latent_weight=config.latent_weight,
|
| 251 |
+
noise_type=config.noise_type,
|
| 252 |
+
use_mmd_loss=config.use_mmd_loss,
|
| 253 |
+
gamma=config.gamma,
|
| 254 |
+
poisson_alpha=config.poisson_alpha,
|
| 255 |
+
poisson_target_sum=config.poisson_target_sum,
|
| 256 |
+
t_sample_mode=config.t_sample_mode,
|
| 257 |
+
t_expr_mean=config.t_expr_mean,
|
| 258 |
+
t_expr_std=config.t_expr_std,
|
| 259 |
+
t_latent_mean=config.t_latent_mean,
|
| 260 |
+
t_latent_std=config.t_latent_std,
|
| 261 |
+
noise_beta=config.noise_beta,
|
| 262 |
+
feature_mode=config.feature_mode,
|
| 263 |
+
attn_layer=config.attn_layer,
|
| 264 |
+
attn_use_rank_norm=config.attn_use_rank_norm,
|
| 265 |
+
attn_multi_layer=config.attn_multi_layer,
|
| 266 |
+
)
|
| 267 |
+
|
| 268 |
+
# === Load scGPT cache if configured ===
|
| 269 |
+
scgpt_cache = None
|
| 270 |
+
if config.scgpt_cache_path and config.feature_mode == "attention_delta":
|
| 271 |
+
if accelerator.is_main_process:
|
| 272 |
+
print("WARNING: scGPT cache is not compatible with attention_delta mode. Ignoring cache.")
|
| 273 |
+
config.scgpt_cache_path = ""
|
| 274 |
+
if config.scgpt_cache_path:
|
| 275 |
+
scgpt_cache = ScGPTFeatureCache(
|
| 276 |
+
config.scgpt_cache_path,
|
| 277 |
+
target_std=config.target_std,
|
| 278 |
+
)
|
| 279 |
+
if accelerator.is_main_process:
|
| 280 |
+
print(f"Using pre-extracted scGPT cache: {config.scgpt_cache_path}")
|
| 281 |
+
print(f" Cache shape: {scgpt_cache.features.shape}, cells: {len(scgpt_cache.name_to_idx)}")
|
| 282 |
+
|
| 283 |
+
# === EMA model (on same device as training model) ===
|
| 284 |
+
ema_model = copy.deepcopy(vf).to(device)
|
| 285 |
+
ema_model.eval()
|
| 286 |
+
# EMA weights are inference-only; never receive gradients.
ema_model.requires_grad_(False)

# === Optimizer & Scheduler (with warmup) ===
# Linear warmup (1e-3 -> 1.0 of base lr) for the first `warmup_steps`,
# then cosine decay to `eta_min`. T_max is clamped to >= 1 so the cosine
# phase stays valid even when steps <= warmup_steps.
save_path = config.make_path()
optimizer = torch.optim.Adam(vf.parameters(), lr=config.lr)
warmup_scheduler = LinearLR(
    optimizer, start_factor=1e-3, end_factor=1.0, total_iters=config.warmup_steps,
)
cosine_scheduler = CosineAnnealingLR(
    optimizer, T_max=max(config.steps - config.warmup_steps, 1), eta_min=config.eta_min,
)
scheduler = SequentialLR(
    optimizer, [warmup_scheduler, cosine_scheduler], milestones=[config.warmup_steps],
)

# === Optional resume from checkpoint ===
start_iteration = 0
if config.checkpoint_path != "":
    start_iteration, _ = load_checkpoint(config.checkpoint_path, vf, optimizer, scheduler)
    # Sync EMA with loaded weights so EMA does not restart from random init.
    ema_model.load_state_dict(vf.state_dict())

# === Prepare with accelerator ===
denoiser = accelerator.prepare(denoiser)
optimizer, scheduler, dataloader = accelerator.prepare(optimizer, scheduler, dataloader)

# Map condition ids back to perturbation-name strings (used below to
# re-encode CRISPR conditions through the gene vocab).
inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}

# === Test-only mode: evaluate the restored checkpoint and exit ===
if config.test_only:
    eval_path = os.path.join(save_path, "eval_only")
    os.makedirs(eval_path, exist_ok=True)
    if accelerator.is_main_process:
        print(f"Test-only mode. Saving results to {eval_path}")
    # test() is called on every rank; only the main process reports the score.
    eval_score = test(
        valid_sampler, denoiser, accelerator, config, vocab, data_manager,
        batch_size=config.batch_size, path_dir=eval_path,
    )
    if accelerator.is_main_process and eval_score is not None:
        print(f"Final evaluation score: {eval_score:.4f}")
    sys.exit(0)

# === Loss logging (CSV + TensorBoard), main process only ===
import csv
from torch.utils.tensorboard import SummaryWriter
if accelerator.is_main_process:
    os.makedirs(save_path, exist_ok=True)
    csv_path = os.path.join(save_path, 'loss_curve.csv')
    # Append on resume so the existing curve is preserved; otherwise start
    # a fresh file and write the header row once.
    resume_csv = start_iteration > 0 and os.path.exists(csv_path)
    csv_file = open(csv_path, 'a' if resume_csv else 'w', newline='')
    csv_writer = csv.writer(csv_file)
    if not resume_csv:
        csv_writer.writerow(['iteration', 'loss', 'loss_expr', 'loss_latent', 'loss_mmd', 'lr'])
    tb_writer = SummaryWriter(log_dir=os.path.join(save_path, 'tb_logs'))
|
| 341 |
+
|
| 342 |
+
# === Training loop ===
pbar = tqdm.tqdm(total=config.steps, initial=start_iteration)
iteration = start_iteration

while iteration < config.steps:
    for batch_data in dataloader:
        # Sampler yields pre-batched tensors with a leading dummy dimension.
        source = batch_data["src_cell_data"].squeeze(0)
        target = batch_data["tgt_cell_data"].squeeze(0)
        perturbation_id = batch_data["condition_id"].squeeze(0).to(device)

        if config.perturbation_function == "crisper":
            # Re-encode condition ids through the gene vocab so CRISPR
            # perturbations share token ids with the gene embedding table.
            perturbation_name = [
                inverse_dict[int(p_id)] for p_id in perturbation_id[0].cpu().numpy()
            ]
            perturbation_id = torch.tensor(
                vocab.encode(perturbation_name), dtype=torch.long, device=device
            )
            perturbation_id = perturbation_id.repeat(source.shape[0], 1)

        # Get the underlying denoiser for train_step (unwrap DDP if present).
        base_denoiser = denoiser.module if hasattr(denoiser, "module") else denoiser
        base_denoiser.model.train()

        if scgpt_cache is not None:
            # Cache mode: sample the gene subset here, then look up the
            # pre-extracted scGPT features for the target cells.
            # DataLoader collate wraps strings in tuples; unwrap them.
            tgt_cell_names = [
                n[0] if isinstance(n, (tuple, list)) else n
                for n in batch_data["tgt_cell_id"]
            ]
            input_gene_ids = torch.randperm(source.shape[-1], device=device)[:config.infer_top_gene]
            cached_z_target = scgpt_cache.lookup(tgt_cell_names, input_gene_ids, device=device)
            loss_dict = base_denoiser.train_step(
                source, target, perturbation_id, gene_ids,
                infer_top_gene=config.infer_top_gene,
                cached_z_target=cached_z_target,
                cached_gene_ids=input_gene_ids,
            )
        else:
            loss_dict = base_denoiser.train_step(
                source, target, perturbation_id, gene_ids,
                infer_top_gene=config.infer_top_gene,
            )

        loss = loss_dict["loss"]
        optimizer.zero_grad(set_to_none=True)
        accelerator.backward(loss)
        optimizer.step()
        scheduler.step()

        # === EMA update: ema <- decay * ema + (1 - decay) * model ===
        # NOTE(review): only parameters are averaged; buffers (if any) are
        # synced solely at resume via load_state_dict — confirm intended.
        with torch.no_grad():
            decay = config.ema_decay
            for ema_p, model_p in zip(ema_model.parameters(), vf.parameters()):
                ema_p.lerp_(model_p.data, 1 - decay)

        # --- Periodic checkpoint (EMA weights) + optional evaluation ---
        if iteration % config.print_every == 0:
            save_path_ = os.path.join(save_path, f"iteration_{iteration}")
            os.makedirs(save_path_, exist_ok=True)
            if accelerator.is_main_process:
                print(f"Saving iteration {iteration} checkpoint...")
                # Save EMA model (used for inference) and training state
                save_checkpoint(
                    model=ema_model,
                    optimizer=optimizer,
                    scheduler=scheduler,
                    iteration=iteration,
                    eval_score=None,
                    save_path=save_path_,
                    is_best=False,
                )
            # Evaluate with EMA weights.
            # Only evaluate at the start and the last checkpoint.
            if iteration == 0 or iteration + config.print_every >= config.steps:
                # Swap EMA weights into denoiser for evaluation, then
                # restore the training weights afterwards.
                orig_state = copy.deepcopy(vf.state_dict())
                vf.load_state_dict(ema_model.state_dict())

                eval_score = test(
                    valid_sampler, denoiser, accelerator, config, vocab, data_manager,
                    batch_size=config.batch_size, path_dir=save_path_,
                )

                # Restore training weights
                vf.load_state_dict(orig_state)

                if accelerator.is_main_process and eval_score is not None:
                    tb_writer.add_scalar('eval/score', eval_score, iteration)

        # --- Per-iteration loss logging (CSV every step; TB + flush every
        # 100 iterations — nesting inferred from statement order, confirm) ---
        if accelerator.is_main_process:
            current_lr = scheduler.get_last_lr()[0]
            csv_writer.writerow([
                iteration, loss.item(),
                loss_dict["loss_expr"].item(),
                loss_dict["loss_latent"].item(),
                loss_dict["loss_mmd"].item(),
                current_lr,
            ])
            if iteration % 100 == 0:
                csv_file.flush()
                tb_writer.add_scalar('loss/train', loss.item(), iteration)
                tb_writer.add_scalar('loss/expr', loss_dict["loss_expr"].item(), iteration)
                tb_writer.add_scalar('loss/latent', loss_dict["loss_latent"].item(), iteration)
                tb_writer.add_scalar('loss/mmd', loss_dict["loss_mmd"].item(), iteration)
                tb_writer.add_scalar('lr', current_lr, iteration)

        accelerator.wait_for_everyone()

        pbar.update(1)
        pbar.set_description(
            f"loss: {loss.item():.4f} (expr: {loss_dict['loss_expr'].item():.4f}, "
            f"latent: {loss_dict['loss_latent'].item():.4f}, "
            f"mmd: {loss_dict['loss_mmd'].item():.4f}), iter: {iteration}"
        )
        iteration += 1
        if iteration >= config.steps:
            break

# === Close logging ===
pbar.close()  # release the progress bar (was previously leaked)
if accelerator.is_main_process:
    csv_file.close()
    tb_writer.close()
|
GRN/dim1_ablation/run_eval_iter60000.sh
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=4:00:00
#PJM -N grn_dim1_eval
#PJM -j
#PJM -o /home/hp250092/ku50001222/qian/aivc/lfj/GRN/dim1_ablation/logs/dim1_eval_%j.out

# Evaluation-only run (--test-only) of the dim-1 ablation model on the
# norman dataset, restoring the iteration-60000 checkpoint.
#
# Fail fast: abort if any setup step (module load, venv activation, cd)
# or the evaluation itself fails, and fail pipelines on any stage.
set -eo pipefail

module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

source /home/hp250092/ku50001222/qian/aivc/lfj/stack_env/bin/activate

# Enable unset-variable checking only after 'activate' (older activate
# scripts may reference unset prompt variables).
set -u

cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/dim1_ablation

# Cap CUDA allocator split size to reduce fragmentation on the single GPU.
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

readonly CKPT=/home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/dim1_ablation/grn-norman-f1-topk30-negTrue-d512-lr5e-05-lw1.0-lp0.4-ema0.9999-ln-wu2000-rk4-online-attn_L11/iteration_60000/checkpoint.pt

echo "=========================================="
echo "Job ID: ${PJM_JOBID:-N/A}"
echo "Job Name: ${PJM_JOBNAME:-N/A}"
echo "Start: $(date)"
echo "Node: $(hostname)"
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
echo "Eval checkpoint: $CKPT"
echo "=========================================="

accelerate launch --num_processes=1 run_dim1.py \
  --data-name norman \
  --d-model 512 \
  --d-hid 2048 \
  --nhead 8 \
  --nlayers 4 \
  --batch-size 48 \
  --lr 5e-5 \
  --steps 50000 \
  --fusion-method differential_perceiver \
  --perturbation-function crisper \
  --noise-type Gaussian \
  --infer-top-gene 1000 \
  --n-top-genes 5000 \
  --use-mmd-loss \
  --gamma 0.5 \
  --split-method additive \
  --fold 1 \
  --scgpt-dim 1 \
  --bottleneck-dim 512 \
  --latent-weight 1.0 \
  --choose-latent-p 0.4 \
  --dh-depth 2 \
  --print-every 5000 \
  --topk 30 \
  --use-negative-edge \
  --ema-decay 0.9999 \
  --t-sample-mode logit_normal \
  --t-expr-mean 0.0 \
  --t-expr-std 1.0 \
  --t-latent-mean 0.0 \
  --t-latent-std 1.0 \
  --warmup-steps 2000 \
  --ode-method rk4 \
  --feature-mode attention_delta \
  --attn-layer 11 \
  --attn-use-rank-norm \
  --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/dim1_ablation \
  --checkpoint-path "$CKPT" \
  --test-only

echo "=========================================="
echo "Finished: $(date)"
echo "=========================================="
|