| """ |
| Pre-extract scGPT per-gene features for all training cells. |
| |
| Saves to HDF5 for use with ScGPTFeatureCache during training. |
| Must run on GPU node via pjsub. |
| |
| Usage: |
| python scripts/preextract_scgpt.py --data_name norman --batch_size 256 --output scgpt_cache_norman.h5 |
| """ |
|
|
import sys
import os
import argparse



# Make the project root importable so the `src.*` imports below resolve when
# this script is run directly (e.g. `python scripts/preextract_scgpt.py ...`).
_PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _PROJECT_ROOT)



# NOTE(review): imported for side effects only (never referenced below);
# presumably bootstraps the scDFM environment before the heavy imports — confirm.
import _bootstrap_scdfm


import torch
import numpy as np
import h5py
from tqdm import tqdm


from src.data.data import get_data_classes
from src.data.scgpt_extractor import FrozenScGPTExtractor


# Parent directory of the project root; sibling checkouts (scDFM data, the
# pretrained scGPT model dir) are resolved relative to it in main().
_REPO_ROOT = os.path.dirname(_PROJECT_ROOT)
|
|
|
|
def main():
    """Extract frozen scGPT per-gene features for every training cell and cache them.

    Writes an HDF5 file containing:
      - ``features``: ``(n_cells, n_genes, d_model)`` float16 per-gene embeddings,
        row-aligned with ``cell_names``.
      - ``norm_mean`` / ``norm_var``: global per-dimension statistics computed over
        all non-zero feature vectors, for normalization at training time.
      - ``cell_names``: the ``obs_names`` of the processed AnnData.

    Raises:
        RuntimeError: if no non-zero feature vectors are produced, in which case
            normalization statistics would be undefined (0/0).
    """
    parser = argparse.ArgumentParser(description="Pre-extract scGPT features")
    parser.add_argument("--data_name", type=str, default="norman")
    parser.add_argument("--n_top_genes", type=int, default=5000)
    parser.add_argument("--split_method", type=str, default="additive")
    parser.add_argument("--fold", type=int, default=1)
    parser.add_argument("--topk", type=int, default=15)
    parser.add_argument("--use_negative_edge", action="store_true")
    parser.add_argument("--scgpt_model_dir", type=str, default="transfer/data/scGPT_pretrained")
    parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument("--output", type=str, default="scgpt_cache_norman.h5")
    args = parser.parse_args()

    # The norman dataset is always processed with 5000 HVGs, overriding the flag.
    if args.data_name == "norman":
        args.n_top_genes = 5000

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}")

    Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()

    scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
    data_manager = Data(scdfm_data_path)
    data_manager.load_data(args.data_name)

    # scGPT's vocabulary is keyed by gene symbols; if var_names look like
    # Ensembl IDs (ENSG...), switch to the symbol column before HVG selection.
    if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
        data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
        data_manager.adata.var_names_make_unique()
        print(f"Converted var_names to gene symbols, sample: {list(data_manager.adata.var_names[:5])}")

    data_manager.process_data(
        n_top_genes=args.n_top_genes,
        split_method=args.split_method,
        fold=args.fold,
        use_negative_edge=args.use_negative_edge,
        k=args.topk,
    )

    adata = data_manager.adata
    n_cells = adata.n_obs
    n_genes = adata.n_vars
    hvg_gene_names = list(adata.var_names)
    cell_names = list(adata.obs_names)

    print(f"Cells: {n_cells}, Genes: {n_genes}")
    print(f"HVG gene names sample: {hvg_gene_names[:5]}")

    # Resolve the pretrained model dir relative to the directory ABOVE the repo
    # root, stripping the "transfer/" prefix used by the cluster layout.
    scgpt_model_dir = os.path.join(
        os.path.dirname(_REPO_ROOT),
        args.scgpt_model_dir.replace("transfer/", ""),
    )

    # Size max_seq_len to the number of HVGs present in the scGPT vocab,
    # +2 presumably for special tokens — confirm against FrozenScGPTExtractor.
    import json
    vocab_path = os.path.join(scgpt_model_dir, "vocab.json")
    with open(vocab_path, "r") as f:
        scgpt_vocab = json.load(f)
    n_valid = sum(1 for g in hvg_gene_names if g in scgpt_vocab)
    max_seq_len = n_valid + 2
    print(f"Valid genes in scGPT vocab: {n_valid}/{n_genes}, max_seq_len={max_seq_len}")

    extractor = FrozenScGPTExtractor(
        model_dir=scgpt_model_dir,
        hvg_gene_names=hvg_gene_names,
        device=device,
        max_seq_len=max_seq_len,
        target_std=1.0,
        warmup_batches=0,
    )
    extractor = extractor.to(device)
    extractor.eval()

    scgpt_dim = extractor.scgpt_d_model
    print(f"scGPT d_model: {scgpt_dim}")

    # Densify the expression matrix once up front (AnnData may store sparse).
    import scipy.sparse as sp
    if sp.issparse(adata.X):
        X = torch.from_numpy(adata.X.toarray()).float()
    else:
        X = torch.from_numpy(np.array(adata.X)).float()

    print(f"Output: {args.output}")
    print(f"Features shape: ({n_cells}, {n_genes}, {scgpt_dim}) float16")

    # Neutralize the extractor's internal running normalization (mean=0, var=1,
    # stats frozen) so RAW features are cached; global statistics are computed
    # below in float64 and stored alongside for normalization at load time.
    extractor.running_mean.zero_()
    extractor.running_var.fill_(1.0)
    extractor._stats_frozen = True

    running_sum = torch.zeros(scgpt_dim, dtype=torch.float64)
    running_sq_sum = torch.zeros(scgpt_dim, dtype=torch.float64)
    total_valid_count = 0

    # Context manager guarantees the HDF5 handle is closed (and buffers flushed)
    # even if extraction raises partway through — the bare open/close in the
    # original leaked the handle on error.
    with h5py.File(args.output, "w") as h5:
        feat_ds = h5.create_dataset(
            "features",
            shape=(n_cells, n_genes, scgpt_dim),
            dtype=np.float16,
            # Chunk along the cell axis to match the batched write pattern below.
            chunks=(min(args.batch_size, n_cells), n_genes, scgpt_dim),
        )

        for start in tqdm(range(0, n_cells, args.batch_size), desc="Extracting"):
            end = min(start + args.batch_size, n_cells)
            batch_expr = X[start:end].to(device)

            with torch.no_grad():
                feats = extractor.extract(batch_expr, gene_indices=None)

            feats_cpu = feats.cpu()

            # Only non-zero feature vectors (genes actually embedded by scGPT)
            # contribute to the normalization statistics.
            nonzero_mask = feats_cpu.abs().sum(-1) > 0
            if nonzero_mask.any():
                valid_feats = feats_cpu[nonzero_mask].double()
                running_sum += valid_feats.sum(dim=0)
                running_sq_sum += (valid_feats ** 2).sum(dim=0)
                total_valid_count += valid_feats.shape[0]

            feat_ds[start:end] = feats_cpu.numpy().astype(np.float16)

        # Fail loudly rather than writing NaN stats (0/0) when nothing was valid.
        if total_valid_count == 0:
            raise RuntimeError(
                "No non-zero feature vectors extracted; cannot compute normalization stats."
            )

        # E[x] and Var[x] = E[x^2] - E[x]^2, clamped to avoid tiny negative
        # values from floating-point cancellation.
        global_mean = (running_sum / total_valid_count).float()
        global_var = ((running_sq_sum / total_valid_count) - global_mean.double() ** 2).float().clamp_min(0)
        print(f"Global mean range: [{global_mean.min():.4f}, {global_mean.max():.4f}]")
        print(f"Global var range: [{global_var.min():.4f}, {global_var.max():.4f}]")

        h5.create_dataset("norm_mean", data=global_mean.numpy())
        h5.create_dataset("norm_var", data=global_var.numpy())

        # Variable-length UTF-8 strings for the cell identifiers.
        dt = h5py.string_dtype()
        h5.create_dataset("cell_names", data=np.array(cell_names, dtype=object), dtype=dt)

    print(f"Done! Saved to {args.output}")
    print(f"  Features: ({n_cells}, {n_genes}, {scgpt_dim}) float16")
    print(f"  Valid features counted: {total_valid_count}")
|
|
|
# Script entry point: run the full pre-extraction pipeline.
if __name__ == "__main__":
    main()
|
|