| import random |
| import numpy as np |
| import torch |
| import os |
| from src.tokenizer.gene_tokenizer import GeneVocab |
| from typing import Optional, Dict |
| import numpy as np |
| import pandas as pd |
| from scipy import stats, sparse |
| import networkx as nx |
|
|
def set_seed(seed):
    """Seed every RNG in use (python `random`, numpy, torch CPU and CUDA)."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
| |
|
|
def pick_eval_score(agg_results, scheme):
    """Collapse an aggregated-results table into a single scalar score.

    Args:
        agg_results: object exposing ``to_pandas()`` returning a one-row frame.
        scheme: scoring rule name; see branches below.

    Returns:
        float score for the requested scheme.

    Raises:
        ValueError: if `scheme` is not one of the supported names.
    """
    row = agg_results.to_pandas().iloc[0]
    de_keys = ["de_spearman_sig", "de_direction_match", "de_sig_genes_recall"]

    if scheme in ("pearson_delta", "mse", "mae", "mse_delta"):
        # direct single-column lookup
        return float(row[scheme])

    if scheme == "reverse":
        return float(row["pr_auc"])

    if scheme == "forward":
        # pearson_delta lightly penalized by mse_delta (alpha = 0.05)
        return float(row["pearson_delta"]) - 0.05 * float(row["mse_delta"])

    if scheme == "de":
        # mean over the three differential-expression metrics
        return float(row[de_keys].mean())

    if scheme == "composite":
        de_score = float(row[de_keys].mean())
        directional = float(row["pearson_delta"]) - 0.05 * float(row["mse_delta"])
        return 0.4 * float(row["pr_auc"]) + 0.3 * de_score + 0.3 * directional

    raise ValueError("unknown scheme")
|
|
def make_lognorm_poisson_noise(target_log, alpha=1.0, per_cell_L=None, eps=1e-8):
    """
    Sample Poisson count noise around expression given in log1p space.

    target_log: log1p(normalized_counts)
    alpha: noise intensity (0.3~1.0; smaller is more conservative)
    per_cell_L: if specified (e.g. 1e4), normalize expected total per cell to L;
        None or -1 disables per-cell normalization (alpha scaling is used instead)
    eps: guards division by zero for cells whose total expression is 0

    Returns log1p of the sampled counts, same shape as target_log.
    """
    base = torch.expm1(target_log)
    # BUGFIX: the original test `per_cell_L is not None or per_cell_L == -1`
    # reduces to `per_cell_L is not None` (the `or` clause is unreachable),
    # so per_cell_L=-1 produced a negative scale and all-but-zero counts.
    # Treat -1 as the "no per-cell normalization" sentinel.
    if per_cell_L is not None and per_cell_L != -1:
        scale = per_cell_L / (base.sum(dim=1, keepdim=True) + eps)
        lam = (base * scale).clamp_min(1e-8)
    else:
        lam = (alpha * base).clamp_min(1e-8)
    x0_counts = torch.poisson(lam)
    x0_log = torch.log1p(x0_counts)
    return x0_log
|
|
|
|
def save_checkpoint(model, optimizer, scheduler, iteration, eval_score, save_path, is_best=False):
    """Write training state to `save_path/checkpoint.pt`; duplicate to
    `best_checkpoint.pt` when `is_best` is set."""
    state = {
        'iteration': iteration,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'scheduler_state_dict': scheduler.state_dict(),
        'eval_score': eval_score,
    }

    ckpt_file = os.path.join(save_path, 'checkpoint.pt')
    torch.save(state, ckpt_file)
    print(f"save checkpoint to: {ckpt_file}")

    if is_best:
        best_file = os.path.join(save_path, 'best_checkpoint.pt')
        torch.save(state, best_file)
        print(f"save best checkpoint: {best_file}")
|
|
def load_checkpoint(checkpoint_path, model, optimizer, scheduler):
    """Restore training state from disk.

    Returns (iteration, eval_score); (0, -inf) when the file is missing.
    """
    if not os.path.exists(checkpoint_path):
        print(f"Checkpoint file not found: {checkpoint_path}")
        return 0, float('-inf')

    state = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(state['model_state_dict'])
    optimizer.load_state_dict(state['optimizer_state_dict'])
    scheduler.load_state_dict(state['scheduler_state_dict'])
    iteration = state['iteration']
    # older checkpoints may predate eval_score; fall back to -inf
    eval_score = state.get('eval_score', float('-inf'))
    print(f"loading {checkpoint_path} checkpoint, iteration: {iteration}, eval_score: {eval_score}")
    return iteration, eval_score
| |
| |
def process_vocab(data_manager, config):
    """Load (or build and cache) the GeneVocab for the configured dataset.

    The vocab json path is keyed by `config.data_name` and `config.n_top_genes`.
    If the file exists it is loaded directly; otherwise it is built from the
    highly-variable genes and saved to disk for reuse.
    """
    vocab_path = os.path.join('src/tokenizer',config.data_name+'_'+str(config.n_top_genes)+'_highly_vocab.json')
    if os.path.exists(vocab_path):
        print('##### loading vocab from file #####')
        vocab = GeneVocab.from_file(vocab_path)
    else:
        print('##### building vocab #####')
        # NOTE(review): the boolean mask comes from adata_train.var but indexes
        # adata.var — confirm both share the same var index / row order.
        highly_gene = data_manager.adata.var[data_manager.adata_train.var['highly_variable']]
        vocab = GeneVocab(list(highly_gene.index), specials=['<pad>', '<cls>', '<mask>', 'control'])
        vocab.save_json(vocab_path)
        # re-read from disk so both branches return an identically-constructed vocab
        vocab = GeneVocab.from_file(vocab_path)
    return vocab
|
|
def perturbation_id_to_emb_id(perturbation_id, vocab, perturbation_dict=None):
    """Translate perturbation ids to vocab embedding ids.

    Inverts `perturbation_dict` (name -> id) to recover perturbation names,
    then encodes those names with `vocab`. The result is returned as a long
    tensor on the same device as `perturbation_id`.
    """
    device = perturbation_id.device
    id_to_name = {idx: str(name) for name, idx in perturbation_dict.items()}
    names = [id_to_name[int(pid)] for pid in perturbation_id.cpu().numpy()]
    return torch.tensor(vocab.encode(names), dtype=torch.long, device=device)
|
|
def emb_id_to_perturbation_id(perturbation_id, vocab, perturbation_dict=None):
    """Map perturbation ids to vocab embedding ids (long tensor, same device).

    NOTE(review): this body is byte-identical to `perturbation_id_to_emb_id`
    above, even though the name suggests the inverse mapping (embedding id ->
    perturbation id). Likely a copy-paste leftover — confirm the intended
    direction with callers before relying on this function.
    """
    device = perturbation_id.device
    inverse_dict = {v: str(k) for k, v in perturbation_dict.items()}
    perturbation_name = [inverse_dict[int(p_id)] for p_id in perturbation_id.cpu().numpy()]
    emb_id = vocab.encode(perturbation_name)
    emb_id = torch.tensor(emb_id, dtype=torch.long, device=device)
    return emb_id
|
|
|
|
@torch.no_grad()
def approximate_with_two_perturbations(
    model,
    perturb_vec: torch.Tensor,
    candidate_ids: Optional[torch.Tensor]=None,
    topk: int = 64,
    nonneg_renorm: bool = True,
    return_reconstruction: bool = True,
    topn_output: int = 10,
    mode: str = "pair",
):
    """Approximate a target perturbation vector with candidate embeddings.

    For each row of `perturb_vec`, searches the candidate embeddings for either
    the closest single candidate (mode="single", cosine similarity) or the best
    two-candidate linear combination (mode="pair", least-squares weights).

    Args:
        model: exposes `perturbation_function` plus either `encoder`
            ('crisper' mode) or `perturbation_embedder`.
        perturb_vec: (B, d) or (d,) target vector(s) in embedding space.
        candidate_ids: candidate id tensor; required in 'crisper' mode
            (assumed (N, L) — see assert), otherwise optional (defaults to the
            whole embedding table).
        topk: pair mode only — restrict the O(k^2) pair search to the k
            candidates most cosine-similar to the target.
        nonneg_renorm: clamp pair weights to >= 0 and renormalize to sum 1.
        return_reconstruction: include the reconstructed vector in pair output.
        topn_output: number of best singles/pairs to return.
        mode: "single" or "pair".

    Returns:
        mode="single": {"topn_ids", "topn_scores"} tensors of shape (B, topn).
        mode="pair": {"topn_pairs"} — per batch row, a list of tuples
        (error, id_i, id_j, weights_cpu, reconstruction_cpu_or_None),
        sorted ascending by reconstruction error.

    Raises:
        ValueError: if `mode` is neither "pair" nor "single".
    """
    device = next(model.parameters()).device
    dtype = next(model.parameters()).dtype

    # Normalize the target to (B, d) on the model's device/dtype.
    if perturb_vec.dim() == 1:
        perturb_vec = perturb_vec.unsqueeze(0)
    perturb_vec = perturb_vec.to(device=device, dtype=dtype)
    B, d = perturb_vec.shape

    # Build the candidate embedding matrix E (N, d) and the ids it maps to.
    if model.perturbation_function == 'crisper':
        assert candidate_ids is not None, "In 'crisper' mode, candidate_ids (N, L) must be provided."
        # Sequence-encoder output is mean-pooled over the length dimension.
        E = model.encoder(candidate_ids.to(device=device)).to(dtype=dtype).mean(dim=1)
        cand_index = torch.arange(E.size(0), device=device)
    else:
        if candidate_ids is None:
            # No explicit candidates: score against the entire embedding table.
            all_ids = torch.arange(model.perturbation_embedder.embedding.num_embeddings, device=device)
            E = model.perturbation_embedder(all_ids).to(dtype=dtype)
            cand_index = all_ids
        else:
            # Drop a trailing singleton id dimension, e.g. (N, 1) -> (N,).
            if candidate_ids.dim() == 2 and candidate_ids.size(1) == 1:
                candidate_ids = candidate_ids.squeeze(1)
            E = model.perturbation_embedder(candidate_ids.to(device=device)).to(dtype=dtype)
            cand_index = candidate_ids.to(device)

    N = E.size(0)
    assert N >= 1, "Need at least 1 candidate."

    if mode == "single":
        # Cosine similarity between each target row and every candidate.
        E_norm = torch.nn.functional.normalize(E, dim=-1)
        P_norm = torch.nn.functional.normalize(perturb_vec, dim=-1)
        sims = P_norm @ E_norm.t()
        k = min(topn_output, N)
        topn_val, topn_idx = torch.topk(sims, k=k, dim=-1)
        return {
            "topn_ids": cand_index[topn_idx],
            "topn_scores": topn_val,
        }

    elif mode == "pair":
        assert N >= 2, "Pair mode requires at least 2 candidates."

        # Pre-filter to the topk most similar candidates so the pairwise
        # search below stays tractable.
        E_norm = torch.nn.functional.normalize(E, dim=-1)
        P_norm = torch.nn.functional.normalize(perturb_vec, dim=-1)
        sims = P_norm @ E_norm.t()
        k = min(topk, N)
        _, topk_idx = torch.topk(sims, k=k, dim=-1)

        all_pairs = []
        I2 = torch.eye(2, device=device, dtype=dtype)

        for b in range(B):
            cand = E[topk_idx[b]]
            idxs = topk_idx[b]
            pair_results = []

            for i in range(k):
                e_i = cand[i]
                for j in range(i + 1, k):
                    e_j = cand[j]
                    # Two-column least squares A w ~= p solved via the
                    # ridge-regularized normal equations (1e-6 jitter).
                    A = torch.stack([e_i, e_j], dim=1)
                    ATA = A.t() @ A
                    ATp = A.t() @ perturb_vec[b]

                    w = torch.linalg.solve(ATA + 1e-6 * I2, ATp)

                    if nonneg_renorm:
                        # Force a convex-combination-style weighting:
                        # nonnegative weights that sum to 1.
                        w = torch.clamp(w, min=0)
                        s = w.sum()
                        if s > 0:
                            w = w / s

                    recon = A @ w
                    err = torch.norm(perturb_vec[b] - recon, p=2)

                    pair_results.append((
                        err.item(),
                        cand_index[idxs[i]].item(),
                        cand_index[idxs[j]].item(),
                        w.detach().clone().cpu(),
                        recon.detach().clone().cpu() if return_reconstruction else None
                    ))

            # Keep only the topn_output pairs with the smallest error.
            pair_results.sort(key=lambda x: x[0])
            pair_results = pair_results[:min(topn_output, len(pair_results))]
            all_pairs.append(pair_results)

        return {
            "topn_pairs": all_pairs,
        }

    else:
        raise ValueError("mode must be 'pair' or 'single'")
|
|
@torch.no_grad()
def get_perturbation_embedding(model, perturbation_id: torch.Tensor) -> torch.Tensor:
    """Embed perturbation ids with the model's encoder or embedding table.

    In 'crisper' mode the sequence-encoder output is mean-pooled over the
    length dimension; otherwise the ids go through a plain embedding lookup
    (a trailing singleton dimension is squeezed away first).
    """
    ref_param = next(model.parameters())

    if model.perturbation_function == 'crisper':
        encoded = model.encoder(perturbation_id.to(device=ref_param.device))
        return encoded.to(dtype=ref_param.dtype).mean(dim=1)

    if perturbation_id.dim() == 2 and perturbation_id.size(1) == 1:
        perturbation_id = perturbation_id.squeeze(1)
    looked_up = model.perturbation_embedder(perturbation_id.to(device=ref_param.device))
    return looked_up.to(dtype=ref_param.dtype)
|
|
def freeze_backbone_for_p(vf):
    """Freeze the backbone, leaving only p_head.* and p_mask_embed trainable."""
    for name, param in vf.named_parameters():
        is_p_param = name.startswith('p_head.') or name == 'p_mask_embed'
        param.requires_grad_(is_p_param)
|
|
def unfreeze_all(vf):
    """Re-enable gradients on every parameter of `vf`."""
    for param in vf.parameters():
        param.requires_grad_(True)
| |
def set_requires_grad_for_p_only(vf, p_only: str):
    """Toggle trainability: 'predict_p' trains only the p-head/mask embed,
    anything else unfreezes everything. Unwraps a DDP-style `.module`."""
    base_vf = getattr(vf, "module", vf)
    if p_only != 'predict_p':
        for _, param in base_vf.named_parameters():
            param.requires_grad_(True)
        return
    for name, param in base_vf.named_parameters():
        param.requires_grad_(name.startswith("p_head.") or name == "p_mask_embed")
| |
@torch.no_grad()
def get_perturbation_emb(vf, perturbation_id=None, perturbation_emb=None,
                cell_1=None, use_mask: bool=False):
    """Resolve the (B, d) perturbation embedding for a batch.

    Args:
        vf: model exposing `p_mask_embed`, `perturbation_function`, and either
            `encoder` or `perturbation_embedder`.
        perturbation_id: ids to embed via the model's encoder/embedder.
        perturbation_emb: precomputed embedding; (d,) or (1, d) is broadcast
            to the batch size of `cell_1`.
        cell_1: reference batch tensor supplying batch size, device and dtype.
        use_mask: if True, ignore the other inputs and return the learned mask
            embedding expanded over the batch.

    At most one of `perturbation_id` / `perturbation_emb` may be given.
    """
    if use_mask:
        B = cell_1.size(0)
        return vf.p_mask_embed[None, :].expand(B, -1).to(cell_1.device, dtype=cell_1.dtype)

    assert perturbation_emb is None or perturbation_id is None
    if perturbation_id is not None:
        if vf.perturbation_function == 'crisper':
            perturbation_emb = vf.encoder(perturbation_id)
        else:
            perturbation_emb = vf.perturbation_embedder(perturbation_id)
        # pool over the sequence/length dimension
        perturbation_emb = perturbation_emb.mean(1)
    elif perturbation_emb is not None:
        perturbation_emb = perturbation_emb.to(cell_1.device, dtype=cell_1.dtype)
        if perturbation_emb.dim() == 1:
            perturbation_emb = perturbation_emb.unsqueeze(0)
        if perturbation_emb.size(0) == 1:
            # broadcast a single embedding across the whole batch
            perturbation_emb = perturbation_emb.expand(cell_1.shape[0], -1).contiguous()
        # NOTE(review): enc_norm is applied only on this precomputed-embedding
        # path, not on the id path above — confirm the asymmetry is intended.
        perturbation_emb = vf.perturbation_embedder.enc_norm(perturbation_emb)
    return perturbation_emb
|
|
|
|
| import numpy as np |
| import pandas as pd |
| from scipy import stats, sparse |
| import networkx as nx |
| import torch |
| from scipy import sparse |
|
|
|
|
def preprocess_expression(X, log1p=False, zscore_per_gene=True):
    """Optionally log1p-transform and per-gene z-score a (cells, genes) matrix.

    Accepts a numpy array or a pandas DataFrame (values are extracted).
    Zero-variance genes keep their centered values (std treated as 1).
    """
    values = X.values if isinstance(X, pd.DataFrame) else X
    values = values.astype(np.float64, copy=False)
    if log1p:
        # clip negatives so log1p stays defined
        values = np.log1p(np.clip(values, a_min=0, a_max=None))
    if zscore_per_gene:
        mu = values.mean(axis=0, keepdims=True)
        sigma = values.std(axis=0, ddof=1, keepdims=True)
        sigma[sigma == 0] = 1.0
        values = (values - mu) / sigma
    return values
|
|
| |
def correlation_matrix(X, method="pearson"):
    """
    X: (cells, genes)
    return: (genes, genes) correlation matrix in [-1, 1]
    """
    if method not in ("pearson", "spearman"):
        raise ValueError("method must be 'pearson' or 'spearman'")
    # Spearman = Pearson on per-gene ranks.
    data = X if method == "pearson" else np.apply_along_axis(stats.rankdata, 0, X)
    C = np.corrcoef(data.T)
    np.fill_diagonal(C, 1.0)
    return np.clip(C, -1.0, 1.0)
def safe_correlation_matrix(X, method="pearson"):
    """
    X: (cells, genes). Returns a (genes, genes) correlation matrix that is
    guaranteed free of NaN/Inf.

    Rules:
      - zero-variance genes get correlation 0 with every other gene
        (self-correlation stays 1)
      - NaN/Inf entries in the input are replaced by 0 before computing
    """
    data = np.asarray(X, dtype=np.float64)
    # sanitize input before any arithmetic
    data = np.nan_to_num(data, nan=0.0, posinf=0.0, neginf=0.0)

    if method == "spearman":
        # rank-transform each gene column, then proceed as Pearson
        data = np.apply_along_axis(stats.rankdata, 0, data)

    n_cells = data.shape[0]
    centered = data - data.mean(axis=0, keepdims=True)

    sd = centered.std(axis=0, ddof=1)
    flat_genes = sd == 0.0

    # sample covariance (guard n-1 >= 1)
    cov = (centered.T @ centered) / max(n_cells - 1, 1)

    with np.errstate(divide="ignore", invalid="ignore"):
        C = cov / np.outer(sd, sd)

    # scrub any division artifacts, then zero out flat genes
    C = np.nan_to_num(C, nan=0.0, posinf=0.0, neginf=0.0)
    C[flat_genes, :] = 0.0
    C[:, flat_genes] = 0.0
    np.fill_diagonal(C, 1.0)

    return np.clip(C, -1.0, 1.0)
| |
def soft_threshold_weights(C, beta=6, use_abs=True):
    """
    WGCNA-style soft thresholding of a correlation matrix.

    C: correlation matrix (genes, genes)
    beta: soft-threshold exponent
    use_abs: |C|^beta when True, otherwise max(C, 0)^beta (positive edges only)
    return: weighted adjacency (non-negative, zero diagonal)
    """
    magnitude = np.abs(C) if use_abs else np.maximum(C, 0.0)
    adjacency = magnitude ** beta
    np.fill_diagonal(adjacency, 0.0)
    return adjacency
|
|
| |
def sparsify_topk(W, k=10, keep_symmetry=True):
    """
    Keep, per gene (row), the k edges with the largest absolute weight.

    W: dense (genes, genes)
    keep_symmetry: symmetrize with the elementwise max of A and A.T
    return: csr sparse adjacency
    """
    n = W.shape[0]
    row_idx, col_idx, weights = [], [], []
    for g in range(n):
        edges = W[g].copy()
        edges[g] = 0.0  # never keep a self-edge
        if k >= n - 1:
            chosen = np.where(edges != 0)[0]
        else:
            chosen = np.argpartition(-np.abs(edges), kth=min(k, n - 2))[:k]
        chosen_vals = edges[chosen]
        nonzero = chosen_vals != 0
        row_idx.extend([g] * nonzero.sum())
        col_idx.extend(chosen[nonzero])
        weights.extend(chosen_vals[nonzero])

    A = sparse.csr_matrix((weights, (row_idx, col_idx)), shape=W.shape)
    if keep_symmetry:
        A = A.maximum(A.T)
    return A
|
|
| |
def sparsify_threshold(W, tau=0.3, keep_symmetry=True):
    """
    Keep only edges with |W_ij| >= tau (diagonal always dropped).

    W: dense (genes, genes)
    return: csr sparse adjacency
    """
    keep = np.abs(W) >= tau
    np.fill_diagonal(keep, False)
    r, c = np.nonzero(keep)
    A = sparse.csr_matrix((W[r, c], (r, c)), shape=W.shape)
    return A.maximum(A.T) if keep_symmetry else A
|
|
| |
def gcn_normalize(A, add_self_loops=True):
    r"""
    Kipf & Welling GCN symmetric normalization: \hat A = D^{-1/2} (A + I) D^{-1/2}

    A: csr adjacency
    """
    if add_self_loops:
        A = A + sparse.eye(A.shape[0], format='csr')
    degrees = np.asarray(A.sum(axis=1)).ravel()
    degrees[degrees == 0] = 1.0  # isolated nodes: avoid division by zero
    d_inv_sqrt = sparse.diags(1.0 / np.sqrt(degrees))
    return (d_inv_sqrt @ A @ d_inv_sqrt).tocsr()
|
|
| |
def to_networkx(A, gene_names=None, weight_attr="weight"):
    """
    Convert a csr adjacency to a networkx graph; nodes are relabeled by
    gene name when `gene_names` is provided.
    """
    # networkx 3.x renamed from_scipy_sparse_matrix -> from_scipy_sparse_array
    builder = getattr(nx, "from_scipy_sparse_array", None)
    if builder is None:
        builder = nx.from_scipy_sparse_matrix
    G = builder(A, edge_attribute=weight_attr)

    if gene_names is not None:
        G = nx.relabel_nodes(G, dict(enumerate(gene_names)))
    return G
|
|
|
|
def adjacency_to_mha_mask(A_csr: sparse.csr_matrix, allow_self=True):
    """Convert a sparse adjacency into a boolean attention mask.

    Returns a torch bool tensor where True means the position is BLOCKED
    (the convention torch MultiheadAttention expects for attn_mask).
    """
    lil = A_csr.tolil(copy=True)
    if allow_self:
        lil.setdiag(1)  # every token may always attend to itself
    allowed = lil.tocsr().toarray().astype(bool)
    return torch.from_numpy(~allowed)
|
|
| |
def build_gene_coexpression_graph(
    X,
    method="pearson",
    wgcna_beta=None,
    sparsify="topk",
    k=10,
    tau=0.3,
    log1p=True,
    zscore_per_gene=True,
    use_negative_edge=False
):
    """Build a boolean attention mask from a gene co-expression graph.

    Pipeline: preprocess (cells, genes) expression -> NaN-safe correlation ->
    optional WGCNA soft thresholding -> sparsify (top-k or threshold) ->
    convert adjacency to an attention mask (True = blocked).

    Raises:
        ValueError: if `sparsify` is neither 'topk' nor 'threshold'.
    """
    Xp = preprocess_expression(X, log1p=log1p, zscore_per_gene=zscore_per_gene)

    C = safe_correlation_matrix(Xp, method=method)

    # WGCNA-style soft thresholding (|C|^beta) only when a beta is given;
    # otherwise work directly on the signed correlations.
    if wgcna_beta is not None:
        W = soft_threshold_weights(C, beta=wgcna_beta, use_abs=True)
    else:
        W = C

    # NOTE(review): sign_matrix is computed but never used, and when
    # use_negative_edge is True the signs are discarded via abs() — which
    # reads as the opposite of the flag name. Confirm intended semantics.
    sign_matrix = np.sign(W)
    if use_negative_edge:
        W = np.abs(W)

    if sparsify == "topk":
        A = sparsify_topk(W, k=k, keep_symmetry=True)
    elif sparsify == "threshold":
        A = sparsify_threshold(W, tau=tau, keep_symmetry=True)
    else:
        raise ValueError("sparsify must be 'topk' or 'threshold'")
    mask = adjacency_to_mha_mask(A)

    return mask
|
|
def sorted_pad_mask(mask, pad_size=4, gene_names=None):
    """Reorder an attention mask by sorted gene name and prepend pad rows/cols.

    Args:
        mask: (G, G) mask indexed in `gene_names` order (True = blocked,
            matching adjacency_to_mha_mask's convention — confirm at caller).
        pad_size: number of special-token positions prepended before genes.
        gene_names: sequence giving the current row/column order of `mask`.

    Returns a (G + pad_size, G + pad_size) mask.
    """
    sorted_gene_names = sorted(list(gene_names))
    # O(G^2) lookup via list.index; acceptable for a few thousand genes
    reorder_idx = [gene_names.index(g) for g in sorted_gene_names]
    mask_sorted = mask[reorder_idx][:, reorder_idx]
    orig_shape = mask_sorted.shape
    # padding rows/columns start fully blocked (ones)
    pad_row = torch.ones((pad_size, orig_shape[1]), dtype=mask_sorted.dtype)
    pad_col = torch.ones((orig_shape[0] + pad_size, pad_size), dtype=mask_sorted.dtype)
    mask_padded = torch.cat([pad_row, mask_sorted], dim=0)
    mask_padded = torch.cat([pad_col, mask_padded], dim=1)
    length = mask.shape[0]
    # NOTE(review): this unblocks the diagonal at indices [0, length), i.e. the
    # pad block plus only the first (length - pad_size) genes — the genes were
    # shifted down by pad_size, so the last pad_size gene diagonals stay
    # blocked. Confirm whether arange(length + pad_size) (full diagonal) was
    # the intent.
    mask_padded[torch.arange(length), torch.arange(length)] = False
    return mask_padded