| """ |
| Adaptive Prompt Selection pipeline for Stack. |
| |
| Orchestrates the full pipeline: |
| Step 0: Data splitting |
| Step 1: Embedding extraction |
| Step 2: Arm construction (cell type x sub-cluster) |
| Step 3: CellBandit selection of unperturbed prompt cells |
| Step 4: Bridge prediction (unperturbed -> predicted perturbed) |
| Step 5: TopK retrieval of real perturbed cells |
| Step 6: Final generation with selected prompts |
| """ |
|
|
| from __future__ import annotations |
|
|
| import logging |
| from typing import Dict, List, Optional, Tuple |
|
|
| import anndata as ad |
| import numpy as np |
| from scipy.sparse import issparse |
| from sklearn.cluster import KMeans |
| from sklearn.metrics.pairwise import cosine_similarity |
|
|
| try: |
| from .cell_bandit import CellBandit |
| except ImportError: |
| from cell_bandit import CellBandit |
|
|
| LOGGER = logging.getLogger("adaptive_prompt_selection") |
|
|
|
|
| |
| |
| |
|
|
def create_cell_similarity_fn(precomputed_embeddings: np.ndarray):
    """Build a cosine-similarity scorer over a fixed embedding matrix.

    All embeddings are L2-normalized once up front, so each call to the
    returned function reduces to an index lookup plus one dot product.

    Args:
        precomputed_embeddings: (n_cells, embedding_dim) array.

    Returns:
        fn(query_embedding, cell_indices) -> List[float]
    """
    row_norms = np.linalg.norm(precomputed_embeddings, axis=1, keepdims=True)
    unit_rows = precomputed_embeddings / np.maximum(row_norms, 1e-12)

    def similarity_fn(query_embedding: np.ndarray, cell_indices: List[int]) -> List[float]:
        flat_query = query_embedding.ravel()
        magnitude = np.linalg.norm(flat_query)
        if magnitude < 1e-12:
            # Degenerate (near-zero) query has no direction to compare against.
            return [0.0] * len(cell_indices)
        scores = unit_rows[cell_indices] @ (flat_query / magnitude)
        # Floor negative cosines at 0 and clip float rounding noise above 1.
        return np.clip(scores, 0.0, 1.0).tolist()

    return similarity_fn
|
|
|
|
| |
| |
| |
|
|
def setup_cell_arms(
    pool_adata: ad.AnnData,
    pool_embeddings: np.ndarray,
    cell_type_col: str = "cell_type",
    n_clusters_per_type: int = 5,
    random_state: int = 42,
) -> Tuple[List[Dict], np.ndarray]:
    """Partition unperturbed pool cells into bandit arms.

    Cells are grouped by cell type first, then each group is sub-clustered
    with k-means. Every (cell_type, cluster_id) pair becomes one arm.

    Args:
        pool_adata: AnnData of unperturbed pool cells.
        pool_embeddings: (n_cells, dim) embeddings aligned with pool_adata.
        cell_type_col: Column in pool_adata.obs for cell type.
        n_clusters_per_type: Max sub-clusters per cell type.
        random_state: Random seed for k-means.

    Returns:
        (arms, cluster_labels) where cluster_labels[i] = arm_id for cell i.
    """
    type_per_cell = pool_adata.obs[cell_type_col].values
    unique_types = sorted(set(type_per_cell))
    cluster_labels = np.full(len(pool_adata), -1, dtype=int)
    arms: List[Dict] = []

    def _register_arm(cell_type: str, cluster_id: int, indices: np.ndarray) -> None:
        # Append one arm record and stamp its member cells with the new arm id.
        new_id = len(arms)
        arms.append({
            "arm_id": new_id,
            "cell_type": cell_type,
            "cluster_id": cluster_id,
            "cell_indices": indices.tolist(),
            "samples": 0,
            "mean_sim": 0.0,
            "variance": 0.0,
            "focus_score": 0.0,
            "sampled_indices": [],
            "sampled_scores": [],
        })
        cluster_labels[indices] = new_id

    for ct in unique_types:
        ct_indices = np.where(type_per_cell == ct)[0]
        if ct_indices.size == 0:
            continue

        # Cap the cluster count so each sub-cluster keeps roughly 10+ cells.
        n_clust = min(n_clusters_per_type, max(1, ct_indices.size // 10))

        if n_clust <= 1:
            # Too few cells to sub-cluster: the whole cell type is one arm.
            _register_arm(str(ct), 0, ct_indices)
            continue

        km = KMeans(n_clusters=n_clust, random_state=random_state, n_init=10)
        km_labels = km.fit_predict(pool_embeddings[ct_indices])
        for cid in range(n_clust):
            members = ct_indices[km_labels == cid]
            if len(members) > 0:
                _register_arm(str(ct), int(cid), members)

    LOGGER.info(
        "Created %d arms from %d cell types (%d total pool cells)",
        len(arms), len(unique_types), len(pool_adata),
    )
    return arms, cluster_labels
|
|
|
|
| |
| |
| |
|
|
def _allocate_arm_quotas(arm_sizes: Dict[int, int], top_k_real: int) -> Dict[int, int]:
    """Distribute `top_k_real` retrieval slots across arms proportionally to size.

    Larger arms are allocated first (at least 1 slot each); the smallest arm
    receives whatever remains. The remainder is clamped at 0: previously a
    rounding overshoot could make it negative or zero, and downstream slicing
    with `np.argsort(sims)[-0:]` silently retrieved the ENTIRE perturbed pool.

    Args:
        arm_sizes: arm_id -> number of selected unperturbed cells.
        top_k_real: Total number of real perturbed cells to retrieve.

    Returns:
        arm_id -> non-negative quota; quotas of the non-smallest arms sum may
        exceed `top_k_real` only via the guaranteed minimum of 1 per arm.
    """
    total_selected = sum(arm_sizes.values())
    quotas: Dict[int, int] = {}
    allocated = 0
    sorted_arms = sorted(arm_sizes.items(), key=lambda x: x[1], reverse=True)
    for i, (aid, size) in enumerate(sorted_arms):
        if i == len(sorted_arms) - 1:
            # Smallest arm takes the remainder; clamp in case proportional
            # rounding already consumed the whole budget.
            quotas[aid] = max(0, top_k_real - allocated)
        else:
            quota = max(1, int(round(top_k_real * size / total_selected)))
            quotas[aid] = quota
            allocated += quota
    return quotas


def adaptive_prompt_selection(
    model,
    full_adata: ad.AnnData,
    genelist_path: str,
    query_cell_type: str,
    perturbation: str,
    control_name: str = "Dimethyl Sulfoxide",
    cell_type_col: str = "cell_type",
    perturbation_col: str = "sm_name",
    control_col: str = "control",
    # --- bandit / clustering knobs ---
    n_clusters_per_type: int = 5,
    zoom_ratio: float = 0.25,
    coarse_samples_per_arm: int = 10,
    coarse_ratio: float = 0.2,
    extra_fine_samples_per_arm: int = 10,
    exploration_weight: float = 1.0,
    top_ratio: float = 0.2,
    temperature: float = 0.06,
    # --- generation knobs ---
    n_prompt_cells: Optional[int] = None,
    top_k_real: Optional[int] = None,
    prompt_ratio: float = 0.25,
    context_ratio: float = 0.4,
    context_ratio_min: float = 0.2,
    num_steps: int = 5,
    mode: str = "mdm",
    gene_name_col: Optional[str] = None,
    batch_size: int = 16,
    num_workers: int = 4,
    random_seed: int = 42,
    show_progress: bool = True,
) -> Tuple[ad.AnnData, Dict]:
    """Full adaptive prompt selection pipeline.

    Steps: split data -> embed -> build arms -> bandit-select unperturbed
    prompt cells -> bridge-predict their perturbed state -> retrieve real
    perturbed neighbors per arm -> final in-context generation.

    Args:
        model: Loaded Stack model (ICL_FinetunedModel).
        full_adata: Complete dataset with all cell types and conditions.
        genelist_path: Path to gene list pickle.
        query_cell_type: Cell type to predict for (e.g. "B cells").
        perturbation: Target perturbation (e.g. drug name).
        control_name: Unused in filtering (kept for API compatibility);
            control cells are identified via the boolean `control_col`.
        cell_type_col: obs column for cell type.
        perturbation_col: obs column for perturbation name.
        control_col: obs column for control flag (bool).
        n_prompt_cells: Unperturbed cells to select; derived from the model's
            batch geometry when None.
        top_k_real: Real perturbed cells to retrieve; derived when None.

    Returns:
        (pred_adata, details) — predicted AnnData and bandit selection details.

    Raises:
        ValueError: If any of the three data splits is empty.
    """
    # ---- Step 0: split data into query / unperturbed pool / perturbed pool ----
    LOGGER.info("Step 0: Splitting data...")
    obs = full_adata.obs

    # noqa-style `== True` is intentional: elementwise comparison on a pandas
    # column that may be object-dtyped.
    query_mask = (obs[cell_type_col] == query_cell_type) & (obs[control_col] == True)
    query_cells = full_adata[query_mask].copy()
    LOGGER.info(" Query cells (%s, control): %d", query_cell_type, query_cells.n_obs)

    unperturbed_pool_mask = (obs[cell_type_col] != query_cell_type) & (obs[control_col] == True)
    unperturbed_pool = full_adata[unperturbed_pool_mask].copy()
    LOGGER.info(" Unperturbed pool (non-%s, control): %d", query_cell_type, unperturbed_pool.n_obs)

    perturbed_pool_mask = (obs[cell_type_col] != query_cell_type) & (obs[perturbation_col] == perturbation)
    perturbed_pool = full_adata[perturbed_pool_mask].copy()
    LOGGER.info(" Perturbed pool (non-%s, %s): %d", query_cell_type, perturbation, perturbed_pool.n_obs)

    if query_cells.n_obs == 0:
        raise ValueError(f"No query cells found for {query_cell_type} under control")
    if unperturbed_pool.n_obs == 0:
        raise ValueError("No unperturbed pool cells found")
    if perturbed_pool.n_obs == 0:
        raise ValueError(f"No perturbed pool cells found for {perturbation}")

    # Derive prompt/retrieval budgets from the model's batch geometry.
    n_base_per_batch = int(model.n_cells * (prompt_ratio + context_ratio))
    # BUGFIX: when prompt_ratio + context_ratio >= 1 the remainder is 0 and
    # the batch count below divided by zero; clamp to at least one test slot.
    n_test_per_batch = max(1, model.n_cells - n_base_per_batch)
    num_batches = max(1, int(np.ceil(query_cells.n_obs / n_test_per_batch)))
    if n_prompt_cells is None:
        n_prompt_cells = min(unperturbed_pool.n_obs, n_base_per_batch * num_batches)
    if top_k_real is None:
        top_k_real = min(perturbed_pool.n_obs, n_base_per_batch * num_batches)
    LOGGER.info(" n_base_per_batch=%d, n_test_per_batch=%d, num_batches=%d",
                n_base_per_batch, n_test_per_batch, num_batches)
    LOGGER.info(" n_prompt_cells=%d, top_k_real=%d", n_prompt_cells, top_k_real)

    # ---- Step 1: embeddings for query cells and the unperturbed pool ----
    LOGGER.info("Step 1: Extracting embeddings...")
    query_emb, _ = model.get_latent_representation(
        adata_path=query_cells, genelist_path=genelist_path,
        gene_name_col=gene_name_col, batch_size=batch_size,
        show_progress=show_progress, num_workers=num_workers,
    )
    # The query is summarized by its mean embedding for bandit scoring.
    query_mean = query_emb.mean(axis=0)
    LOGGER.info(" Query mean embedding shape: %s", query_mean.shape)

    pool_emb, _ = model.get_latent_representation(
        adata_path=unperturbed_pool, genelist_path=genelist_path,
        gene_name_col=gene_name_col, batch_size=batch_size,
        show_progress=show_progress, num_workers=num_workers,
    )
    LOGGER.info(" Pool embeddings shape: %s", pool_emb.shape)

    # ---- Step 2: one arm per (cell type, k-means sub-cluster) ----
    LOGGER.info("Step 2: Building arms (clustering)...")
    arms, cluster_labels = setup_cell_arms(
        unperturbed_pool, pool_emb, cell_type_col,
        n_clusters_per_type=n_clusters_per_type, random_state=random_seed,
    )

    # ---- Step 3: bandit selection of unperturbed prompt cells ----
    LOGGER.info("Step 3: Running CellBandit selection...")
    sim_fn = create_cell_similarity_fn(pool_emb)
    bandit = CellBandit(
        similarity_fn=sim_fn,
        zoom_ratio=zoom_ratio,
        coarse_samples_per_arm=coarse_samples_per_arm,
        coarse_ratio=coarse_ratio,
        extra_fine_samples_per_arm=extra_fine_samples_per_arm,
        exploration_weight=exploration_weight,
        top_ratio=top_ratio,
        temperature=temperature,
    )
    selected_indices, bandit_details = bandit.select_cells(
        query_embedding=query_mean,
        arms=arms,
        k=n_prompt_cells,
        rng=np.random.default_rng(random_seed),
    )
    matched_unperturbed = unperturbed_pool[selected_indices].copy()
    LOGGER.info(" Bandit selected %d unperturbed cells", len(selected_indices))

    # ---- Step 4: bridge prediction (unperturbed -> predicted perturbed) ----
    LOGGER.info("Step 4: Bridge prediction (unperturbed -> predicted perturbed)...")
    intermediate_pred = model.get_incontext_prediction(
        base_adata_or_path=perturbed_pool,
        test_adata_or_path=matched_unperturbed,
        genelist_path=genelist_path,
        prompt_ratio=prompt_ratio,
        context_ratio=context_ratio,
        mode="predict",
        gene_name_col=gene_name_col,
        batch_size=batch_size,
        show_progress=show_progress,
        num_workers=num_workers,
        random_seed=random_seed,
    )
    if issparse(intermediate_pred):
        intermediate_pred_dense = intermediate_pred.toarray()
    else:
        intermediate_pred_dense = np.asarray(intermediate_pred)
    LOGGER.info(" Intermediate prediction shape: %s", intermediate_pred_dense.shape)

    # ---- Step 5: per-arm TopK retrieval of real perturbed cells ----
    LOGGER.info("Step 5: Retrieving TopK real perturbed cells...")
    # Wrap the bridge predictions in an AnnData so they can be re-embedded.
    pred_adata_tmp = ad.AnnData(
        X=intermediate_pred_dense,
        obs=matched_unperturbed.obs.copy(),
        var=matched_unperturbed.var.copy() if hasattr(matched_unperturbed, 'var') else None,
    )

    pred_emb, _ = model.get_latent_representation(
        adata_path=pred_adata_tmp, genelist_path=genelist_path,
        gene_name_col=gene_name_col, batch_size=batch_size,
        show_progress=show_progress, num_workers=num_workers,
    )

    perturbed_emb, _ = model.get_latent_representation(
        adata_path=perturbed_pool, genelist_path=genelist_path,
        gene_name_col=gene_name_col, batch_size=batch_size,
        show_progress=show_progress, num_workers=num_workers,
    )

    # Group selected cells by their originating arm; values are positions
    # into `selected_indices` (and therefore rows of `pred_emb`).
    selected_arm_map: Dict[int, List[int]] = {}
    for pos, cell_idx in enumerate(selected_indices):
        arm_label = int(cluster_labels[cell_idx])
        selected_arm_map.setdefault(arm_label, []).append(pos)

    LOGGER.info(" %d arms contributed to selection", len(selected_arm_map))

    # Proportional quota per arm (see _allocate_arm_quotas for the clamp fix).
    arm_sizes = {aid: len(positions) for aid, positions in selected_arm_map.items()}
    arm_quotas = _allocate_arm_quotas(arm_sizes, top_k_real)

    all_retrieved_idx: List[int] = []
    all_retrieved_sims: List[float] = []
    already_selected: set = set()
    per_arm_retrieval_info = []

    for aid, positions in selected_arm_map.items():
        quota = arm_quotas[aid]
        arm_ct = arms[aid]["cell_type"] if aid < len(arms) else "unknown"

        if quota <= 0:
            # Nothing left in the budget for this arm; record it and move on
            # (a zero quota must NOT fall through to the slicing below).
            per_arm_retrieval_info.append({
                "arm_id": int(aid),
                "cell_type": arm_ct,
                "n_selected_unperturbed": len(positions),
                "n_retrieved_perturbed": 0,
                "mean_similarity": 0.0,
            })
            continue

        # Retrieve the perturbed cells closest to the arm's mean prediction.
        arm_mean = pred_emb[positions].mean(axis=0, keepdims=True)
        sims = cosine_similarity(arm_mean, perturbed_emb)[0]
        # Mask out cells already claimed by previous arms.
        for idx in already_selected:
            sims[idx] = -1.0
        top_idx = np.argsort(sims)[-quota:]
        retrieved_sims = sims[top_idx]

        all_retrieved_idx.extend(top_idx.tolist())
        all_retrieved_sims.extend(retrieved_sims.tolist())
        already_selected.update(top_idx.tolist())

        per_arm_retrieval_info.append({
            "arm_id": int(aid),
            "cell_type": arm_ct,
            "n_selected_unperturbed": len(positions),
            "n_retrieved_perturbed": int(quota),
            "mean_similarity": float(np.mean(retrieved_sims)),
        })
        LOGGER.info(" Arm %d (%s): %d unperturbed → %d perturbed (mean_sim=%.4f)",
                    aid, arm_ct, len(positions), quota, float(np.mean(retrieved_sims)))

    final_prompts = perturbed_pool[all_retrieved_idx].copy()
    LOGGER.info(" Selected %d real perturbed cells as final prompts", final_prompts.n_obs)

    bandit_details["n_final_prompts"] = final_prompts.n_obs
    bandit_details["per_arm_retrieval"] = per_arm_retrieval_info
    bandit_details["top_k_similarities"] = all_retrieved_sims

    # ---- Step 6: final generation with the adaptively chosen prompts ----
    LOGGER.info("Step 6: Final generation with adaptive prompts...")
    result = model.get_incontext_generation(
        base_adata_or_path=final_prompts,
        test_adata_or_path=query_cells,
        genelist_path=genelist_path,
        prompt_ratio=prompt_ratio,
        context_ratio=context_ratio,
        context_ratio_min=context_ratio_min,
        num_steps=num_steps,
        mode=mode,
        gene_name_col=gene_name_col,
        batch_size=batch_size,
        show_progress=show_progress,
        num_workers=num_workers,
        random_seed=random_seed,
    )

    # get_incontext_generation may return (predictions, logits) or predictions.
    if isinstance(result, tuple):
        predictions, test_logit = result
    else:
        predictions, test_logit = result, None

    pred_adata = ad.AnnData(
        X=predictions,
        obs=query_cells.obs.copy(),
        var=query_cells.var.copy(),
    )
    if test_logit is not None:
        pred_adata.obs["gen_logit"] = np.asarray(test_logit)

    LOGGER.info("Adaptive prompt selection complete. Prediction shape: %s", pred_adata.shape)
    return pred_adata, bandit_details
|
|
|
|
| |
| |
| |
|
|
def run_baseline(
    model,
    context_adata: ad.AnnData,
    query_adata: ad.AnnData,
    genelist_path: str,
    prompt_ratio: float = 0.25,
    context_ratio: float = 0.4,
    context_ratio_min: float = 0.2,
    num_steps: int = 5,
    mode: str = "mdm",
    gene_name_col: Optional[str] = None,
    batch_size: int = 16,
    num_workers: int = 4,
    random_seed: int = 42,
    show_progress: bool = True,
) -> ad.AnnData:
    """Run standard random-prompt Stack generation for comparison."""
    LOGGER.info("Running baseline (random prompt) generation...")

    raw = model.get_incontext_generation(
        base_adata_or_path=context_adata,
        test_adata_or_path=query_adata,
        genelist_path=genelist_path,
        prompt_ratio=prompt_ratio,
        context_ratio=context_ratio,
        context_ratio_min=context_ratio_min,
        num_steps=num_steps,
        mode=mode,
        gene_name_col=gene_name_col,
        batch_size=batch_size,
        show_progress=show_progress,
        num_workers=num_workers,
        random_seed=random_seed,
    )

    # The model may return either (predictions, logits) or predictions alone.
    test_logit = None
    if isinstance(raw, tuple):
        predictions, test_logit = raw
    else:
        predictions = raw

    pred_adata = ad.AnnData(
        X=predictions,
        obs=query_adata.obs.copy(),
        var=query_adata.var.copy(),
    )
    if test_logit is not None:
        pred_adata.obs["gen_logit"] = np.asarray(test_logit)

    LOGGER.info("Baseline generation complete. Prediction shape: %s", pred_adata.shape)
    return pred_adata
|
|