"""
FrozenScGPTExtractor — Frozen scGPT model for on-the-fly per-gene feature extraction.
Analogous to LatentForcing's dinov2_hf.py RAE class:
- Frozen encoder (no gradients)
- Running statistics for normalization
- Variance matching to align scale with expression embeddings
"""
import sys
import os
import json
import logging
import warnings
from typing import List, Optional
import torch
import torch.nn as nn
import types
# Set up scGPT imports — create minimal package stubs to avoid scgpt/__init__.py
# pulling in heavy dependencies (datasets, scbank, etc.)
_SCGPT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", "..", "scGPT"))
if _SCGPT_ROOT not in sys.path:
    sys.path.insert(0, _SCGPT_ROOT)

# Create minimal package stubs
for pkg, subdir in [
    ("scgpt", "scgpt"),
    ("scgpt.model", "scgpt/model"),
    ("scgpt.utils", "scgpt/utils"),
]:
    if pkg not in sys.modules:
        mod = types.ModuleType(pkg)
        mod.__path__ = [os.path.join(_SCGPT_ROOT, subdir)]
        sys.modules[pkg] = mod

# Add logger stub
if not hasattr(sys.modules["scgpt"], "logger"):
    sys.modules["scgpt"].logger = logging.getLogger("scgpt")
from scgpt.model.dsbn import DomainSpecificBatchNorm1d # noqa: F401 (dependency of model.py)
from scgpt.model.grad_reverse import grad_reverse # noqa: F401 (dependency of model.py)
from scgpt.model.model import TransformerModel


def _load_pretrained_safe(model, pretrained_params, verbose=False):
    """Load pretrained weights with non-strict matching (simplified from scGPT)."""
    model_dict = model.state_dict()
    loaded = 0
    for key, val in pretrained_params.items():
        # Handle flash attention -> standard attention key mapping
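        # Illustrative mapping (hypothetical key name): a flash-attn checkpoint
        # entry such as "transformer_encoder.layers.0.self_attn.Wqkv.weight"
        # becomes the standard name "...self_attn.in_proj_weight" below.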
        new_key = key.replace("Wqkv.", "in_proj_").replace("inner_attn.out_proj", "out_proj")
        if new_key in model_dict and model_dict[new_key].shape == val.shape:
            model_dict[new_key] = val
            loaded += 1
        elif key in model_dict and model_dict[key].shape == val.shape:
            model_dict[key] = val
            loaded += 1
    model.load_state_dict(model_dict)
    if verbose:
        print(f"Loaded {loaded}/{len(pretrained_params)} pretrained parameters")


class FrozenScGPTExtractor(nn.Module):
    """
    Wraps a frozen scGPT TransformerModel for on-the-fly per-gene feature extraction.
    Similar to LatentForcing's RAE (frozen DINO-v2 encoder).

    Given expression values for G HVG genes, extracts contextualized per-gene features
    from scGPT's transformer encoder, then scatters them back to a fixed G-length tensor.
    Output: (B, G, scgpt_d_model) normalized features.
    """

    def __init__(
        self,
        model_dir: str,
        hvg_gene_names: List[str],
        device: torch.device = torch.device("cpu"),
        max_seq_len: int = 1200,
        target_std: float = 1.0,
        warmup_batches: int = 200,
    ):
        super().__init__()
        self.device = device
        self.max_seq_len = max_seq_len
        self.target_std = target_std
        self.warmup_batches = warmup_batches
        self.n_hvg = len(hvg_gene_names)

        # Load scGPT vocab as a simple dict (avoid torchtext dependency)
        vocab_path = os.path.join(model_dir, "vocab.json")
        with open(vocab_path, "r") as f:
            self.scgpt_vocab = json.load(f)  # {gene_name: index}

        # Build HVG -> scGPT vocab ID mapping
        self.hvg_gene_names = hvg_gene_names
        hvg_to_scgpt_id = []
        missing_count = 0
        for gene in hvg_gene_names:
            if gene in self.scgpt_vocab:
                hvg_to_scgpt_id.append(self.scgpt_vocab[gene])
            else:
                hvg_to_scgpt_id.append(-1)
                missing_count += 1
        if missing_count > 0:
            warnings.warn(
                f"FrozenScGPTExtractor: {missing_count}/{len(hvg_gene_names)} HVG genes "
                f"not found in scGPT vocab; their features will be zero vectors."
            )
        self.register_buffer(
            "hvg_to_scgpt_id",
            torch.tensor(hvg_to_scgpt_id, dtype=torch.long),
        )

        # Load scGPT model config
        args_path = os.path.join(model_dir, "args.json")
        with open(args_path, "r") as f:
            model_args = json.load(f)
        self.scgpt_d_model = model_args.get("embsize", 512)

        # Build scGPT model (using a simple Vocab-like wrapper)
        pad_token = model_args.get("pad_token", "<pad>")
        pad_value = model_args.get("pad_value", 0)
        vocab_size = len(self.scgpt_vocab)
        pad_token_id = self.scgpt_vocab.get(pad_token, 0)

        # Minimal vocab-like object exposing the interface TransformerModel needs
        class _SimpleVocab:
            def __init__(self, mapping, size):
                self._map = mapping
                self._size = size

            def __getitem__(self, token):
                return self._map.get(token, 0)

            def __len__(self):
                return self._size

            def __contains__(self, token):
                return token in self._map

        simple_vocab = _SimpleVocab(self.scgpt_vocab, vocab_size)

        self.scgpt_model = TransformerModel(
            ntoken=vocab_size,
            d_model=self.scgpt_d_model,
            nhead=model_args.get("nheads", 8),
            d_hid=model_args.get("d_hid", 512),
            nlayers=model_args.get("nlayers", 12),
            vocab=simple_vocab,
            dropout=0.0,
            pad_token=pad_token,
            pad_value=pad_value,
            input_emb_style="continuous",
            use_fast_transformer=False,
        )

        # Load pretrained weights
        model_file = os.path.join(model_dir, "best_model.pt")
        pretrained_params = torch.load(model_file, map_location="cpu")
        _load_pretrained_safe(self.scgpt_model, pretrained_params, verbose=True)

        # Freeze all parameters
        self.scgpt_model.eval()
        for p in self.scgpt_model.parameters():
            p.requires_grad_(False)

        # Pad/CLS token IDs
        self.pad_token_id = pad_token_id
        self.cls_token_id = self.scgpt_vocab.get("<cls>", pad_token_id)

        # Running statistics for normalization (like dinov2_hf.py)
        self.register_buffer("running_mean", torch.zeros(self.scgpt_d_model))
        self.register_buffer("running_var", torch.ones(self.scgpt_d_model))
        self.register_buffer("n_batches_seen", torch.tensor(0, dtype=torch.long))
        self._stats_frozen = False

    def _update_running_stats(self, z: torch.Tensor):
        """Update running mean/var from a batch of features. z: (total_genes, d_model)"""
        if self._stats_frozen or z.numel() == 0:
            return
        batch_mean = z.mean(dim=0)
        batch_var = z.var(dim=0, unbiased=False)
        n = self.n_batches_seen.item()
        # Cumulative moving average (not an EMA): momentum 1/(n+1) weights every
        # batch equally, so the buffers hold the mean of all batch statistics seen.
        momentum = 1.0 / (n + 1)
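        # Worked check of the update rule:
        #   lerp(m, x, 1/(n+1)) = m + (x - m) / (n + 1) = (n * m + x) / (n + 1),
        # i.e. exactly the running mean of the n previous batch means m and the
        # new batch mean x.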
        self.running_mean.lerp_(batch_mean, momentum)
        self.running_var.lerp_(batch_var, momentum)
        self.n_batches_seen += 1
        if self.n_batches_seen.item() >= self.warmup_batches:
            self._stats_frozen = True

    @torch.no_grad()
    def extract(self, expression_values: torch.Tensor, gene_indices: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Extract per-gene contextualized features from frozen scGPT.

        Args:
            expression_values: (B, G) expression values for G genes
            gene_indices: (G,) optional indices into the full HVG list.
                If provided, selects the corresponding subset of the
                hvg_to_scgpt_id mapping. If None, assumes expression_values
                covers all n_hvg genes.

        Returns:
            (B, G, scgpt_d_model) normalized per-gene features
        """
        B, G = expression_values.shape
        device = expression_values.device

        # Select the appropriate scGPT ID mapping
        if gene_indices is not None:
            hvg_ids = self.hvg_to_scgpt_id[gene_indices]  # (G,)
        else:
            hvg_ids = self.hvg_to_scgpt_id  # (n_hvg,)

        # Valid mask: genes that have a scGPT vocab mapping
        valid_mask = hvg_ids >= 0  # (G,)
        valid_scgpt_ids = hvg_ids[valid_mask]  # (G_valid,)
        n_valid = valid_scgpt_ids.shape[0]

        # Get expression values for valid genes only
        expr_valid = expression_values[:, valid_mask]  # (B, G_valid)

        # Limit sequence length: randomly subsample genes, keeping original order
        if n_valid + 1 > self.max_seq_len:  # +1 for CLS
            perm = torch.randperm(n_valid, device=device)[: self.max_seq_len - 1]
            perm, _ = perm.sort()
            selected_scgpt_ids = valid_scgpt_ids[perm]
            selected_expr = expr_valid[:, perm]
            seq_len = self.max_seq_len
            selected_valid_idx = torch.where(valid_mask)[0][perm]
        else:
            selected_scgpt_ids = valid_scgpt_ids
            selected_expr = expr_valid
            seq_len = n_valid + 1
            selected_valid_idx = torch.where(valid_mask)[0]

        # Build input: prepend CLS token
        cls_ids = torch.full((B, 1), self.cls_token_id, dtype=torch.long, device=device)
        gene_ids = selected_scgpt_ids.unsqueeze(0).expand(B, -1)
        src = torch.cat([cls_ids, gene_ids], dim=1)  # (B, seq_len)
        cls_val = torch.zeros(B, 1, device=device)
        values = torch.cat([cls_val, selected_expr], dim=1)  # (B, seq_len)

        # Padding mask (all False: every position holds a real token)
        src_key_padding_mask = torch.zeros(B, seq_len, dtype=torch.bool, device=device)

        # Run frozen scGPT encoder
        encoder_out = self.scgpt_model._encode(
            src, values, src_key_padding_mask
        )  # (B, seq_len, d_model)

        # Skip CLS token, get per-gene features
        gene_features = encoder_out[:, 1:, :]  # (B, seq_len - 1, d_model)

        # Scatter back to fixed G positions (unmapped or unsampled genes stay zero)
        output = torch.zeros(B, G, self.scgpt_d_model, device=device, dtype=gene_features.dtype)
        idx = selected_valid_idx.unsqueeze(0).unsqueeze(-1).expand(B, -1, self.scgpt_d_model)
        output.scatter_(1, idx, gene_features)

        # Update running statistics (only during training warmup)
        if self.training and not self._stats_frozen:
            nonzero_mask = output.abs().sum(-1) > 0
            if nonzero_mask.any():
                nonzero_feats = output[nonzero_mask]
                self._update_running_stats(nonzero_feats)

        # Normalize: zero mean, unit variance, then scale to target_std
        eps = 1e-6
        output = (output - self.running_mean) / (self.running_var.sqrt() + eps)
        output = output * self.target_std
        return output

    def train(self, mode: bool = True):
        """Override to keep scGPT always in eval mode."""
        super().train(mode)
        self.scgpt_model.eval()
        return self
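

if __name__ == "__main__":
    # Minimal smoke-test sketch (the path and tiny HVG list are hypothetical
    # placeholders): point model_dir at a real scGPT checkpoint directory
    # containing vocab.json, args.json, and best_model.pt.
    hvgs = ["CD3D", "MS4A1", "NKG7"]  # placeholder HVG gene symbols
    extractor = FrozenScGPTExtractor(
        model_dir="/path/to/scGPT_human",  # hypothetical checkpoint path
        hvg_gene_names=hvgs,
    )
    expr = torch.rand(2, len(hvgs))  # (B, G) dummy expression values
    feats = extractor.extract(expr)  # -> (B, G, scgpt_d_model)
    print(feats.shape)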