"""
ScGPTFeatureCache — Load pre-extracted scGPT features from HDF5.
Replaces on-the-fly FrozenScGPTExtractor during training when a cache file exists.
"""
import h5py
import numpy as np
import torch
class ScGPTFeatureCache:
    """
    Loads pre-extracted per-gene scGPT features from HDF5 and provides
    batch lookup by cell name and gene indices.

    Supports use as a context manager so the underlying HDF5 handle is
    released deterministically:

        with ScGPTFeatureCache("feats.h5") as cache:
            z = cache.lookup(names, idx)

    HDF5 layout:
        /features   (N, G_full, scgpt_dim) float16
        /norm_mean  (scgpt_dim,)           float32
        /norm_var   (scgpt_dim,)           float32
        /cell_names (N,)                   string
    """

    def __init__(self, h5_path: str, target_std: float = 1.0):
        """
        Open the cache file and build the cell-name index.

        Args:
            h5_path: path to the HDF5 cache produced by the feature extractor.
            target_std: scale applied after standardization; features are
                normalized to unit variance, then multiplied by this value.
        """
        self.h5_path = h5_path
        self.target_std = target_std
        self.h5 = h5py.File(h5_path, "r")
        # Keep the features dataset lazy: (N, G_full, D) float16 stays on
        # disk and is only read row-by-row in lookup().
        self.features = self.h5["features"]
        self.norm_mean = torch.from_numpy(self.h5["norm_mean"][:]).float()  # (D,)
        self.norm_var = torch.from_numpy(self.h5["norm_var"][:]).float()    # (D,)
        # Build cell_name -> row index mapping for O(1) batch lookups.
        cell_names = self.h5["cell_names"].asstr()[:]
        self.name_to_idx = {name: i for i, name in enumerate(cell_names)}

    def lookup(self, cell_names, gene_indices, device=None) -> torch.Tensor:
        """
        Retrieve pre-extracted, normalized features for a batch.

        Args:
            cell_names: list of str, cell identifiers from the batch.
            gene_indices: (G_sub,) tensor of gene subset indices.
            device: optional target torch device for the result.

        Returns:
            (B, G_sub, D) float32 tensor of standardized features,
            scaled by ``target_std``.

        Raises:
            KeyError: if any cell name is not present in the cache.
        """
        # Map cell names to HDF5 row indices, failing with a message that
        # names both the missing cell and the cache file.
        try:
            row_indices = np.array([self.name_to_idx[n] for n in cell_names])
        except KeyError as err:
            raise KeyError(
                f"Cell {err.args[0]!r} not found in feature cache {self.h5_path}"
            ) from None
        # h5py fancy indexing requires sorted, unique indices; np.unique
        # provides both, and `inverse` restores the original batch order
        # (including duplicate cells).
        unique_indices, inverse = np.unique(row_indices, return_inverse=True)
        raw = self.features[unique_indices.tolist()]  # (U, G_full, D) as numpy
        raw = raw[inverse]
        # Select the gene subset before the float32 upcast to keep the copy small.
        gene_idx_np = gene_indices.cpu().numpy()
        raw = raw[:, gene_idx_np, :]  # (B, G_sub, D)
        z = torch.from_numpy(raw.astype(np.float32))
        # Standardize per feature dim: (x - mean) / (sqrt(var) + eps) * target_std.
        eps = 1e-6
        z = (z - self.norm_mean) / (self.norm_var.sqrt() + eps)
        z = z * self.target_std
        if device is not None:
            z = z.to(device)
        return z

    def close(self):
        """Release the underlying HDF5 file handle. Safe to call repeatedly."""
        # getattr guard: __init__ may have failed before self.h5 existed.
        h5 = getattr(self, "h5", None)
        if h5 is not None:
            try:
                h5.close()
            except Exception:
                # Already closed or interpreter shutting down — nothing to do.
                pass
            self.h5 = None

    def __enter__(self):
        """Context-manager entry: return the cache itself."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: close the HDF5 handle; never suppress errors."""
        self.close()
        return False

    def __del__(self):
        # Best-effort cleanup if the user forgot to call close();
        # close() itself handles the partially-initialized case.
        self.close()