"""
FrozenScGPTExtractor — Frozen scGPT model for on-the-fly per-gene feature extraction.

Analogous to LatentForcing's dinov2_hf.py RAE class:
- Frozen encoder (no gradients)
- Running statistics for normalization
- Variance matching to align scale with expression embeddings
"""

import sys
import os
import json
import logging
import types
import warnings
from typing import List, Optional

import torch
import torch.nn as nn

# Set up scGPT imports — create minimal package stubs to avoid scgpt/__init__.py
# pulling in heavy dependencies (datasets, scbank, etc.)
_SCGPT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..", "..", "scGPT"))
if _SCGPT_ROOT not in sys.path:
    sys.path.insert(0, _SCGPT_ROOT)

# Create minimal package stubs
for pkg, subdir in [
    ("scgpt", "scgpt"),
    ("scgpt.model", "scgpt/model"),
    ("scgpt.utils", "scgpt/utils"),
]:
    if pkg not in sys.modules:
        mod = types.ModuleType(pkg)
        mod.__path__ = [os.path.join(_SCGPT_ROOT, subdir)]
        sys.modules[pkg] = mod

# Add logger stub
if not hasattr(sys.modules["scgpt"], "logger"):
    sys.modules["scgpt"].logger = logging.getLogger("scgpt")

from scgpt.model.dsbn import DomainSpecificBatchNorm1d  # noqa: F401 (dependency of model.py)
from scgpt.model.grad_reverse import grad_reverse  # noqa: F401 (dependency of model.py)
from scgpt.model.model import TransformerModel


def _load_pretrained_safe(model, pretrained_params, verbose=False):
    """Load pretrained weights with non-strict matching (simplified from scGPT)."""
    model_dict = model.state_dict()
    loaded = 0
    for key, val in pretrained_params.items():
        # Handle flash attention -> standard attention key mapping
        new_key = key.replace("Wqkv.", "in_proj_").replace("inner_attn.out_proj", "out_proj")
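        # Illustrative remap (module path is an example): a flash-attn key
        # like "...self_attn.Wqkv.weight" becomes "...self_attn.in_proj_weight".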
        if new_key in model_dict and model_dict[new_key].shape == val.shape:
            model_dict[new_key] = val
            loaded += 1
        elif key in model_dict and model_dict[key].shape == val.shape:
            model_dict[key] = val
            loaded += 1
    model.load_state_dict(model_dict)
    if verbose:
        print(f"Loaded {loaded}/{len(pretrained_params)} pretrained parameters")


class FrozenScGPTExtractor(nn.Module):
    """
    Wraps a frozen scGPT TransformerModel for on-the-fly per-gene feature extraction.
    Similar to LatentForcing's RAE (frozen DINO-v2 encoder).

    Given expression values for G HVG genes, extracts contextualized per-gene features
    from scGPT's transformer encoder, then scatters them back to a fixed G-length tensor.

    Output: (B, G, scgpt_d_model) normalized features.
    """

    def __init__(
        self,
        model_dir: str,
        hvg_gene_names: List[str],
        device: torch.device = torch.device("cpu"),
        max_seq_len: int = 1200,
        target_std: float = 1.0,
        warmup_batches: int = 200,
    ):
        super().__init__()
        self.device = device
        self.max_seq_len = max_seq_len
        self.target_std = target_std
        self.warmup_batches = warmup_batches
        self.n_hvg = len(hvg_gene_names)

        # Load scGPT vocab as a simple dict (avoid torchtext dependency)
        vocab_path = os.path.join(model_dir, "vocab.json")
        with open(vocab_path, "r") as f:
            self.scgpt_vocab = json.load(f)  # {gene_name: index}

        # Build HVG -> scGPT vocab ID mapping
        self.hvg_gene_names = hvg_gene_names
        hvg_to_scgpt_id = []
        missing_count = 0
        for gene in hvg_gene_names:
            if gene in self.scgpt_vocab:
                hvg_to_scgpt_id.append(self.scgpt_vocab[gene])
            else:
                hvg_to_scgpt_id.append(-1)
                missing_count += 1
        if missing_count > 0:
            warnings.warn(
                f"FrozenScGPTExtractor: {missing_count}/{len(hvg_gene_names)} HVG genes "
                f"not found in the scGPT vocab; their features will be zero vectors."
            )
        self.register_buffer(
            "hvg_to_scgpt_id",
            torch.tensor(hvg_to_scgpt_id, dtype=torch.long),
        )

        # Load scGPT model config
        args_path = os.path.join(model_dir, "args.json")
        with open(args_path, "r") as f:
            model_args = json.load(f)

        self.scgpt_d_model = model_args.get("embsize", 512)

        # Build scGPT model (using a simple Vocab-like wrapper)
        pad_token = model_args.get("pad_token", "<pad>")
        pad_value = model_args.get("pad_value", 0)
        vocab_size = len(self.scgpt_vocab)
        pad_token_id = self.scgpt_vocab.get(pad_token, 0)

        # Minimal vocab-like object exposing the interface TransformerModel
        # needs (token lookup, length, membership) without torchtext
        class _SimpleVocab:
            def __init__(self, mapping):
                self._map = mapping

            def __getitem__(self, token):
                return self._map.get(token, 0)

            def __len__(self):
                return len(self._map)

            def __contains__(self, token):
                return token in self._map

        simple_vocab = _SimpleVocab(self.scgpt_vocab)

        self.scgpt_model = TransformerModel(
            ntoken=vocab_size,
            d_model=self.scgpt_d_model,
            nhead=model_args.get("nheads", 8),
            d_hid=model_args.get("d_hid", 512),
            nlayers=model_args.get("nlayers", 12),
            vocab=simple_vocab,
            dropout=0.0,
            pad_token=pad_token,
            pad_value=pad_value,
            input_emb_style="continuous",
            use_fast_transformer=False,
        )

        # Load pretrained weights
        model_file = os.path.join(model_dir, "best_model.pt")
        pretrained_params = torch.load(model_file, map_location="cpu")
        _load_pretrained_safe(self.scgpt_model, pretrained_params, verbose=True)

        # Freeze all parameters
        self.scgpt_model.eval()
        for p in self.scgpt_model.parameters():
            p.requires_grad_(False)

        # Pad/CLS token IDs
        self.pad_token_id = pad_token_id
        self.cls_token_id = self.scgpt_vocab.get("<cls>", pad_token_id)

        # Running statistics for normalization (like dinov2_hf.py)
        self.register_buffer("running_mean", torch.zeros(self.scgpt_d_model))
        self.register_buffer("running_var", torch.ones(self.scgpt_d_model))
        self.register_buffer("n_batches_seen", torch.tensor(0, dtype=torch.long))
        self._stats_frozen = False
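        # (_stats_frozen is a plain attribute, not a buffer, so it is not
        # persisted in the state_dict; after loading a checkpoint it is
        # re-set on the next stats update once n_batches_seen has reached
        # warmup_batches.)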

    def _update_running_stats(self, z: torch.Tensor):
        """Update running mean/var from a batch of features. z: (total_genes, d_model)"""
        if self._stats_frozen or z.numel() == 0:
            return

        batch_mean = z.mean(dim=0)
        batch_var = z.var(dim=0, unbiased=False)
        n = self.n_batches_seen.item()

        # Cumulative (running) average over batches:
        #   mean_{n+1} = mean_n + (batch_mean - mean_n) / (n + 1)
        momentum = 1.0 / (n + 1)
        self.running_mean.lerp_(batch_mean, momentum)
        self.running_var.lerp_(batch_var, momentum)
        self.n_batches_seen += 1

        if self.n_batches_seen.item() >= self.warmup_batches:
            self._stats_frozen = True

    @torch.no_grad()
    def extract(self, expression_values: torch.Tensor, gene_indices: Optional[torch.Tensor] = None) -> torch.Tensor:
        """
        Extract per-gene contextualized features from frozen scGPT.

        Args:
            expression_values: (B, G) expression values for G genes
            gene_indices: (G,) optional indices into the full HVG list.
                         If provided, the corresponding subset of the
                         hvg_to_scgpt_id mapping is used. If None,
                         expression_values is assumed to cover all n_hvg genes.

        Returns:
            (B, G, scgpt_d_model) normalized per-gene features
        """
        B, G = expression_values.shape
        device = expression_values.device

        # Select the appropriate scGPT ID mapping
        if gene_indices is not None:
            hvg_ids = self.hvg_to_scgpt_id[gene_indices]  # (G,)
        else:
            hvg_ids = self.hvg_to_scgpt_id  # (n_hvg,)

        # Valid mask: genes that have a scGPT vocab mapping
        valid_mask = hvg_ids >= 0  # (G,)
        valid_scgpt_ids = hvg_ids[valid_mask]  # (G_valid,)
        n_valid = valid_scgpt_ids.shape[0]

        # Get expression values for valid genes only
        expr_valid = expression_values[:, valid_mask]  # (B, G_valid)

        # Limit sequence length
        if n_valid + 1 > self.max_seq_len:  # +1 for CLS
            perm = torch.randperm(n_valid, device=device)[:self.max_seq_len - 1]
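            # sort so the random subset keeps the genes' original relative order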
            perm, _ = perm.sort()
            selected_scgpt_ids = valid_scgpt_ids[perm]
            selected_expr = expr_valid[:, perm]
            seq_len = self.max_seq_len
            selected_valid_idx = torch.where(valid_mask)[0][perm]
        else:
            selected_scgpt_ids = valid_scgpt_ids
            selected_expr = expr_valid
            seq_len = n_valid + 1
            selected_valid_idx = torch.where(valid_mask)[0]

        # Build input: prepend CLS token
        cls_ids = torch.full((B, 1), self.cls_token_id, dtype=torch.long, device=device)
        gene_ids = selected_scgpt_ids.unsqueeze(0).expand(B, -1)
        src = torch.cat([cls_ids, gene_ids], dim=1)  # (B, seq_len)

        # zero "expression" for the CLS slot; match dtype so torch.cat succeeds
        cls_val = torch.zeros(B, 1, device=device, dtype=selected_expr.dtype)
        values = torch.cat([cls_val, selected_expr], dim=1)  # (B, seq_len)

        # Padding mask: all False, since every sequence in the batch has the
        # same length and every position is a real token
        src_key_padding_mask = torch.zeros(B, seq_len, dtype=torch.bool, device=device)

        # Run frozen scGPT encoder
        encoder_out = self.scgpt_model._encode(
            src, values, src_key_padding_mask
        )  # (B, seq_len, d_model)

        # Skip CLS token, get per-gene features
        gene_features = encoder_out[:, 1:, :]  # (B, seq_len - 1, d_model)

        # Scatter back to fixed G positions
        output = torch.zeros(B, G, self.scgpt_d_model, device=device, dtype=gene_features.dtype)
        idx = selected_valid_idx.unsqueeze(0).unsqueeze(-1).expand(B, -1, self.scgpt_d_model)
        output.scatter_(1, idx, gene_features)

        # Update running statistics (only during training warmup)
        if self.training and not self._stats_frozen:
            nonzero_mask = output.abs().sum(-1) > 0
            if nonzero_mask.any():
                nonzero_feats = output[nonzero_mask]
                self._update_running_stats(nonzero_feats)

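        # Genes without a scGPT vocab mapping (and genes dropped by the
        # max_seq_len subsampling) were left as zero vectors above; after the
        # mean-centering below they all map to the same constant vector, i.e.
        # a shared "missing" embedding rather than exact zeros.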
        # Normalize: zero mean, unit variance, then scale
        eps = 1e-6
        output = (output - self.running_mean) / (self.running_var.sqrt() + eps)
        output = output * self.target_std

        return output

    def train(self, mode: bool = True):
        """Override to keep scGPT always in eval mode."""
        super().train(mode)
        self.scgpt_model.eval()
        return self
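

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the module API).
# model_dir and the gene names are placeholders: point model_dir at a real
# scGPT checkpoint directory containing vocab.json, args.json, and
# best_model.pt, and pass your actual HVG list.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    hvg_names = ["GAPDH", "ACTB", "NOT_A_REAL_GENE"]  # placeholder HVG list
    extractor = FrozenScGPTExtractor(
        model_dir="path/to/scGPT_checkpoint",  # placeholder path
        hvg_gene_names=hvg_names,
    )
    extractor.eval()  # inference: no running-stat updates
    expr = torch.rand(2, len(hvg_names))  # (B, G) fake expression values
    feats = extractor.extract(expr)
    print(feats.shape)  # (2, 3, embsize), where embsize defaults to 512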