"""
Pre-extract scGPT per-gene features for all training cells.
Saves to HDF5 for use with ScGPTFeatureCache during training.
Must run on a GPU node via pjsub.
Usage:
python scripts/preextract_scgpt.py --data_name norman --batch_size 256 --output scgpt_cache_norman.h5
"""
import sys
import os
import argparse

# Set up paths
_PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _PROJECT_ROOT)

# Bootstrap scDFM imports
import _bootstrap_scdfm  # noqa: F401

import torch
import numpy as np
import h5py
from tqdm import tqdm

from src.data.data import get_data_classes
from src.data.scgpt_extractor import FrozenScGPTExtractor

_REPO_ROOT = os.path.dirname(_PROJECT_ROOT)  # transfer/code/


def main():
    parser = argparse.ArgumentParser(description="Pre-extract scGPT features")
    parser.add_argument("--data_name", type=str, default="norman")
    parser.add_argument("--n_top_genes", type=int, default=5000)
    parser.add_argument("--split_method", type=str, default="additive")
    parser.add_argument("--fold", type=int, default=1)
    parser.add_argument("--topk", type=int, default=15)
    parser.add_argument("--use_negative_edge", action="store_true")
    parser.add_argument("--scgpt_model_dir", type=str, default="transfer/data/scGPT_pretrained")
    parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument("--output", type=str, default="scgpt_cache_norman.h5")
    args = parser.parse_args()
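
    # The Norman dataset is always processed with 5,000 HVGs, overriding --n_top_genes.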
    if args.data_name == "norman":
        args.n_top_genes = 5000

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}")

    # === Load data (same as run_cascaded.py) ===
    Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
    scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
    data_manager = Data(scdfm_data_path)
    data_manager.load_data(args.data_name)

    # Convert var_names from Ensembl IDs to gene symbols if needed
    if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
        data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
        data_manager.adata.var_names_make_unique()
        print(f"Converted var_names to gene symbols, sample: {list(data_manager.adata.var_names[:5])}")

    data_manager.process_data(
        n_top_genes=args.n_top_genes,
        split_method=args.split_method,
        fold=args.fold,
        use_negative_edge=args.use_negative_edge,
        k=args.topk,
    )
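    # process_data presumably performs HVG selection and builds the train/valid/test
    # perturbation split for this fold (see src/data/data.py); only the resulting
    # AnnData is needed below.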

    # All splits (train/valid/test) share the same AnnData and gene set,
    # so we extract features once for every cell
    adata = data_manager.adata
    n_cells = adata.n_obs
    n_genes = adata.n_vars
    hvg_gene_names = list(adata.var_names)
    cell_names = list(adata.obs_names)
    print(f"Cells: {n_cells}, Genes: {n_genes}")
    print(f"HVG gene names sample: {hvg_gene_names[:5]}")

    # === Build FrozenScGPTExtractor with large max_seq_len ===
    scgpt_model_dir = os.path.join(
        os.path.dirname(_REPO_ROOT),  # transfer/
        args.scgpt_model_dir.removeprefix("transfer/"),  # strip only the leading "transfer/" (Py>=3.9)
    )

    # Count valid genes to set max_seq_len
    import json

    vocab_path = os.path.join(scgpt_model_dir, "vocab.json")
    with open(vocab_path, "r") as f:
        scgpt_vocab = json.load(f)
    n_valid = sum(1 for g in hvg_gene_names if g in scgpt_vocab)
    max_seq_len = n_valid + 2  # +1 CLS, +1 margin
    print(f"Valid genes in scGPT vocab: {n_valid}/{n_genes}, max_seq_len={max_seq_len}")

    extractor = FrozenScGPTExtractor(
        model_dir=scgpt_model_dir,
        hvg_gene_names=hvg_gene_names,
        device=device,
        max_seq_len=max_seq_len,
        target_std=1.0,
        warmup_batches=0,  # no warmup needed, we compute global stats
    )
    extractor = extractor.to(device)
    extractor.eval()
    scgpt_dim = extractor.scgpt_d_model
    print(f"scGPT d_model: {scgpt_dim}")

    # === Get expression matrix ===
    # adata.X may be sparse
    import scipy.sparse as sp

    if sp.issparse(adata.X):
        X = torch.from_numpy(adata.X.toarray()).float()
    else:
        X = torch.from_numpy(np.array(adata.X)).float()

    # === Create HDF5 output ===
    print(f"Output: {args.output}")
    print(f"Features shape: ({n_cells}, {n_genes}, {scgpt_dim}) float16")
    h5 = h5py.File(args.output, "w")
    feat_ds = h5.create_dataset(
        "features",
        shape=(n_cells, n_genes, scgpt_dim),
        dtype=np.float16,
        chunks=(min(args.batch_size, n_cells), n_genes, scgpt_dim),
    )
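    # NOTE: chunks are aligned with the sequential write batches below. If the
    # training-time reader fetches single cells at random, one-cell chunks
    # (1, n_genes, scgpt_dim) may read faster (an untested assumption).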

    # === Extract features in batches ===
    # We pass gene_indices=None so extract() uses all genes
    # We do NOT apply normalization yet — store raw features, compute stats after
    # Temporarily disable normalization by setting running_mean=0, running_var=1
    extractor.running_mean.zero_()
    extractor.running_var.fill_(1.0)
    extractor._stats_frozen = True  # don't update stats during extraction

    running_sum = torch.zeros(scgpt_dim, dtype=torch.float64)
    running_sq_sum = torch.zeros(scgpt_dim, dtype=torch.float64)
    total_valid_count = 0
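    # Per-dimension mean/var are accumulated in float64 and reduced afterwards
    # via E[x^2] - E[x]^2; float64 keeps the cancellation error in that
    # subtraction small over the millions of gene tokens summed here.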

    for start in tqdm(range(0, n_cells, args.batch_size), desc="Extracting"):
        end = min(start + args.batch_size, n_cells)
        batch_expr = X[start:end].to(device)  # (B, G)
        # Extract with target_std=1.0 and identity normalization → raw features
        with torch.no_grad():
            feats = extractor.extract(batch_expr, gene_indices=None)  # (B, G, D)
        feats_cpu = feats.cpu()
        # Accumulate stats on non-zero features (genes with valid scGPT mapping)
        nonzero_mask = feats_cpu.abs().sum(-1) > 0  # (B, G)
        if nonzero_mask.any():
            valid_feats = feats_cpu[nonzero_mask].double()  # (K, D)
            running_sum += valid_feats.sum(dim=0)
            running_sq_sum += (valid_feats ** 2).sum(dim=0)
            total_valid_count += valid_feats.shape[0]
        # Store raw (un-normalized) features as float16
        feat_ds[start:end] = feats_cpu.numpy().astype(np.float16)

    # === Compute global normalization statistics ===
    if total_valid_count == 0:
        raise RuntimeError("No genes mapped into the scGPT vocab; cannot compute stats.")
    global_mean = (running_sum / total_valid_count).float()
    global_var = ((running_sq_sum / total_valid_count) - global_mean.double() ** 2).float().clamp_min(0)
print(f"Global mean range: [{global_mean.min():.4f}, {global_mean.max():.4f}]")
print(f"Global var range: [{global_var.min():.4f}, {global_var.max():.4f}]")
    # Save stats and cell names
    h5.create_dataset("norm_mean", data=global_mean.numpy())
    h5.create_dataset("norm_var", data=global_var.numpy())

    # Save cell names as variable-length strings
    dt = h5py.string_dtype()
    h5.create_dataset("cell_names", data=np.array(cell_names, dtype=object), dtype=dt)
    h5.close()

    print(f"Done! Saved to {args.output}")
    print(f" Features: ({n_cells}, {n_genes}, {scgpt_dim}) float16")
    print(f" Valid features counted: {total_valid_count}")


if __name__ == "__main__":
    main()
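
# A hypothetical read-back sketch for consumers of the cache. The dataset names
# match what this script writes; masking of unmapped genes and the eps value
# are assumptions, not part of this script:
#
#   with h5py.File("scgpt_cache_norman.h5", "r") as h5:
#       mean = h5["norm_mean"][:]                       # (D,)
#       std = np.sqrt(h5["norm_var"][:] + 1e-6)         # (D,)
#       feats = h5["features"][0].astype(np.float32)    # (G, D) raw features
#       valid = np.abs(feats).sum(-1) > 0               # unmapped genes are all-zero rows
#       feats[valid] = (feats[valid] - mean) / std      # normalize only mapped genes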