File size: 8,123 Bytes
0161e74 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 | """Custom generation loop with per-step embedding-based prompt selection.
Re-implements the MDM loop from ``get_incontext_generation`` but replaces
random prompt selection with the two-stage embedding similarity approach
at every generation step.
"""
from __future__ import annotations
import logging
import math
from typing import Optional, Tuple
import anndata as ad
import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from stack.models.utils import align_result_to_adata_numpy
from prompt_selection.prompt_selector import select_prompt_indices
LOGGER = logging.getLogger("prompt_selection.custom_generation")
def custom_generation_loop(
model,
query_adata: ad.AnnData,
prompt_pert_adata: ad.AnnData,
genelist_path: str,
query_embeddings: np.ndarray,
prompt_ctrl_embeddings: np.ndarray,
predicted_pert_embeddings: np.ndarray,
prompt_pert_embeddings: np.ndarray,
*,
num_steps: int = 5,
prompt_ratio: float = 0.25,
context_ratio: float = 0.4,
context_ratio_min: float = 0.2,
top_k1: int = 512,
batch_size: int = 32,
num_workers: int = 4,
gene_name_col: Optional[str] = None,
) -> Tuple[csr_matrix, np.ndarray]:
"""Run MDM generation with per-step prompt re-selection.
Parameters
----------
model : StateICLModel (ICL_FinetunedModel)
Loaded ``bc_large_aligned`` model in eval mode.
query_adata : AnnData
Query cells (B cells, DMSO). Will be mutated in-place each step.
prompt_pert_adata : AnnData
Real perturbed prompt cells (T cells, Dabrafenib).
genelist_path : str
Path to pickled gene list.
query_embeddings : (N_query, D)
prompt_ctrl_embeddings : (N_ctrl, D)
predicted_pert_embeddings : (N_ctrl, D)
prompt_pert_embeddings : (N_pert, D)
num_steps, prompt_ratio, context_ratio, context_ratio_min, top_k1 :
Generation hyperparameters.
batch_size, num_workers : DataLoader settings.
gene_name_col : Optional gene name column.
Returns
-------
result : csr_matrix
Final predicted expression (aligned to query_adata gene space).
final_logit : np.ndarray
Per-cell logit scores from the last step.
"""
n_cells = model.n_cells # 512
cell_names = query_adata.obs_names.copy()
N = len(cell_names)
# Global state indexed by obs_names
is_masked = pd.Series(np.ones(N, dtype=bool), index=cell_names)
test_logit = pd.Series(np.zeros(N, dtype=np.float32), index=cell_names)
# MDM schedule (matches get_incontext_generation lines 869-875)
t = (np.arange(num_steps, dtype=np.float32) + 1) / num_steps
cr_list = np.linspace(context_ratio_min, context_ratio, num_steps, dtype=np.float32)
mr_list = 1 - t
mr_list[-1] = 0.0
LOGGER.info("Masking ratio schedule: %s", mr_list)
LOGGER.info("Context ratio schedule: %s", cr_list)
result = None
for step_idx, (mr, cr) in enumerate(zip(mr_list, cr_list)):
ratio = prompt_ratio + cr
n_test_cells = max(1, int(n_cells * (1 - ratio)))
n_base_cells = n_cells - n_test_cells
num_batches = math.ceil(N / n_test_cells)
LOGGER.info(
"Step %d/%d: mr=%.3f cr=%.3f n_test=%d n_base=%d batches=%d",
step_idx + 1, num_steps, mr, cr, n_test_cells, n_base_cells, num_batches,
)
mixed_adata_list = []
is_test_cell_parts = []
flat_idx_to_cell_name = []
for i in range(num_batches):
start = i * n_test_cells
end = min((i + 1) * n_test_cells, N)
batch_cell_names = cell_names[start:end]
actual_len = len(batch_cell_names)
# Pad last batch if needed
if actual_len < n_test_cells:
pad_names = cell_names[:n_test_cells - actual_len]
current_test_slice = query_adata[list(batch_cell_names) + list(pad_names)]
else:
current_test_slice = query_adata[list(batch_cell_names)]
# Two-stage prompt selection
batch_global_indices = np.arange(start, end)
selected_base_idx = select_prompt_indices(
query_embeddings=query_embeddings,
batch_global_indices=batch_global_indices,
prompt_ctrl_embeddings=prompt_ctrl_embeddings,
predicted_pert_embeddings=predicted_pert_embeddings,
prompt_pert_embeddings=prompt_pert_embeddings,
n_base_cells=n_base_cells,
top_k1=top_k1,
)
actual_base = len(selected_base_idx)
# Build mixed sample: [base_cells, test_cells]
sample_adata = ad.concat(
[prompt_pert_adata[selected_base_idx], current_test_slice],
join="inner",
axis=0,
)
mixed_adata_list.append(sample_adata)
# Track test cell positions (only real cells, not padding)
batch_mask = np.zeros(actual_base + n_test_cells, dtype=bool)
batch_mask[actual_base : actual_base + actual_len] = True
is_test_cell_parts.append(batch_mask)
flat_idx_to_cell_name.extend(batch_cell_names)
is_test_cell_mask = np.concatenate(is_test_cell_parts)
# Concatenate all batches
full_mixed_adata = ad.concat(mixed_adata_list, axis=0, join="inner")
# Model forward pass
mean_preds, disp_preds, count_preds, logit_preds = model.get_prediction(
adata_path=full_mixed_adata,
genelist_path=genelist_path,
gene_name_col=gene_name_col,
cell_ratio=prompt_ratio,
context_ratio=cr,
batch_size=batch_size,
num_workers=num_workers,
)
# Extract test cell results
result_counts = count_preds[is_test_cell_mask] # (N, n_model_genes)
new_logit = logit_preds[is_test_cell_mask] # (N,)
result_cell_names = np.array(flat_idx_to_cell_name) # (N,)
# ---- MDM unmasking logic (mirrors get_incontext_prediction lines 694-703) ----
cur_is_masked = is_masked.loc[result_cell_names].values.copy()
cur_new_logit = new_logit.copy()
cell_indices_to_keep = np.zeros(N, dtype=bool)
# 1) Already unmasked cells keep their previous prediction
cell_indices_to_keep[~cur_is_masked] = True
# 2) Compute unmask_rate
n_masked = cur_is_masked.sum()
n_total = len(cur_is_masked)
if n_masked > 0:
unmask_rate = (n_masked / n_total - mr) * n_total / n_masked
unmask_rate = np.clip(unmask_rate, 0.0, 1.0)
else:
unmask_rate = 1.0
# 3) Unmask cells with logit above quantile threshold
if n_masked > 0:
masked_logits = cur_new_logit[cur_is_masked]
threshold = np.quantile(masked_logits, unmask_rate)
cell_indices_to_keep[cur_is_masked] = cur_new_logit[cur_is_masked] > threshold
# 4) Update global is_masked
new_is_masked = cur_is_masked.copy()
new_is_masked[~cell_indices_to_keep] = False
new_is_masked[cur_new_logit > 0] = True
is_masked.loc[result_cell_names] = new_is_masked
# 5) Update global test_logit
test_logit.loc[result_cell_names] = cur_new_logit
LOGGER.info(
"Step %d: unmasked fraction = %.3f",
step_idx + 1,
(cur_new_logit < 0).sum() / len(cur_new_logit),
)
# Align to query_adata gene space
aligned_result = align_result_to_adata_numpy(
result_counts,
query_adata,
genelist_path,
gene_name_col,
cell_indices_to_keep=cell_indices_to_keep,
)
result = csr_matrix(aligned_result)
# Update query_adata for next step (consistent with original code)
if query_adata.raw is not None:
query_adata.raw.X = result
else:
query_adata.X = result
final_logit = test_logit.loc[cell_names].values
return result, final_logit
|