| |
| """ |
| Visualize B-cell experiment results: Adaptive Prompt Selection vs Stack Baseline. |
| |
| Reads cell-eval CSV outputs and generates comparison charts. |
| |
| Usage: |
| python code/adaptive_prompt_selection/visualize_results.py \ |
| --results-dir data/bcell_test_results \ |
| --output-dir data/bcell_test_results/figures |
| """ |
|
|
| import argparse |
| from pathlib import Path |
|
|
| import matplotlib |
| matplotlib.use("Agg") |
| import matplotlib.pyplot as plt |
| import matplotlib.ticker as mticker |
| import numpy as np |
| import pandas as pd |
|
|
|
|
| |
| |
| |
|
|
| |
# Metrics whose values improve as they increase (overlap/precision/correlation-style).
HIGHER_IS_BETTER = {
    "overlap_at_N", "overlap_at_50", "overlap_at_100", "overlap_at_200", "overlap_at_500",
    "precision_at_N", "precision_at_50", "precision_at_100", "precision_at_200", "precision_at_500",
    "de_spearman_sig", "de_direction_match", "de_spearman_lfc_sig", "de_sig_genes_recall",
    "pr_auc", "roc_auc", "pearson_delta",
    "discrimination_score_l1", "discrimination_score_l2", "discrimination_score_cosine",
}
# Error-style metrics whose values improve as they decrease.
LOWER_IS_BETTER = {"mse", "mae", "mse_delta", "mae_delta"}

# Metrics excluded from the comparison plots.
# NOTE(review): "de_spearman_sig" appears both here and in HIGHER_IS_BETTER —
# presumably it is deliberately skipped in plots despite having a direction; confirm.
SKIP_METRICS = {
    "de_nsig_counts_real", "de_nsig_counts_pred",
    "pearson_edistance",
    "de_spearman_sig",
}

# Logical grouping of metric keys, used to prefix labels in the detail plot.
METRIC_GROUPS = {
    "DE Gene Overlap": ["overlap_at_50", "overlap_at_100", "overlap_at_200", "overlap_at_500", "overlap_at_N"],
    "DE Precision": ["precision_at_50", "precision_at_100", "precision_at_200", "precision_at_500", "precision_at_N"],
    "DE Quality": ["de_direction_match", "de_spearman_lfc_sig", "de_sig_genes_recall"],
    "Classification": ["pr_auc", "roc_auc"],
    "Expression Error": ["pearson_delta", "mse", "mae", "mse_delta", "mae_delta"],
    "Discrimination": ["discrimination_score_l1", "discrimination_score_l2", "discrimination_score_cosine"],
}

# Human-readable display names for metric column keys (fallback is the raw key).
NICE_NAMES = {
    "overlap_at_N": "Overlap@N",
    "overlap_at_50": "Overlap@50",
    "overlap_at_100": "Overlap@100",
    "overlap_at_200": "Overlap@200",
    "overlap_at_500": "Overlap@500",
    "precision_at_N": "Precision@N",
    "precision_at_50": "Precision@50",
    "precision_at_100": "Precision@100",
    "precision_at_200": "Precision@200",
    "precision_at_500": "Precision@500",
    "de_direction_match": "DE Direction Match",
    "de_spearman_lfc_sig": "DE Spearman LFC (sig)",
    "de_sig_genes_recall": "DE Sig Genes Recall",
    "pr_auc": "PR-AUC",
    "roc_auc": "ROC-AUC",
    "pearson_delta": "Pearson (delta)",
    "mse": "MSE",
    "mae": "MAE",
    "mse_delta": "MSE (delta)",
    "mae_delta": "MAE (delta)",
    "discrimination_score_l1": "Discrim L1",
    "discrimination_score_l2": "Discrim L2",
    "discrimination_score_cosine": "Discrim Cosine",
}
|
|
|
|
def load_per_pert_results(results_dir: Path):
    """Load per-perturbation results for both methods.

    Expects `celleval_adaptive/results.csv` and `celleval_baseline/results.csv`
    under *results_dir*; returns the two DataFrames as (adaptive, baseline).
    """
    adaptive = pd.read_csv(results_dir / "celleval_adaptive" / "results.csv")
    baseline = pd.read_csv(results_dir / "celleval_baseline" / "results.csv")
    return adaptive, baseline
|
|
|
|
def load_comparison(results_dir: Path):
    """Load the mean comparison CSV (`comparison_mean.csv`) as a DataFrame."""
    comparison_path = results_dir / "comparison_mean.csv"
    return pd.read_csv(comparison_path)
|
|
|
|
| |
| |
| |
|
|
def plot_per_drug_bars(ada_df, bas_df, output_dir: Path, drug: str):
    """Side-by-side bar chart for a single drug across key metrics.

    Writes `per_drug_<drug>.png` into *output_dir* and prints a confirmation.
    """
    ada_row = ada_df[ada_df["perturbation"] == drug].iloc[0]
    bas_row = bas_df[bas_df["perturbation"] == drug].iloc[0]

    candidate_metrics = [
        "pearson_delta", "mse", "mae",
        "de_direction_match", "de_spearman_lfc_sig", "de_sig_genes_recall",
        "overlap_at_N", "pr_auc", "roc_auc",
    ]

    # Keep metrics that are non-trivial (not both ~0) and not the -1.0
    # "unavailable" sentinel in either method.
    metrics = [
        m for m in candidate_metrics
        if (abs(ada_row[m]) > 1e-10 or abs(bas_row[m]) > 1e-10)
        and ada_row[m] != -1.0 and bas_row[m] != -1.0
    ]

    ada_vals = [ada_row[m] for m in metrics]
    bas_vals = [bas_row[m] for m in metrics]
    labels = [NICE_NAMES.get(m, m) for m in metrics]

    positions = np.arange(len(metrics))
    bar_w = 0.35

    fig, ax = plt.subplots(figsize=(12, 5))
    ax.bar(positions - bar_w / 2, ada_vals, bar_w, label="Adaptive", color="#2196F3", alpha=0.85)
    ax.bar(positions + bar_w / 2, bas_vals, bar_w, label="Baseline (Random)", color="#FF9800", alpha=0.85)

    ax.set_ylabel("Metric Value")
    ax.set_title(f"Adaptive vs Baseline — {drug}", fontsize=14, fontweight="bold")
    ax.set_xticks(positions)
    ax.set_xticklabels(labels, rotation=35, ha="right", fontsize=9)
    ax.legend(fontsize=11)
    ax.grid(axis="y", alpha=0.3)

    # Annotate each pair with the signed diff, colored by whichever method wins
    # (ties count for the baseline color, matching the bar annotation logic elsewhere).
    for pos, m, a, b in zip(positions, metrics, ada_vals, bas_vals):
        diff = a - b
        adaptive_wins = (diff < 0) if m in LOWER_IS_BETTER else (diff > 0)
        color = "#2196F3" if adaptive_wins else "#FF9800"
        sign = "+" if diff > 0 else ""
        ax.annotate(f"{sign}{diff:.4f}", xy=(pos, max(a, b)),
                    fontsize=7, ha="center", va="bottom", color=color, fontweight="bold")

    fig.tight_layout()
    fig.savefig(output_dir / f"per_drug_{drug.replace(' ', '_')}.png", dpi=150)
    plt.close(fig)
    print(f" Saved per_drug_{drug.replace(' ', '_')}.png")
|
|
|
|
| |
| |
| |
|
|
def plot_radar(ada_row, bas_row, output_dir: Path, drug: str):
    """Radar chart comparing the two methods on a single drug.

    Silently returns when fewer than three usable metrics remain (a radar
    plot needs at least three axes to be meaningful).
    """
    candidates = [
        "pearson_delta", "de_direction_match", "de_spearman_lfc_sig",
        "de_sig_genes_recall", "overlap_at_N", "pr_auc", "roc_auc",
    ]
    # Drop metrics carrying the -1.0 "unavailable" sentinel in either method.
    metrics = [m for m in candidates if ada_row[m] != -1.0 and bas_row[m] != -1.0]
    if len(metrics) < 3:
        return

    labels = [NICE_NAMES.get(m, m) for m in metrics]
    ada_vals = [ada_row[m] for m in metrics]
    bas_vals = [bas_row[m] for m in metrics]

    # Evenly spaced axes; repeat the first vertex so the polygon closes.
    angles = np.linspace(0, 2 * np.pi, len(metrics), endpoint=False).tolist()
    closed_angles = [*angles, angles[0]]
    closed_ada = [*ada_vals, ada_vals[0]]
    closed_bas = [*bas_vals, bas_vals[0]]

    fig, ax = plt.subplots(figsize=(7, 7), subplot_kw=dict(polar=True))
    ax.plot(closed_angles, closed_ada, "o-", linewidth=2, label="Adaptive", color="#2196F3")
    ax.fill(closed_angles, closed_ada, alpha=0.15, color="#2196F3")
    ax.plot(closed_angles, closed_bas, "s-", linewidth=2, label="Baseline", color="#FF9800")
    ax.fill(closed_angles, closed_bas, alpha=0.15, color="#FF9800")

    ax.set_thetagrids(np.degrees(angles), labels, fontsize=9)
    ax.set_title(f"Adaptive vs Baseline — {drug}", fontsize=13, fontweight="bold", pad=20)
    ax.legend(loc="upper right", bbox_to_anchor=(1.3, 1.1), fontsize=10)
    # NOTE(review): values below 0 (possible for pearson_delta) are clipped
    # by this radial limit — confirm that is intended.
    ax.set_ylim(0, 1.05)

    fig.tight_layout()
    fig.savefig(output_dir / f"radar_{drug.replace(' ', '_')}.png", dpi=150)
    plt.close(fig)
    print(f" Saved radar_{drug.replace(' ', '_')}.png")
|
|
|
|
| |
| |
| |
|
|
def plot_grouped_comparison(comparison_df, output_dir: Path):
    """Multi-panel grouped bar chart from the mean comparison.

    Top panel: per-metric mean values for both methods. Bottom panel:
    adaptive-minus-baseline differences, colored by which method wins.
    Writes `comparison_mean_bars.png` into *output_dir*.
    """
    from matplotlib.patches import Patch

    # Drop skipped metrics, then drop rows whose difference is effectively zero.
    comp = comparison_df[~comparison_df["metric"].isin(SKIP_METRICS)].copy()
    comp = comp[comp["diff"].abs() > 1e-12]
    if comp.empty:
        print(" No non-trivial metric differences to plot.")
        return

    metrics = comp["metric"].tolist()
    ada_vals = comp["adaptive"].tolist()
    bas_vals = comp["baseline"].tolist()
    diffs = comp["diff"].tolist()
    labels = [NICE_NAMES.get(m, m) for m in metrics]

    positions = np.arange(len(metrics))
    bar_w = 0.35

    fig, (ax_vals, ax_diff) = plt.subplots(
        2, 1, figsize=(14, 8), gridspec_kw={"height_ratios": [3, 1]}
    )

    # --- top panel: raw mean values ---
    ax_vals.bar(positions - bar_w / 2, ada_vals, bar_w, label="Adaptive", color="#2196F3", alpha=0.85)
    ax_vals.bar(positions + bar_w / 2, bas_vals, bar_w, label="Baseline", color="#FF9800", alpha=0.85)
    ax_vals.set_ylabel("Metric Value (mean across perturbations)")
    ax_vals.set_title("Adaptive Prompt Selection vs Random Baseline — Mean Comparison", fontsize=13, fontweight="bold")
    ax_vals.set_xticks(positions)
    ax_vals.set_xticklabels(labels, rotation=40, ha="right", fontsize=8)
    ax_vals.legend(fontsize=10)
    ax_vals.grid(axis="y", alpha=0.3)

    # --- bottom panel: signed differences, colored by winner ---
    def _win_color(metric, delta):
        # Blue = Adaptive wins for this metric's direction; ties fall to orange.
        if metric in LOWER_IS_BETTER:
            return "#2196F3" if delta < 0 else "#FF9800"
        return "#2196F3" if delta > 0 else "#FF9800"

    colors = [_win_color(m, d) for m, d in zip(metrics, diffs)]
    ax_diff.bar(positions, diffs, 0.5, color=colors, alpha=0.85)
    ax_diff.axhline(0, color="black", linewidth=0.8)
    ax_diff.set_ylabel("Diff (Adaptive - Baseline)")
    ax_diff.set_xticks(positions)
    ax_diff.set_xticklabels(labels, rotation=40, ha="right", fontsize=8)
    ax_diff.grid(axis="y", alpha=0.3)

    legend_elements = [
        Patch(facecolor="#2196F3", alpha=0.85, label="Adaptive wins"),
        Patch(facecolor="#FF9800", alpha=0.85, label="Baseline wins"),
    ]
    ax_diff.legend(handles=legend_elements, fontsize=9, loc="upper right")

    fig.tight_layout()
    fig.savefig(output_dir / "comparison_mean_bars.png", dpi=150)
    plt.close(fig)
    print(" Saved comparison_mean_bars.png")
|
|
|
|
| |
| |
| |
|
|
def plot_dabrafenib_detail(ada_df, bas_df, output_dir: Path):
    """Detailed comparison for Dabrafenib only (the real test drug).

    Draws a horizontal grouped bar chart over every usable metric in
    METRIC_GROUPS and writes `dabrafenib_detail.png` into *output_dir*.
    """
    if "Dabrafenib" not in ada_df["perturbation"].values:
        print(" No Dabrafenib data found, skipping detail plot.")
        return

    ada = ada_df[ada_df["perturbation"] == "Dabrafenib"].iloc[0]
    bas = bas_df[bas_df["perturbation"] == "Dabrafenib"].iloc[0]

    # Collect (group, metric, adaptive_value, baseline_value) for each usable metric.
    rows = []
    for group_name, group_metrics in METRIC_GROUPS.items():
        for m in group_metrics:
            if m in SKIP_METRICS:
                continue
            a = ada.get(m, None)
            b = bas.get(m, None)
            if a is None or b is None:
                continue  # column missing from the results CSV
            if a == -1.0 and b == -1.0:
                continue  # -1.0 sentinel in both methods: metric unavailable
            rows.append((group_name, m, a, b))

    if not rows:
        return

    fig, ax = plt.subplots(figsize=(10, max(6, len(rows) * 0.4)))

    y_pos = np.arange(len(rows))
    labels = [f"[{group}] {NICE_NAMES.get(m, m)}" for group, m, _, _ in rows]
    ada_vals = [a for _, _, a, _ in rows]
    bas_vals = [b for _, _, _, b in rows]

    def _winner_color(metric, delta):
        # Blue = Adaptive wins for this metric's direction; ties fall to orange.
        if metric in LOWER_IS_BETTER:
            return "#2196F3" if delta < 0 else "#FF9800"
        return "#2196F3" if delta > 0 else "#FF9800"

    win_colors = [_winner_color(m, a - b) for _, m, a, b in rows]

    h = 0.35
    ax.barh(y_pos - h / 2, ada_vals, h, label="Adaptive", color="#2196F3", alpha=0.8)
    ax.barh(y_pos + h / 2, bas_vals, h, label="Baseline", color="#FF9800", alpha=0.8)

    ax.set_yticks(y_pos)
    ax.set_yticklabels(labels, fontsize=8)
    ax.invert_yaxis()
    ax.set_xlabel("Metric Value")
    ax.set_title("Dabrafenib — Detailed Metric Comparison", fontsize=13, fontweight="bold")
    ax.legend(fontsize=10, loc="lower right")
    ax.grid(axis="x", alpha=0.3)

    # Annotate each metric pair with the signed difference, colored by winner.
    for i, (_, m, a, b) in enumerate(rows):
        delta = a - b
        prefix = "+" if delta > 0 else ""
        ax.annotate(f"{prefix}{delta:.4f}", xy=(max(a, b) + 0.01, y_pos[i]),
                    fontsize=7, va="center", color=win_colors[i], fontweight="bold")

    fig.tight_layout()
    fig.savefig(output_dir / "dabrafenib_detail.png", dpi=150)
    plt.close(fig)
    print(" Saved dabrafenib_detail.png")
|
|
|
|
| |
| |
| |
|
|
def print_summary_table(ada_df, bas_df):
    """Print a per-drug text summary plus an aggregate over non-trivial drugs.

    A metric value of -1.0 is treated as a "not computed" sentinel: rows where
    both methods report -1.0 are skipped, and sentinel values are excluded from
    the aggregate means (previously they were averaged in, skewing the mean
    whenever only some drugs carried the sentinel).

    Args:
        ada_df: per-perturbation results for the adaptive method.
        bas_df: per-perturbation results for the baseline method.
    """
    # Only summarize drugs present in both result sets.
    drugs = sorted(set(ada_df["perturbation"]) & set(bas_df["perturbation"]))

    # (metric, higher_is_better) pairs shown in the summary.
    key_metrics = [
        ("pearson_delta", True),
        ("mse", False),
        ("mae", False),
        ("de_direction_match", True),
        ("de_spearman_lfc_sig", True),
        ("de_sig_genes_recall", True),
        ("overlap_at_N", True),
        ("pr_auc", True),
        ("roc_auc", True),
    ]

    print("\n" + "=" * 80)
    print("EXPERIMENT RESULTS SUMMARY")
    print("=" * 80)

    for drug in drugs:
        ada = ada_df[ada_df["perturbation"] == drug].iloc[0]
        bas = bas_df[bas_df["perturbation"] == drug].iloc[0]

        print(f"\n--- {drug} ---")
        print(f"{'Metric':<25} {'Adaptive':>12} {'Baseline':>12} {'Diff':>12} {'Winner':>10}")
        print("-" * 75)

        a_wins = 0
        b_wins = 0
        for m, higher_better in key_metrics:
            a_val, b_val = ada[m], bas[m]
            if a_val == -1.0 and b_val == -1.0:
                continue  # metric unavailable for this drug
            diff = a_val - b_val
            if higher_better:
                winner = "Adaptive" if diff > 0 else "Baseline" if diff < 0 else "Tie"
            else:
                winner = "Adaptive" if diff < 0 else "Baseline" if diff > 0 else "Tie"

            if winner == "Adaptive":
                a_wins += 1
            elif winner == "Baseline":
                b_wins += 1

            # Fix: ">+12.6f" keeps positive and negative diffs in the same column;
            # the old manual "+"-prefix shifted positive rows one char to the right.
            print(f"{NICE_NAMES.get(m, m):<25} {a_val:>12.6f} {b_val:>12.6f} {diff:>+12.6f} {winner:>10}")

        print(f"\n Score: Adaptive {a_wins} — Baseline {b_wins}")

    # Drugs whose adaptive prediction is not an essentially perfect match;
    # pearson_delta >= 0.999 presumably flags a trivially easy case — confirm.
    nontrivial_drugs = [
        d for d in drugs
        if ada_df[ada_df["perturbation"] == d].iloc[0].get("pearson_delta", 0) < 0.999
    ]

    if nontrivial_drugs:
        print("\n" + "=" * 80)
        print(f"NON-TRIVIAL DRUGS ({len(nontrivial_drugs)}): {nontrivial_drugs}")
        print("=" * 80)
        for m, higher_better in key_metrics:
            a_vals = [ada_df[ada_df["perturbation"] == d].iloc[0][m] for d in nontrivial_drugs]
            b_vals = [bas_df[bas_df["perturbation"] == d].iloc[0][m] for d in nontrivial_drugs]
            # Fix: exclude -1.0 sentinels so "unavailable" markers don't skew the mean.
            a_valid = [v for v in a_vals if v != -1.0]
            b_valid = [v for v in b_vals if v != -1.0]
            if not a_valid or not b_valid:
                continue  # metric unavailable for every non-trivial drug
            a_mean = float(np.mean(a_valid))
            b_mean = float(np.mean(b_valid))
            diff = a_mean - b_mean
            if higher_better:
                winner = "Adaptive" if diff > 0 else "Baseline"
            else:
                winner = "Adaptive" if diff < 0 else "Baseline"
            sign = "+" if diff > 0 else ""
            print(f" {NICE_NAMES.get(m, m):<25} A={a_mean:.6f} B={b_mean:.6f} diff={sign}{diff:.6f} -> {winner}")
|
|
|
|
| |
| |
| |
|
|
def main():
    """CLI entry point: load results, print the summary, and render all figures."""
    parser = argparse.ArgumentParser(description="Visualize B-cell experiment results")
    parser.add_argument("--results-dir", required=True, help="Path to bcell_test_results")
    parser.add_argument("--output-dir", default=None, help="Output dir for figures (default: results-dir/figures)")
    args = parser.parse_args()

    results_dir = Path(args.results_dir)
    if args.output_dir:
        output_dir = Path(args.output_dir)
    else:
        output_dir = results_dir / "figures"
    output_dir.mkdir(parents=True, exist_ok=True)

    print(f"Results dir: {results_dir}")
    print(f"Output dir: {output_dir}")

    ada_df, bas_df = load_per_pert_results(results_dir)
    comparison_df = load_comparison(results_dir)

    print_summary_table(ada_df, bas_df)

    print("\nGenerating figures...")

    # One bar chart and one radar chart per drug present in both result sets.
    shared_drugs = sorted(set(ada_df["perturbation"]) & set(bas_df["perturbation"]))
    for drug in shared_drugs:
        plot_per_drug_bars(ada_df, bas_df, output_dir, drug)
        ada_row = ada_df[ada_df["perturbation"] == drug].iloc[0]
        bas_row = bas_df[bas_df["perturbation"] == drug].iloc[0]
        plot_radar(ada_row, bas_row, output_dir, drug)

    # Aggregate figures.
    plot_grouped_comparison(comparison_df, output_dir)
    plot_dabrafenib_detail(ada_df, bas_df, output_dir)

    print(f"\nAll figures saved to {output_dir}")
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|