"""
Consolidated script to diagnose and fix h5ad files for transcriptformer.

This script performs a series of checks to validate an AnnData object and
automatically applies fixes for common issues, preparing the data for
inference with transcriptformer.

Usage:
    python preprocess_adata.py <input_h5ad_file> <output_h5ad_file>
"""
|
|
| import sys |
| import os |
| import numpy as np |
| import anndata as ad |
| import scanpy as sc |
| from pathlib import Path |
|
|
def preprocess_adata(input_path, output_path):
    """
    Diagnose and fix an h5ad file for transcriptformer compatibility.

    Checks for NaN/Inf values, duplicate gene indices, a missing
    'ensembl_id' column in ``var``, and zero-expression genes, then applies
    the corresponding fixes and writes the result to ``output_path``.

    Args:
        input_path: Path to the input .h5ad file.
        output_path: Path where the processed .h5ad file is written.

    Returns:
        bool: True on success, False if loading or saving fails.
    """
    print(f"🚀 Starting preprocessing for: {input_path}")
    print("=" * 70)

    print("📖 1. Loading AnnData object...")
    if not os.path.exists(input_path):
        print(f"❌ ERROR: Input file not found: {input_path}")
        return False

    try:
        adata = ad.read_h5ad(input_path)
        print(f"✅ Loaded: {adata.shape[0]} cells × {adata.shape[1]} genes")
    except Exception as e:
        print(f"❌ ERROR: Could not load AnnData file. Reason: {e}")
        return False

    original_shape = adata.shape

    # --- Diagnostics ---
    # This pass is strictly read-only: unlike the previous implementation it
    # never mutates `adata`, so the file does not have to be re-read from
    # disk before applying fixes.
    print("\n🔬 2. Running Diagnostics...")
    issues_found = []

    # Sparse matrices expose their stored values via .data; dense arrays are
    # inspected directly.
    values = adata.X.data if hasattr(adata.X, 'data') else adata.X
    has_nan = bool(np.isnan(values).any())
    has_inf = bool(np.isinf(values).any())
    if has_nan:
        issues_found.append("NaN values found in data matrix.")
    if has_inf:
        issues_found.append("Infinite values found in data matrix.")
    print(f" - NaN/Inf values: {'❌ Found' if has_nan or has_inf else '✅ None'}")

    has_duplicate_genes = adata.var.index.nunique() < len(adata.var.index)
    if has_duplicate_genes:
        issues_found.append("Duplicate gene indices (var_names) found.")
        print(" - Duplicate gene indices: ❌ Found")
    else:
        print(" - Duplicate gene indices: ✅ Unique")

    missing_ensembl = 'ensembl_id' not in adata.var.columns
    if missing_ensembl:
        issues_found.append("'ensembl_id' column missing in var.")
        print(" - 'ensembl_id' column: ❌ Missing")
    else:
        print(" - 'ensembl_id' column: ✅ Present")

    # Count genes expressed in zero cells without filtering them yet
    # (works for both sparse and dense X).
    cells_per_gene = np.asarray((adata.X != 0).sum(axis=0)).ravel()
    num_zero_genes = int((cells_per_gene == 0).sum())
    if num_zero_genes > 0:
        issues_found.append(f"{num_zero_genes} genes with zero expression found.")
        print(f" - Zero-expression genes: ❌ Found ({num_zero_genes} genes)")
    else:
        print(" - Zero-expression genes: ✅ None")

    # --- Fixes ---
    print("\n🔧 3. Applying Fixes...")
    fixes_applied = []

    # Replace NaN/Inf with zeros. (Previously these were diagnosed but never
    # repaired, so the saved file could still contain invalid values.)
    if has_nan or has_inf:
        if hasattr(adata.X, 'data'):
            adata.X.data = np.nan_to_num(adata.X.data, nan=0.0, posinf=0.0, neginf=0.0)
        else:
            adata.X = np.nan_to_num(adata.X, nan=0.0, posinf=0.0, neginf=0.0)
        fixes_applied.append("Replaced NaN/Inf values in the data matrix with 0.")
        print(" - ✅ Replaced NaN/Inf values in the data matrix with 0.")
    else:
        print(" - ✅ No NaN/Inf values to fix.")

    if has_duplicate_genes:
        adata.var_names_make_unique()
        fixes_applied.append("Made var_names unique using .var_names_make_unique()")
        print(" - ✅ Made gene indices (var_names) unique.")
    else:
        print(" - ✅ Gene indices are already unique.")

    if missing_ensembl:
        print(" - Adding 'ensembl_id' column from var.index.")
        adata.var['ensembl_id'] = adata.var.index
        fixes_applied.append("Added 'ensembl_id' column from var.index.")
    else:
        print(" - ✅ 'ensembl_id' column already exists.")

    # Drop genes with no expression (after NaN/Inf replacement, so genes
    # that held only invalid values are also removed).
    genes_before_filter = adata.n_vars
    sc.pp.filter_genes(adata, min_cells=1)
    if adata.n_vars < genes_before_filter:
        num_removed = genes_before_filter - adata.n_vars
        fixes_applied.append(f"Removed {num_removed} genes with no expression.")
        print(f" - ✅ Removed {num_removed} zero-expression genes.")
    else:
        print(" - ✅ No zero-expression genes to remove.")

    # --- Save ---
    print("\n💾 4. Saving Processed File...")
    try:
        adata.write(output_path)
        print(f" - ✅ Successfully saved to: {output_path}")
    except Exception as e:
        print(f"❌ ERROR: Could not save file. Reason: {e}")
        return False

    # --- Summary ---
    print("\n📋 5. Summary")
    print("-" * 70)
    print(f" - Original shape: {original_shape[0]} cells × {original_shape[1]} genes")
    print(f" - Final shape: {adata.shape[0]} cells × {adata.shape[1]} genes")
    print("\n - Issues Found:")
    if issues_found:
        for issue in issues_found:
            print(f" - {issue}")
    else:
        print(" - None")

    print("\n - Fixes Applied:")
    if fixes_applied:
        for fix in fixes_applied:
            print(f" - {fix}")
    else:
        print(" - None")

    print("\n🎉 Preprocessing complete!")
    return True
|
|
def main():
    """Command-line entry point: validate arguments, confirm overwrite, run preprocessing."""
    args = sys.argv[1:]
    if len(args) != 2:
        print("Usage: python preprocess_adata.py <input_h5ad_file> <output_h5ad_file>")
        sys.exit(1)

    input_path, output_path = args

    # Refuse to process a file onto itself.
    if os.path.abspath(input_path) == os.path.abspath(output_path):
        print("❌ ERROR: Input and output paths cannot be the same.")
        sys.exit(1)

    # Ask before clobbering an existing output file.
    if os.path.exists(output_path):
        answer = input(f"⚠️ Output file already exists: {output_path}\nOverwrite? (y/N): ")
        if answer.lower() != 'y':
            print("Operation cancelled.")
            sys.exit(1)

    if not preprocess_adata(input_path, output_path):
        sys.exit(1)


if __name__ == "__main__":
    main()