import pandas as pd
import numpy as np

# Columns whose string-joined values define a stratum for the split.
STRATA_COLS = [
    "antigen_type",
    "heavy_species",
    "method",
    "scfv",
    "engineered",
    "light_ctype",
    "in_nr_set",
    "curated_quality_dataset",
]

# Target fractions for the three splits (train + val + test = 1.0).
TRAIN_FRAC = 0.8
VAL_FRAC = 0.1
TEST_FRAC = 0.1


def assign_stratified_splits(df, strata_cols, train_frac=TRAIN_FRAC,
                             val_frac=VAL_FRAC, seed=42):
    """Add a "split" column ("train"/"validation"/"test") to *df* in place.

    Rows are grouped into strata by the string-joined values of
    *strata_cols*; each stratum is shuffled reproducibly (via *seed*) and
    partitioned so roughly ``train_frac`` / ``val_frac`` of its rows go to
    train / validation, with the remainder going to test.

    Bug fix vs. the original script: 1- and 2-row strata no longer lose
    their training sample.  Previously the rounding adjustment forced
    ``n_test = 1`` even for tiny strata and decremented ``n_train`` to 0,
    so a singleton stratum ended up entirely in validation.  Now train
    always keeps at least one row, and a test row is only forced when the
    stratum has >= 3 rows (allocations for n >= 3 are unchanged).

    Returns *df* (the same object, mutated).
    """
    # Computed as a Series rather than a temporary column, so there is
    # nothing to drop afterwards.
    strata = df[strata_cols].astype(str).agg("_".join, axis=1)
    df["split"] = ""
    np.random.seed(seed)  # reproducible shuffles

    for s in strata.unique():
        idx = df.index[strata == s].to_list()
        np.random.shuffle(idx)
        n = len(idx)

        n_train = max(1, int(n * train_frac))
        n_val = max(1, int(n * val_frac))
        n_test = n - n_train - n_val  # whatever remains

        # Force at least one test row, but only when the stratum is large
        # enough (>= 3 rows) that train can still keep a row.  Steal the
        # row from validation first, then from train; the elif guard
        # guarantees n_train never drops below 1.
        if n_test < 1 and n >= 3:
            n_test = 1
            if n_val > 1:
                n_val -= 1
            elif n_train > 1:
                n_train -= 1

        df.loc[idx[:n_train], "split"] = "train"
        df.loc[idx[n_train:n_train + n_val], "split"] = "validation"
        df.loc[idx[n_train + n_val:], "split"] = "test"

    return df


def print_split_report(df, cols):
    """Print overall split percentages and a row-wise percentage crosstab
    of split membership for each column in *cols*."""
    print(df["split"].value_counts(normalize=True) * 100)
    for col in cols:
        print(f"\n=== {col} ===")
        # Cross-tab of counts by split
        ct = pd.crosstab(df[col], df["split"])
        # Convert counts to percentages **row-wise** so each category sums to 100%
        ct_percent = ct.div(ct.sum(axis=1), axis=0) * 100
        print(ct_percent.round(1))


def main():
    """Load the SAbDab summary, assign splits, report, and save."""
    # pandas expands "~" in read_csv/to_csv paths.
    df = pd.read_csv("~/Desktop/sabdab_summary_with_flags.tsv", sep="\t")
    assign_stratified_splits(df, STRATA_COLS)
    print_split_report(df, STRATA_COLS)
    # Save to TSV
    df.to_csv("~/Desktop/sabdab_summary_with_splits.tsv", sep="\t", index=False)
    print("Saved updated DataFrame with split column to ~/Desktop/sabdab_summary_with_splits.tsv")


if __name__ == "__main__":
    main()