import pandas as pd
import numpy as np
# --- Load the annotated SAbDab summary table ---
df = pd.read_csv("~/Desktop/sabdab_summary_with_flags.tsv", sep="\t")

# Build a composite stratification key: one string per row joining all the
# flag columns, so the split can preserve their joint distribution.
strata_cols = ["antigen_type", "heavy_species", "method", "scfv",
               "engineered", "light_ctype", "in_nr_set", "curated_quality_dataset"]
df["strata"] = df[strata_cols].astype(str).agg("_".join, axis=1)

# Placeholder; filled in per-stratum below.
df["split"] = ""

# Target partition proportions.
train_frac, val_frac, test_frac = 0.8, 0.1, 0.1

# Fixed seed -> deterministic shuffles, reproducible splits.
np.random.seed(42)

# Every distinct stratum present in the data.
all_strata = df["strata"].unique()
# Assign train/validation/test labels within each stratum so the overall
# split is stratified on the composite key.
for s in all_strata:
    idx = df[df["strata"] == s].index.to_list()
    np.random.shuffle(idx)  # in-place shuffle of the row labels
    n = len(idx)

    # FIX: strata with fewer than 3 rows cannot be split three ways. The old
    # rebalancing logic decremented n_train to 0 here, leaving singletons in
    # "validation" and 2-row strata with an empty train set. Keep tiny strata
    # entirely in train so the model sees every stratum at least once.
    if n < 3:
        df.loc[idx, "split"] = "train"
        continue

    # Floor counts, guaranteeing at least one row per partition.
    n_train = max(1, int(n * train_frac))
    n_val = max(1, int(n * val_frac))
    n_test = n - n_train - n_val  # whatever remains

    # Rounding can leave the test partition empty; steal one row back,
    # preferring to shrink validation over train.
    if n_test < 1:
        n_test = 1
        if n_val > 1:
            n_val -= 1
        else:
            n_train -= 1

    # Contiguous slices of the shuffled label list -> split labels.
    df.loc[idx[:n_train], "split"] = "train"
    df.loc[idx[n_train:n_train + n_val], "split"] = "validation"
    df.loc[idx[n_train + n_val:], "split"] = "test"
# The helper column has served its purpose; remove it before reporting/saving.
df.drop(columns=["strata"], inplace=True)

# Overall split proportions as percentages — sanity check against the targets.
print(df["split"].value_counts(normalize=True) * 100)

# Per-feature breakdown: confirm each category is represented proportionally
# across the three splits.
cols = ["antigen_type", "heavy_species", "method", "scfv",
        "engineered", "light_ctype", "in_nr_set", "curated_quality_dataset"]
for col in cols:
    print(f"\n=== {col} ===")
    # Counts of each category per split, then row-normalised so every
    # category's percentages sum to 100% across the splits.
    counts = pd.crosstab(df[col], df["split"])
    row_pct = counts.div(counts.sum(axis=1), axis=0) * 100
    print(row_pct.round(1))
# Persist the table with the new "split" column next to the input file.
# FIX: removed the stray " |" viewer artifact that terminated the final
# print statement and made it invalid Python.
out_path = "~/Desktop/sabdab_summary_with_splits.tsv"
df.to_csv(out_path, sep="\t", index=False)
print("Saved updated DataFrame with split column to ~/Desktop/sabdab_summary_with_splits.tsv")