ancient-scripts-datasets/scripts/flag_phono_quality.py
Alvin
Add Phono_Quality column, downgrade contested/doubtful pairs
0c98632
#!/usr/bin/env python3
"""
Post-audit quality flagging for cognate pairs dataset.
Changes applied:
1. Adds 'Phono_Quality' column based on SCA Score thresholds
2. Downgrades Robbeets cross-family pairs: Confidence "certain" → "contested"
3. Fixes Sino-Tibetan confidence: propagates doubt markers from source NOTE column
Phono_Quality values:
- "strong" : Score >= 0.5 — clear phonological similarity
- "moderate" : 0.2 <= Score < 0.5 — detectable but weak similarity
- "weak" : 0 < Score < 0.2 — minimal similarity, possibly coincidental
- "none" : Score == 0.0 — zero surface similarity despite cognacy
- "unscored" : Score == -1.0 — no SCA score computed (e.g. ACD)
Streams line-by-line for memory efficiency on the 23.5M row file.
"""
import csv
import os
import sys
from collections import defaultdict
from pathlib import Path
# Force UTF-8 output on Windows consoles (the cp125x default cannot encode
# IPA glyphs printed in the summary). Guards:
#   - encoding may be None (e.g. piped streams on some platforms)
#   - comparison is case-insensitive ('UTF-8' vs 'utf-8')
#   - reconfigure() exists only on io.TextIOWrapper; a captured/replaced
#     stdout (pytest, StringIO) would otherwise raise AttributeError
if (sys.stdout.encoding or '').lower() != 'utf-8' and hasattr(sys.stdout, 'reconfigure'):
    sys.stdout.reconfigure(encoding='utf-8')
def build_robbeets_family_map(repo_dir: str) -> dict:
    """
    Build a mapping from language identifier → family for Robbeets data.

    Both the dataset's internal IDs and (where present) ISO 639-3 codes
    are keyed to the same family name, so callers can look up either form
    of identifier.
    """
    mapping = {}
    languages_path = Path(repo_dir) / 'cldf' / 'languages.csv'
    with open(languages_path, encoding='utf-8') as handle:
        for record in csv.DictReader(handle):
            family = record.get('Family', '').strip()
            if not family:
                # Unclassified language: nothing to map.
                continue
            # Internal ID → family
            mapping[record['ID']] = family
            # ISO code → family (when the row has one)
            iso_code = record.get('ISO639P3code', '').strip()
            if iso_code:
                mapping[iso_code] = family
    return mapping
def build_sinotibetan_doubt_cogids(source_path: str) -> set:
    """
    Collect COGID values whose cognate sets contain doubt-marked forms.

    A COGID is included as soon as any of its rows carries a doubt marker
    in the NOTE column: a leading '!' or '?' (which also covers '?!'), or
    the literal word 'doubtful' (case-insensitive).
    """
    flagged = set()
    with open(source_path, encoding='utf-8') as handle:
        for record in csv.DictReader(handle, delimiter='\t'):
            cogid = record.get('COGID', '').strip()
            if not cogid:
                # Row not assigned to any cognate set; nothing to flag.
                continue
            note = record.get('NOTE', '').strip()
            if note.startswith(('!', '?')) or note.lower() == 'doubtful':
                flagged.add(cogid)
    return flagged
def classify_phono_quality(score_str: str) -> str:
    """Classify phonological quality based on an SCA score string.

    Returns one of: 'strong' (>= 0.5), 'moderate' ([0.2, 0.5)),
    'weak' ((0, 0.2)), 'none' (== 0.0), or 'unscored' (unparseable,
    missing, or negative — the -1.0 sentinel marks "not computed").
    """
    try:
        score = float(score_str)
    except (ValueError, TypeError):
        # Missing / malformed score field.
        return 'unscored'
    if score < 0:
        # -1.0 is the explicit "no score computed" sentinel. Any other
        # negative value is outside the documented [0, 1] range, so it is
        # also treated as unscored rather than mislabelled 'weak'
        # (the 'weak' band is defined as 0 < score < 0.2).
        return 'unscored'
    if score == 0.0:
        return 'none'
    if score < 0.2:
        return 'weak'
    if score < 0.5:
        return 'moderate'
    return 'strong'
def main():
    """Flag phonological quality and correct confidence labels.

    Streams the inherited cognate-pairs TSV row by row (the file is large,
    so no rows are held in memory), writes a flagged copy alongside it,
    then swaps the flagged copy into place, keeping a .bak of the original.
    Exits with status 1 on an LFS-pointer input or a failed verification.
    """
    hf_dir = Path(__file__).parent.parent
    inherited_tsv = hf_dir / 'data' / 'training' / 'cognate_pairs' / 'cognate_pairs_inherited.tsv'
    output_tsv = inherited_tsv.with_suffix('.flagged.tsv')

    # ── Pre-load lookup tables ──
    # 1. Robbeets family map (for cross-family "contested" downgrades)
    robbeets_dir = hf_dir / 'sources_tier1' / 'robbeetstriangulation'
    if robbeets_dir.exists():
        print('Loading Robbeets family map...')
        robbeets_family = build_robbeets_family_map(str(robbeets_dir))
        print(f'  {len(robbeets_family)} language→family mappings')
    else:
        print('WARNING: Robbeets source dir not found, skipping cross-family detection')
        robbeets_family = {}

    # 2. Sino-Tibetan doubt COGIDs (for "doubtful" propagation)
    st_source = hf_dir.parent / 'ancient-scripts-datasets' / 'sources' / 'sinotibetan' / 'sinotibetan_dump.tsv'
    if st_source.exists():
        print('Loading Sino-Tibetan doubt markers...')
        st_doubt_cogids = build_sinotibetan_doubt_cogids(str(st_source))
        print(f'  {len(st_doubt_cogids)} doubt-flagged COGIDs')
    else:
        print('WARNING: Sino-Tibetan source not found, skipping doubt marker fix')
        st_doubt_cogids = set()

    # ── Check LFS pointer ──
    # Without this, a pointer file would be "processed" silently and the
    # real dataset would be replaced by an empty flagged file.
    with open(inherited_tsv, encoding='utf-8') as f:
        first_line = f.readline()
    if first_line.startswith('version https://git-lfs.github.com'):
        print('ERROR: inherited TSV is an LFS pointer. Run: git lfs pull')
        sys.exit(1)

    # ── Stream-process ──
    INPUT_COLUMNS = [
        'Lang_A', 'Word_A', 'IPA_A', 'Lang_B', 'Word_B', 'IPA_B',
        'Concept_ID', 'Relationship', 'Score', 'Source',
        'Relation_Detail', 'Donor_Language', 'Confidence', 'Source_Record_ID',
    ]
    OUTPUT_COLUMNS = INPUT_COLUMNS + ['Phono_Quality']

    # Counters
    total = 0
    phono_counts = defaultdict(int)
    robbeets_downgraded = 0
    st_doubt_fixed = 0
    source_counts = defaultdict(int)
    confidence_changes = defaultdict(int)

    print(f'\nProcessing {inherited_tsv}...')
    print(f'Output: {output_tsv}')
    with open(inherited_tsv, encoding='utf-8') as fin, \
            open(output_tsv, 'w', encoding='utf-8', newline='') as fout:
        reader = csv.DictReader(fin, delimiter='\t')
        # extrasaction='ignore' drops unexpected input columns instead of raising.
        writer = csv.DictWriter(fout, fieldnames=OUTPUT_COLUMNS, delimiter='\t',
                                extrasaction='ignore')
        writer.writeheader()
        for row in reader:
            total += 1
            source = row.get('Source', '')
            source_counts[source] += 1

            # ── 1. Phono_Quality from Score ──
            phono_quality = classify_phono_quality(row.get('Score', ''))
            row['Phono_Quality'] = phono_quality
            phono_counts[phono_quality] += 1

            # ── 2. Robbeets cross-family → "contested" ──
            if source == 'robbeetstriangulation' and robbeets_family:
                fam_a = robbeets_family.get(row['Lang_A'], '')
                fam_b = robbeets_family.get(row['Lang_B'], '')
                # Downgrade only when BOTH families are known and differ.
                if fam_a and fam_b and fam_a != fam_b:
                    if row['Confidence'] == 'certain':
                        row['Confidence'] = 'contested'
                        robbeets_downgraded += 1
                        confidence_changes['certain→contested'] += 1

            # ── 3. Sino-Tibetan doubt markers ──
            if source == 'sinotibetan' and st_doubt_cogids:
                # Source_Record_ID format: st_{COGID}
                src_id = row.get('Source_Record_ID', '')
                if src_id.startswith('st_'):
                    cogid = src_id[3:]  # strip "st_" prefix
                    if cogid in st_doubt_cogids:
                        if row['Confidence'] == 'certain':
                            row['Confidence'] = 'doubtful'
                            st_doubt_fixed += 1
                            confidence_changes['certain→doubtful (ST)'] += 1

            writer.writerow(row)
            if total % 5_000_000 == 0:
                print(f'  Processed {total:,} rows...')

    # ── Summary ──
    print(f'\n=== PROCESSING COMPLETE ===')
    print(f'Total rows: {total:,}')
    print(f'\n--- Phono_Quality Distribution ---')
    for quality in ['strong', 'moderate', 'weak', 'none', 'unscored']:
        count = phono_counts[quality]
        pct = count / total * 100 if total else 0
        print(f'  {quality:12s}: {count:>12,} ({pct:5.2f}%)')
    print(f'\n--- Source Counts ---')
    for src, count in sorted(source_counts.items(), key=lambda x: -x[1]):
        print(f'  {src:30s}: {count:>12,}')
    print(f'\n--- Confidence Changes ---')
    print(f'  Robbeets cross-family downgraded: {robbeets_downgraded:,}')
    print(f'  Sino-Tibetan doubt-fixed: {st_doubt_fixed:,}')
    for change, count in sorted(confidence_changes.items()):
        print(f'  {change}: {count:,}')

    # ── Replace original with flagged version ──
    print(f'\nReplacing original TSV with flagged version...')
    backup = inherited_tsv.with_suffix('.tsv.bak')
    # os.replace (not os.rename): on Windows, os.rename raises
    # FileExistsError if a stale .bak from a previous run is present;
    # os.replace overwrites it atomically on all platforms.
    os.replace(inherited_tsv, backup)
    os.replace(output_tsv, inherited_tsv)
    print(f'  Original backed up to {backup.name}')
    print(f'  Flagged version now at {inherited_tsv.name}')

    # ── Verify ──
    # Explicit checks rather than assert: asserts are stripped under -O,
    # and a silent verification is no verification at all.
    print(f'\nVerifying final file...')
    verify_count = 0
    has_phono_col = False
    with open(inherited_tsv, encoding='utf-8') as f:
        header = f.readline().strip()
        if 'Phono_Quality' in header:
            has_phono_col = True
        for _ in f:
            verify_count += 1
    print(f'  Header has Phono_Quality: {has_phono_col}')
    print(f'  Data rows: {verify_count:,}')
    if verify_count != total:
        print(f'ERROR: COUNT MISMATCH: {verify_count} vs {total}')
        sys.exit(1)
    if not has_phono_col:
        print('ERROR: Phono_Quality column missing from header!')
        sys.exit(1)
    print(f'  VERIFICATION PASSED')
# Script entry point: run only when executed directly, so the module can
# be imported (e.g. for testing classify_phono_quality) without side effects.
if __name__ == '__main__':
    main()